Unify calling conventions for converter functions and modules. (lyx2lyx)
git-svn-id: svn://svn.lyx.org/lyx/lyx-devel/trunk@9433 a592a061-630c-0410-9148-cb99ea01b6c8
parent 780ee52866
commit 4bd289f1e3
@@ -1,3 +1,19 @@
+2005-01-04  José Matos  <jamatos@lyx.org>
+
+        * LyX.py (set_format): fix typo.
+
+        * lyx_0_12.py:
+        * lyx_1_0_0.py:
+        * lyx_1_0_1.py:
+        * lyx_1_1_4.py:
+        * lyx_1_1_5.py:
+        * lyx_1_1_6.py:
+        * lyx_1_1_6fix3.py:
+        * lyx_1_2.py:
+        * lyx_1_3.py:
+        * lyx_1_4.py: unify the calling convention of conversion
+        functions. Now they all accept a file.
+
 2004-12-03  José Matos  <jamatos@lyx.org>

         * LyX.py: format up to 238.
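The pattern behind every hunk below is the same: a converter that used to take a bare list of lines (and, in a few modules, a separate opt object) now takes the file object and fetches what it needs from file.body or file.header itself. A minimal sketch of the before/after, using a hypothetical strip_token converter and a hypothetical token name rather than any real lyx2lyx function:

# Old convention: the caller decides which part of the document to pass in.
def strip_token_old(lines):
    while "\\sometoken" in lines:       # "\sometoken" is invented for this sketch
        lines.remove("\\sometoken")

# New convention: every converter accepts the file object and picks the part
# itself, so a driver loop can call all converters the same way.
def strip_token_new(file):
    lines = file.body
    while "\\sometoken" in lines:
        lines.remove("\\sometoken")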
@@ -243,7 +243,7 @@ class LyX_Base:
     def set_format(self):
         " Set the file format of the file, in the header."
         if self.format <= 217:
-            format = str(float(format)/100)
+            format = str(float(self.format)/100)
         else:
             format = str(self.format)
         i = find_token(self.header, "\\lyxformat", 0)
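The "typo" mentioned in the ChangeLog is the reference to the unassigned local format instead of self.format on the old line. The division by 100 exists because formats up to 217 are written into the \lyxformat header line as a decimal rather than as the internal integer, so the arithmetic goes like this (plain Python, independent of the LyX classes):

# Formats <= 217: the header presumably reads e.g. "\lyxformat 2.17".
print(str(float(217)/100))   # -> "2.17"
# Later formats are written as the integer itself, e.g. "\lyxformat 221".
print(str(221))              # -> "221"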
@@ -21,7 +21,8 @@ import string
 from parser_tools import find_token, find_re, check_token


-def space_before_layout(lines):
+def space_before_layout(file):
+    lines = file.body
     i = 2 # skip first layout
     while 1:
         i = find_token(lines, '\\layout', i)
@@ -33,7 +34,8 @@ def space_before_layout(lines):
         i = i + 1


-def formula_inset_space_eat(lines):
+def formula_inset_space_eat(file):
+    lines = file.body
     i=0
     while 1:
         i = find_token(lines, "\\begin_inset Formula", i)
@@ -45,7 +47,8 @@ def formula_inset_space_eat(lines):


 # Update from tabular format 2 to 4
-def update_tabular(lines):
+def update_tabular(file):
+    lines = file.body
     lyxtable_re = re.compile(r".*\\LyXTable$")
     i=0
     while 1:
@@ -76,7 +79,8 @@ def update_tabular(lines):
         i = i + 1


-def final_dot(lines):
+def final_dot(file):
+    lines = file.body
     i = 0
     while i < len(lines):
         if lines[i][-1:] == '.' and lines[i+1][:1] != '\\' and lines[i+1][:1] != ' ' and len(lines[i]) + len(lines[i+1])<= 72 and lines[i+1] != '':
@@ -86,7 +90,8 @@ def final_dot(lines):
         i = i + 1


-def update_inset_label(lines):
+def update_inset_label(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_token(lines, '\\begin_inset Label', i)
@@ -96,7 +101,8 @@ def update_inset_label(lines):
         i = i + 1


-def update_latexdel(lines):
+def update_latexdel(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_token(lines, '\\begin_inset LatexDel', i)
@@ -106,13 +112,15 @@ def update_latexdel(lines):
         i = i + 1


-def update_vfill(lines):
+def update_vfill(file):
+    lines = file.body
     for i in range(len(lines)):
         lines[i] = string.replace(lines[i],'\\fill_top','\\added_space_top vfill')
         lines[i] = string.replace(lines[i],'\\fill_bottom','\\added_space_bottom vfill')


-def update_space_units(lines):
+def update_space_units(file):
+    lines = file.body
     added_space_bottom = re.compile(r'\\added_space_bottom ([^ ]*)')
     added_space_top = re.compile(r'\\added_space_top ([^ ]*)')
     for i in range(len(lines)):
@@ -129,11 +137,13 @@ def update_space_units(lines):
         lines[i] = string.replace(lines[i], old, new)


-def update_inset_accent(lines):
+def update_inset_accent(file):
+    lines = file.body
     pass


-def remove_cursor(lines):
+def remove_cursor(file):
+    lines = file.body
     i = 0
     cursor_re = re.compile(r'.*(\\cursor \d*)')
     while 1:
@@ -145,7 +155,8 @@ def remove_cursor(lines):
         i = i + 1


-def remove_empty_insets(lines):
+def remove_empty_insets(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_token(lines, '\\begin_inset ',i)
@@ -157,7 +168,8 @@ def remove_empty_insets(lines):
         i = i + 1


-def remove_formula_latex(lines):
+def remove_formula_latex(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_token(lines, '\\latex formula_latex ', i)
@@ -171,13 +183,15 @@ def remove_formula_latex(lines):
         del lines[i]


-def add_end_document(lines):
+def add_end_document(file):
+    lines = file.body
     i = find_token(lines, '\\the_end', 0)
     if i == -1:
         lines.append('\\the_end')


-def header_update(lines, file):
+def header_update(file):
+    lines = file.header
     i = 0
     l = len(lines)
     while i < l:
@@ -265,21 +279,16 @@ def update_latexaccents(file):


 def convert(file):
-    header_update(file.header, file)
-    add_end_document(file.body)
-    remove_cursor(file.body)
-    final_dot(file.body)
-    update_inset_label(file.body)
-    update_latexdel(file.body)
-    update_space_units(file.body)
-    update_inset_accent(file.body)
-    space_before_layout(file.body)
-    formula_inset_space_eat(file.body)
-    update_tabular(file.body)
-    update_vfill(file.body)
-    remove_empty_insets(file.body)
-    remove_formula_latex(file.body)
-    update_latexaccents(file)
+    table = [header_update, add_end_document, remove_cursor,
+             final_dot, update_inset_label, update_latexdel,
+             update_space_units, update_inset_accent,
+             space_before_layout, formula_inset_space_eat,
+             update_tabular, update_vfill, remove_empty_insets,
+             remove_formula_latex, update_latexaccents]
+
+    for conv in table:
+        conv(file)

     file.format = 215

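With the shared one-argument signature in place, each module's convert() collapses into the same driver: an ordered list of steps and a loop, followed by the format bump. A small runnable sketch of that shape; the MockFile class and both step functions are invented stand-ins (only loosely modelled on add_end_document and remove_cursor), not the real lyx2lyx code:

class MockFile:
    # Carries only the attributes the converters in this diff actually touch.
    def __init__(self, header, body):
        self.header = header
        self.body = body
        self.format = 0

def ensure_end_marker(file):   # stand-in, loosely modelled on add_end_document
    if "\\the_end" not in file.body:
        file.body.append("\\the_end")

def drop_cursor_lines(file):   # stand-in, loosely modelled on remove_cursor
    file.body = [line for line in file.body if not line.startswith("\\cursor")]

def convert(file):
    table = [ensure_end_marker, drop_cursor_lines]
    for conv in table:         # every step receives the same file object
        conv(file)
    file.format = 215          # the format this module produces

doc = MockFile(header=["\\lyxformat 2.12"], body=["\\layout Standard", "\\cursor 0"])
convert(doc)
print(doc.format)   # 215
print(doc.body)     # ['\\layout Standard', '\\the_end']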
@@ -17,6 +17,11 @@
 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

 def convert(file):
+    table = []
+
+    for conv in table:
+        conv(file)
+
     file.format = 215

@@ -17,6 +17,11 @@
 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

 def convert(file):
+    table = []
+
+    for conv in table:
+        conv(file)
+
     file.format = 215

@@ -17,6 +17,11 @@
 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

 def convert(file):
+    table = []
+
+    for conv in table:
+        conv(file)
+
     file.format = 215

@@ -24,7 +24,8 @@ from parser_tools import find_token, find_token_backwards, find_re
 layout_exp = re.compile(r"\\layout (\S*)")
 math_env = ["\\[","\\begin{eqnarray*}","\\begin{eqnarray}","\\begin{equation}"]

-def replace_protected_separator(lines):
+def replace_protected_separator(file):
+    lines = file.body
     i=0
     while 1:
         i = find_token(lines, "\\protected_separator", i)
@@ -47,7 +48,8 @@ def replace_protected_separator(lines):
         del lines[i]


-def merge_formula_inset(lines):
+def merge_formula_inset(file):
+    lines = file.body
     i=0
     while 1:
         i = find_token(lines, "\\begin_inset Formula", i)
@@ -59,7 +61,8 @@ def merge_formula_inset(lines):


 # Update from tabular format 4 to 5 if necessary
-def update_tabular(lines):
+def update_tabular(file):
+    lines = file.body
     lyxtable_re = re.compile(r".*\\LyXTable$")
     i=0
     while 1:
@@ -90,7 +93,8 @@ def update_tabular(lines):
         i = i + 1


-def update_toc(lines):
+def update_toc(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_token(lines, '\\begin_inset LatexCommand \\tableofcontents', i)
@@ -100,13 +104,15 @@ def update_toc(lines):
         i = i + 1


-def remove_cursor(lines):
+def remove_cursor(file):
+    lines = file.body
     i = find_token(lines, '\\cursor', 0)
     if i != -1:
         del lines[i]


-def remove_vcid(lines):
+def remove_vcid(file):
+    lines = file.header
     i = find_token(lines, '\\lyxvcid', 0)
     if i != -1:
         del lines[i]
@@ -115,14 +121,16 @@ def remove_vcid(lines):
         del lines[i]


-def first_layout(lines):
+def first_layout(file):
+    lines = file.body
     while (lines[0] == ""):
         del lines[0]
     if lines[0][:7] != "\\layout":
         lines[:0] = ["\\layout Standard"]


-def remove_space_in_units(lines):
+def remove_space_in_units(file):
+    lines = file.header
     margins = ["\\topmargin","\\rightmargin",
                "\\leftmargin","\\bottommargin"]

@@ -148,14 +156,13 @@ def remove_space_in_units(lines):


 def convert(file):
-    first_layout(file.body)
-    remove_vcid(file.header)
-    remove_cursor(file.body)
-    update_toc(file.body)
-    replace_protected_separator(file.body)
-    merge_formula_inset(file.body)
-    update_tabular(file.body)
-    remove_space_in_units(file.header)
+    table = [first_layout, remove_vcid, remove_cursor, update_toc,
+             replace_protected_separator, merge_formula_inset,
+             update_tabular, remove_space_in_units]
+
+    for conv in table:
+        conv(file)

     file.format = 216

@@ -22,7 +22,8 @@ from parser_tools import find_re, find_tokens, find_token, check_token


 lyxtable_re = re.compile(r".*\\LyXTable$")
-def update_tabular(lines, opt):
+def update_tabular(file):
+    lines = file.body
     i=0
     while 1:
         i = find_re(lines, lyxtable_re, i)
@@ -105,7 +106,7 @@ def update_tabular(lines, opt):
         end = find_token(lines, '\\newline', i)

         if end == -1:
-            opt.error("Malformed LyX file.")
+            file.error("Malformed LyX file.")

         end = end - i
         while end > 0:
@@ -262,7 +263,8 @@ def set_paragraph_properties(lines, prop_dict):
     return result[:]


-def update_language(header):
+def update_language(file):
+    header = file.header
     i = find_token(header, "\\language", 0)
     if i == -1:
         # no language, should emit a warning
@@ -275,8 +277,11 @@ def update_language(header):


 def convert(file):
-    update_tabular(file.body, file)
-    update_language(file.header)
+    table = [update_tabular, update_language]
+
+    for conv in table:
+        conv(file)

     file.format = 217

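The hunks above also retire the second argument: what used to come in through opt (error reporting, warnings, the document language) is now reached through the same file object. A hypothetical converter, grounded only in the attributes and methods these diffs actually use (file.body, file.header, file.error, file.warning, file.language); the checks and messages themselves are invented:

def sanity_check(file):
    if not file.body:                      # body and header are lists of lines
        file.error("Malformed LyX file.")  # fatal report, as in update_tabular
    if not file.header:
        file.warning("empty header")       # non-fatal diagnostic; message invented
    return file.language                   # document language, as used later by remove_oldfloat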
@@ -31,7 +31,8 @@ align_table = {"0": "top", "2": "left", "4": "right", "8": "center"}
 use_table = {"0": "none", "1": "parbox"}
 table_meta_re = re.compile(r'<LyXTabular version="?1"? rows="?(\d*)"? columns="?(\d*)"?>')

-def update_tabular(lines, opt):
+def update_tabular(file):
+    lines = file.body
     i=0
     while 1:
         i = find_token(lines, '\\begin_inset Tabular', i)
@@ -48,7 +49,7 @@ def update_tabular(lines, opt):

         j = find_token(lines, '</LyXTabular>', i) + 1
         if j == 0:
-            opt.warning( "Error: Bad lyx format i=%d j=%d" % (i,j))
+            file.warning( "Error: Bad lyx format i=%d j=%d" % (i,j))
             break

         new_table = table_update(lines[i:j])
@@ -114,7 +115,11 @@ def table_update(lines):


 def convert(file):
-    update_tabular(file.body, file)
+    table = [update_tabular]
+
+    for conv in table:
+        conv(file)

     file.format = 218

@@ -71,7 +71,8 @@ def get_width(mo):
 #
 # Change \begin_float .. \end_float into \begin_inset Float .. \end_inset
 #
-def remove_oldfloat(lines, opt):
+def remove_oldfloat(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_token(lines, "\\begin_float", i)
@@ -82,7 +83,7 @@ def remove_oldfloat(lines, opt):

         floattype = string.split(lines[i])[1]
         if not floats.has_key(floattype):
-            opt.warning("Error! Unknown float type " + floattype)
+            file.warning("Error! Unknown float type " + floattype)
             floattype = "fig"

         # skip \end_deeper tokens
@@ -125,7 +126,7 @@ def remove_oldfloat(lines, opt):
                 flag = 1
                 new.append("")
             if token == "\\lang":
-                new.append(token+" "+ opt.language)
+                new.append(token+" "+ file.language)
             else:
                 new.append(token+" default ")

@@ -136,7 +137,8 @@ def remove_oldfloat(lines, opt):
 pextra_type2_rexp = re.compile(r".*\\pextra_type\s+[12]")
 pextra_type2_rexp2 = re.compile(r".*(\\layout|\\pextra_type\s+2)")

-def remove_pextra(lines):
+def remove_pextra(file):
+    lines = file.body
     i = 0
     flag = 0
     while 1:
@@ -213,7 +215,8 @@ ert_begin = ["\\begin_inset ERT",
             "\\layout Standard"]


-def remove_oldert(lines):
+def remove_oldert(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_tokens(lines, ["\\latex latex", "\\layout LaTeX"], i)
@@ -322,7 +325,8 @@ def remove_oldert(lines):


 # ERT insert are hidden feature of lyx 1.1.6. This might be removed in the future.
-def remove_oldertinset(lines):
+def remove_oldertinset(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_token(lines, "\\begin_inset ERT", i)
@@ -351,7 +355,8 @@ def is_ert_paragraph(lines, i):
     return check_token(lines[k], "\\layout")


-def combine_ert(lines):
+def combine_ert(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_token(lines, "\\begin_inset ERT", i)
@@ -392,7 +397,8 @@ def write_attribute(x, token, value):
     x.append("\t"+token+" "+value)


-def remove_figinset(lines):
+def remove_figinset(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_token(lines, "\\begin_inset Figure", i)
@@ -466,7 +472,8 @@ def remove_figinset(lines):
 attr_re = re.compile(r' \w*="(false|0|)"')
 line_re = re.compile(r'<(features|column|row|cell)')

-def update_tabular(lines):
+def update_tabular(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_token(lines, '\\begin_inset Tabular', i)
@@ -662,7 +669,8 @@ def update_longtables(file):


 # Figure insert are hidden feature of lyx 1.1.6. This might be removed in the future.
-def fix_oldfloatinset(lines):
+def fix_oldfloatinset(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_token(lines, "\\begin_inset Float ", i)
@@ -674,7 +682,8 @@ def fix_oldfloatinset(lines):
         i = i+1


-def change_listof(lines):
+def change_listof(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_token(lines, "\\begin_inset LatexCommand \\listof", i)
@@ -685,7 +694,8 @@ def change_listof(lines):
         i = i+1


-def change_infoinset(lines):
+def change_infoinset(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_token(lines, "\\begin_inset Info", i)
@@ -711,7 +721,8 @@ def change_infoinset(lines):
         i = i+5


-def change_preamble(lines):
+def change_preamble(file):
+    lines = file.header
     i = find_token(lines, "\\use_amsmath", 0)
     if i == -1:
         return
@@ -720,18 +731,14 @@ def change_preamble(lines):


 def convert(file):
-    change_preamble(file.header)
-    change_listof(file.body)
-    fix_oldfloatinset(file.body)
-    update_tabular(file.body)
-    update_longtables(file)
-    remove_pextra(file.body)
-    remove_oldfloat(file.body, file)
-    remove_figinset(file.body)
-    remove_oldertinset(file.body)
-    remove_oldert(file.body)
-    combine_ert(file.body)
-    change_infoinset(file.body)
+    table = [change_preamble, change_listof, fix_oldfloatinset,
+             update_tabular, update_longtables, remove_pextra,
+             remove_oldfloat, remove_figinset, remove_oldertinset,
+             remove_oldert, combine_ert, change_infoinset]
+
+    for conv in table:
+        conv(file)

     file.format = 220

@@ -22,7 +22,8 @@ import re
 from parser_tools import find_token, find_end_of_inset, get_value,\
      find_token2, del_token

-def change_insetgraphics(lines):
+def change_insetgraphics(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_token(lines, "\\begin_inset Graphics", i)
@@ -79,7 +80,8 @@ def change_insetgraphics(lines):
         i = i+1


-def change_tabular(lines):
+def change_tabular(file):
+    lines = file.body
     i = 0
     while 1:
         i = find_token(lines, "<column", i)
@@ -91,8 +93,11 @@ def change_tabular(lines):


 def convert(file):
-    change_insetgraphics(file.body)
-    change_tabular(file.body)
+    table = [change_insetgraphics, change_tabular]
+
+    for conv in table:
+        conv(file)

     file.format = 221

@@ -1511,33 +1511,30 @@ def use_x_binary(file):
 # Convertion hub
 #
 def convert(file):
-    table = { 223 : [insert_tracking_changes, add_end_header, remove_color_default,
-                     convert_spaces, convert_bibtex, remove_insetparent],
-              224 : [convert_external, convert_comment],
-              225 : [add_end_layout, layout2begin_layout, convert_end_document,
-                     convert_table_valignment_middle, convert_breaks],
-              226 : [convert_note],
-              227 : [convert_box],
-              228 : [convert_collapsable, convert_ert],
-              229 : [convert_minipage],
-              230 : [convert_jurabib],
-              231 : [convert_float],
-              232 : [convert_bibtopic],
-              233 : [convert_graphics, convert_names],
-              234 : [convert_cite_engine],
-              235 : [convert_paperpackage],
-              236 : [convert_bullets, add_begin_header, add_begin_body,
-                     normalize_papersize, strip_end_space],
-              237 : [use_x_boolean],
-              238 : [update_latexaccents]}
+    table = [[223, [insert_tracking_changes, add_end_header, remove_color_default,
+                    convert_spaces, convert_bibtex, remove_insetparent]],
+             [224, [convert_external, convert_comment]],
+             [225, [add_end_layout, layout2begin_layout, convert_end_document,
+                    convert_table_valignment_middle, convert_breaks]],
+             [226, [convert_note]],
+             [227, [convert_box]],
+             [228, [convert_collapsable, convert_ert]],
+             [229, [convert_minipage]],
+             [230, [convert_jurabib]],
+             [231, [convert_float]],
+             [232, [convert_bibtopic]],
+             [233, [convert_graphics, convert_names]],
+             [234, [convert_cite_engine]],
+             [235, [convert_paperpackage]],
+             [236, [convert_bullets, add_begin_header, add_begin_body,
+                    normalize_papersize, strip_end_space]],
+             [237, [use_x_boolean]],
+             [238, [update_latexaccents]]]

-    chain = table.keys()
-    chain.sort()
-
-    for version in chain:
+    for version, conv_steps in table:
         if file.format >= version:
             continue
-        for convert in table[version]:
+        for convert in conv_steps:
             convert(file)
         file.format = version
         if file.end_format == file.format:
@@ -1545,34 +1542,30 @@ def convert(file):


 def revert(file):
-    table = { 237: [],
-              236: [use_x_binary],
-              235: [denormalize_papersize, remove_begin_body,remove_begin_header,
-                    revert_bullets],
-              234: [revert_paperpackage],
-              233: [revert_cite_engine],
-              232: [revert_names],
-              231: [revert_bibtopic],
-              230: [revert_float],
-              229: [revert_jurabib],
-              228: [],
-              227: [revert_collapsable, revert_ert],
-              226: [revert_box, revert_external_2],
-              225: [revert_note],
-              224: [rm_end_layout, begin_layout2layout, revert_end_document,
-                    revert_valignment_middle, convert_vspace, convert_frameless_box],
-              223: [revert_external_2, revert_comment],
-              221: [rm_end_header, revert_spaces, revert_bibtex,
-                    rm_tracking_changes, rm_body_changes]}
+    table = [[237, []],
+             [236, [use_x_binary]],
+             [235, [denormalize_papersize, remove_begin_body,remove_begin_header,
+                    revert_bullets]],
+             [234, [revert_paperpackage]],
+             [233, [revert_cite_engine]],
+             [232, [revert_names]],
+             [231, [revert_bibtopic]],
+             [230, [revert_float]],
+             [229, [revert_jurabib]],
+             [228, []],
+             [227, [revert_collapsable, revert_ert]],
+             [226, [revert_box, revert_external_2]],
+             [225, [revert_note]],
+             [224, [rm_end_layout, begin_layout2layout, revert_end_document,
+                    revert_valignment_middle, convert_vspace, convert_frameless_box]],
+             [223, [revert_external_2, revert_comment]],
+             [221, [rm_end_header, revert_spaces, revert_bibtex,
+                    rm_tracking_changes, rm_body_changes]]]

-    chain = table.keys()
-    chain.sort()
-    chain.reverse()
-
-    for version in chain:
+    for version, conv_steps in table:
         if file.format <= version:
             continue
-        for convert in table[version]:
+        for convert in conv_steps:
             convert(file)
         file.format = version
         if file.end_format == file.format:
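The conversion hub itself changes shape as well: the dictionary keyed by format number plus an explicitly sorted (and, for revert, reversed) key list becomes a plain list of [version, steps] pairs that is already in chain order. A standalone model of the stepping logic; the Doc class, the two bump_* steps and the version numbers are invented for illustration, and the break is an assumption since the body of the final if lies outside the hunk:

class Doc:
    def __init__(self, fmt, end_fmt):
        self.format = fmt
        self.end_format = end_fmt

def bump_a(doc): pass   # stand-ins for real conversion steps
def bump_b(doc): pass

table = [[224, [bump_a]],
         [225, [bump_b]]]

doc = Doc(fmt=223, end_fmt=225)
for version, conv_steps in table:
    if doc.format >= version:
        continue                 # this step is already behind us
    for conv in conv_steps:
        conv(doc)
    doc.format = version
    if doc.end_format == doc.format:
        break                    # assumed: stop once the requested format is reached
print(doc.format)                # -> 225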