2002-08-01 15:26:32 +00:00
|
|
|
# This file is part of lyx2lyx
|
2006-08-02 14:19:22 +00:00
|
|
|
# -*- coding: utf-8 -*-
|
2002-08-01 15:26:32 +00:00
|
|
|
# Copyright (C) 2002 Dekel Tsur <dekel@lyx.org>
|
2006-08-02 14:19:22 +00:00
|
|
|
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
|
2002-08-01 15:26:32 +00:00
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or
|
|
|
|
# modify it under the terms of the GNU General Public License
|
|
|
|
# as published by the Free Software Foundation; either version 2
|
|
|
|
# of the License, or (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program; if not, write to the Free Software
|
|
|
|
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
""" Convert files to the file format generated by lyx 1.2"""
|
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
import re
|
2002-08-01 15:26:32 +00:00
|
|
|
|
2006-07-27 18:30:13 +00:00
|
|
|
from parser_tools import find_token, find_token_backwards, \
|
2006-08-02 14:23:45 +00:00
|
|
|
find_tokens, find_tokens_backwards, \
|
2006-08-02 14:19:22 +00:00
|
|
|
find_beginning_of, find_end_of, find_re, \
|
2006-07-27 18:30:13 +00:00
|
|
|
is_nonempty_line, find_nonempty_line, \
|
|
|
|
get_value, check_token
|
|
|
|
|
|
|
|
####################################################################
|
|
|
|
# Private helper functions
|
|
|
|
|
|
|
|
def get_layout(line, default_layout):
    " Get layout, if empty return the default layout."
    parts = line.split()
    return parts[1] if len(parts) > 1 else default_layout
|
|
|
|
|
|
|
|
|
|
|
|
def get_paragraph(lines, i, format):
    " Finds the paragraph that contains line i."
    # Walk backwards; every \end_inset we meet means line i sits inside an
    # inset, so jump to the inset's beginning and keep searching from there.
    while i != -1:
        i = find_tokens_backwards(lines, ["\\end_inset", "\\layout"], i)
        if i == -1:
            return -1
        if check_token(lines[i], "\\layout"):
            return i
        i = find_beginning_of_inset(lines, i)
    return -1
|
|
|
|
|
|
|
|
|
|
|
|
def get_next_paragraph(lines, i, format):
    " Finds the paragraph after the paragraph that contains line i."
    markers = ["\\begin_inset", "\\layout", "\\end_float", "\\the_end"]
    while i != -1:
        i = find_tokens(lines, markers, i)
        # Anything other than an inset start ends the current paragraph.
        if not check_token(lines[i], "\\begin_inset"):
            return i
        # Skip the whole nested inset and continue after it.
        i = find_end_of_inset(lines, i)
    return -1
|
|
|
|
|
|
|
|
|
|
|
|
def find_beginning_of_inset(lines, i):
    """Find the beginning of the inset that contains line i."""
    return find_beginning_of(lines, i, "\\begin_inset", "\\end_inset")
|
|
|
|
|
|
|
|
|
|
|
|
def find_end_of_inset(lines, i):
    """Find the matching \\end_inset for the inset opened at line i."""
    return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
|
|
|
|
|
|
|
|
|
|
|
|
def find_end_of_tabular(lines, i):
    """Find the matching </lyxtabular> for the tabular opened at line i."""
    return find_end_of(lines, i, "<lyxtabular", "</lyxtabular")
|
|
|
|
|
|
|
|
|
|
|
|
def get_tabular_lines(lines, i):
    """Return the list of line indices that belong to the tabular at line i.

    Lines inside nested insets are skipped; an unterminated tabular
    yields an empty list.
    """
    pos = i + 1
    end = find_end_of_tabular(lines, pos)
    if end == -1:
        return []

    result = []
    while pos <= end:
        if check_token(lines[pos], "\\begin_inset"):
            # Jump past the whole nested inset.
            pos = find_end_of_inset(lines, pos) + 1
        else:
            result.append(pos)
            pos = pos + 1
    return result
|
|
|
|
|
|
|
|
# End of helper functions
|
|
|
|
####################################################################
|
|
|
|
|
2002-08-01 15:26:32 +00:00
|
|
|
|
|
|
|
# Map each old float-type keyword to the header lines of the
# corresponding new-style inset that replaces it.
floats = {
    "footnote": ["\\begin_inset Foot", "collapsed true"],
    "margin":   ["\\begin_inset Marginal", "collapsed true"],
    "fig":      ["\\begin_inset Float figure", "wide false", "collapsed false"],
    "tab":      ["\\begin_inset Float table", "wide false", "collapsed false"],
    "alg":      ["\\begin_inset Float algorithm", "wide false", "collapsed false"],
    "wide-fig": ["\\begin_inset Float figure", "wide true", "collapsed false"],
    "wide-tab": ["\\begin_inset Float table", "wide true", "collapsed false"],
}
|
|
|
|
|
2002-08-03 14:29:12 +00:00
|
|
|
# Font attributes that get reset after a float; see remove_oldfloat.
font_tokens = ["\\family", "\\series", "\\shape", "\\size", "\\emph",
               "\\bar", "\\noun", "\\color", "\\lang", "\\latex"]

# Matches a paragraph that carries \pextra_type 3 (floating figure).
pextra_type3_rexp = re.compile(r".*\\pextra_type\s+3")
# Captures the whole run of \pextra_* parameters on one line:
# group 1 = type, 3 = alignment, 5 = hfill, 7 = start_minipage,
# group 9 = width keyword (\pextra_width or \pextra_widthp), 10 = value.
pextra_rexp = re.compile(r"\\pextra_type\s+(\S+)"
                         r"(\s+\\pextra_alignment\s+(\S+))?"
                         r"(\s+\\pextra_hfill\s+(\S+))?"
                         r"(\s+\\pextra_start_minipage\s+(\S+))?"
                         r"(\s+(\\pextra_widthp?)\s+(\S*))?")
|
2002-09-12 12:02:54 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2002-09-12 12:02:54 +00:00
|
|
|
def get_width(mo):
    " Get width from a regular expression. "
    value = mo.group(10)
    if not value:
        # No explicit width given: default to the full column width.
        return "100col%"
    if mo.group(9) == "\\pextra_widthp":
        # \pextra_widthp stores a percentage of the column width.
        return value + "col%"
    return value
|
2002-09-12 12:02:54 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_oldfloat(document):
    # FIX: raw string -- the old docstring contained "\b", a literal backspace.
    r" Change \begin_float .. \end_float into \begin_inset Float .. \end_inset"
    lines = document.body
    i = 0
    while 1:
        i = find_token(lines, "\\begin_float", i)
        if i == -1:
            break
        # There are no nested floats, so finding the end of the float is simple
        j = find_token(lines, "\\end_float", i+1)

        floattype = lines[i].split()[1]
        # FIX: has_key() is Python-2-only; "in" works on both 2 and 3.
        if floattype not in floats:
            document.warning("Error! Unknown float type " + floattype)
            floattype = "fig"

        # skip \end_deeper tokens
        i2 = i+1
        while check_token(lines[i2], "\\end_deeper"):
            i2 = i2+1
        if i2 > i+1:
            # Re-emit the skipped \end_deeper after the following paragraph.
            j2 = get_next_paragraph(lines, j + 1, document.format + 1)
            lines[j2:j2] = ["\\end_deeper "]*(i2-(i+1))

        new = floats[floattype]+[""]

        # Check if the float is floatingfigure
        k = find_re(lines, pextra_type3_rexp, i, j)
        if k != -1:
            mo = pextra_rexp.search(lines[k])
            width = get_width(mo)
            lines[k] = re.sub(pextra_rexp, "", lines[k])
            new = ["\\begin_inset Wrap figure",
                   'width "%s"' % width,
                   "collapsed false",
                   ""]

        new = new+lines[i2:j]+["\\end_inset ", ""]

        # After a float, all font attributes are reseted.
        # We need to output '\foo default' for every attribute foo
        # whose value is not default before the float.
        # The check here is not accurate, but it doesn't matter
        # as extra '\foo default' commands are ignored.
        # In fact, it might be safer to output '\foo default' for all
        # font attributes.
        k = get_paragraph(lines, i, document.format + 1)
        flag = 0
        for token in font_tokens:
            if find_token(lines, token, k, i) != -1:
                if not flag:
                    # This is not necessary, but we want the output to be
                    # as similar as posible to the lyx format
                    flag = 1
                    new.append("")
                if token == "\\lang":
                    new.append(token+" "+ document.language)
                else:
                    new.append(token+" default ")

        lines[i:j+1] = new
        i = i+1
|
2002-08-01 15:26:32 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2002-10-08 15:37:52 +00:00
|
|
|
# Matches a line carrying \pextra_type 1 or 2 (indented paragraph / minipage).
pextra_type2_rexp = re.compile(r".*\\pextra_type\s+[12]")
# Matches either a \layout line or a \pextra_type 2 parameter anywhere in a line.
pextra_type2_rexp2 = re.compile(r".*(\\layout|\\pextra_type\s+2)")
# Detects a \pextra_widthp argument that landed on its own line.
pextra_widthp = re.compile(r"\\pextra_widthp")
|
2002-09-12 12:02:54 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_pextra(document):
    " Remove pextra token."
    # \pextra_type 1 becomes a \leftindent prefix; \pextra_type 2 paragraphs
    # are wrapped (possibly several at a time) into a Minipage inset.
    lines = document.body
    i = 0
    # flag is set when the *next* minipage continues on the same row
    # (\pextra_start_minipage 1), so no fresh \layout is emitted for it.
    flag = 0
    while 1:
        i = find_re(lines, pextra_type2_rexp, i)
        if i == -1:
            break

        # Sometimes the \pextra_widthp argument comes in it own
        # line. If that happens insert it back in this line.
        if pextra_widthp.search(lines[i+1]):
            lines[i] = lines[i] + ' ' + lines[i+1]
            del lines[i+1]

        mo = pextra_rexp.search(lines[i])
        width = get_width(mo)

        if mo.group(1) == "1":
            # handle \pextra_type 1 (indented paragraph)
            lines[i] = re.sub(pextra_rexp, "\\leftindent "+width+" ", lines[i])
            i = i+1
            continue

        # handle \pextra_type 2 (minipage)
        position = mo.group(3)
        hfill = mo.group(5)
        lines[i] = re.sub(pextra_rexp, "", lines[i])

        start = ["\\begin_inset Minipage",
                 "position " + position,
                 "inner_position 0",
                 'height "0pt"',
                 'width "%s"' % width,
                 "collapsed false"
                 ]
        if flag:
            # Continuation minipage: optionally separated by \hfill,
            # but no new paragraph is started.
            flag = 0
            if hfill:
                start = ["","\hfill",""]+start
        else:
            start = ['\\layout %s' % document.default_layout,''] + start

        # j0 .. j delimits the paragraphs that move into the minipage.
        j0 = find_token_backwards(lines,"\\layout", i-1)
        j = get_next_paragraph(lines, i, document.format + 1)

        count = 0
        while 1:
            # collect more paragraphs to the minipage
            count = count+1
            if j == -1 or not check_token(lines[j], "\\layout"):
                break
            i = find_re(lines, pextra_type2_rexp2, j+1)
            if i == -1:
                break
            mo = pextra_rexp.search(lines[i])
            if not mo:
                break
            if mo.group(7) == "1":
                # \pextra_start_minipage 1: a new minipage starts here;
                # remember that via flag and stop collecting.
                flag = 1
                break
            lines[i] = re.sub(pextra_rexp, "", lines[i])
            j = find_tokens(lines, ["\\layout", "\\end_float"], i+1)

        mid = lines[j0:j]
        end = ["\\end_inset "]

        lines[j0:j] = start+mid+end
        i = i+1
|
2002-08-01 15:26:32 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2002-08-02 19:25:14 +00:00
|
|
|
def is_empty(lines):
    " Are all the lines empty?"
    # FIX: the old body compared `filter(is_nonempty_line, lines) == []`,
    # which only works on Python 2 (Python 3's filter returns an iterator,
    # so the comparison is always False). any() behaves the same on both.
    return not any(is_nonempty_line(line) for line in lines)
|
2002-08-02 19:25:14 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2002-10-01 08:21:47 +00:00
|
|
|
# Font/structure commands that must be moved out of an ERT inset.
move_rexp = re.compile(r"\\(family|series|shape|size|emph|numeric|bar|noun|end_deeper)")
# Lines that interrupt plain ERT text: inset starts, \hfill, special chars.
ert_rexp = re.compile(r"\\begin_inset|\\hfill|.*\\SpecialChar")
# Splits a line into the text before \SpecialChar and the \SpecialChar tail.
spchar_rexp = re.compile(r"(.*)(\\SpecialChar.*)")
|
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_oldert(document):
    " Remove old ERT inset."
    # Rewrites old-style latex regions (\latex latex .. \latex default and
    # \layout LaTeX paragraphs) into new ERT insets, pulling insets, special
    # chars and font commands out of the ERT text.
    ert_begin = ["\\begin_inset ERT",
                 "status Collapsed",
                 "",
                 '\\layout %s' % document.default_layout,
                 ""]
    lines = document.body
    i = 0
    while 1:
        i = find_tokens(lines, ["\\latex latex", "\\layout LaTeX"], i)
        if i == -1:
            break
        # j: end of the latex region.
        j = i+1
        while 1:
            # \end_inset is for ert inside a tabular cell. The other tokens
            # are obvious.
            j = find_tokens(lines, ["\\latex default", "\\layout", "\\begin_inset", "\\end_inset", "\\end_float", "\\the_end"],
                            j)
            if check_token(lines[j], "\\begin_inset"):
                j = find_end_of_inset(lines, j)+1
            else:
                break

        if check_token(lines[j], "\\layout"):
            # Keep any \begin_deeper that immediately precedes the next
            # paragraph outside of the region.
            while j-1 >= 0 and check_token(lines[j-1], "\\begin_deeper"):
                j = j-1

        # We need to remove insets, special chars & font commands from ERT text
        new = []
        new2 = []
        if check_token(lines[i], "\\layout LaTeX"):
            # NOTE(review): '\l' is not a valid escape sequence under
            # Python 3 (SyntaxWarning); harmless on Python 2.
            new = ['\layout %s' % document.default_layout, "", ""]

        k = i+1
        while 1:
            # k2: next line that interrupts plain ERT text.
            k2 = find_re(lines, ert_rexp, k, j)
            inset = hfill = specialchar = 0
            if k2 == -1:
                k2 = j
            elif check_token(lines[k2], "\\begin_inset"):
                inset = 1
            elif check_token(lines[k2], "\\hfill"):
                hfill = 1
                del lines[k2]
                j = j-1
            else:
                specialchar = 1
                mo = spchar_rexp.match(lines[k2])
                lines[k2] = mo.group(1)
                specialchar_str = mo.group(2)
                k2 = k2+1

            # tmp collects the plain ERT text between k and k2.
            tmp = []
            for line in lines[k:k2]:
                # Move some lines outside the ERT inset:
                if move_rexp.match(line):
                    if new2 == []:
                        # This is not necessary, but we want the output to be
                        # as similar as posible to the lyx format
                        new2 = [""]
                    new2.append(line)
                elif not check_token(line, "\\latex"):
                    tmp.append(line)

            if is_empty(tmp):
                # NOTE(review): `filter(...) != []` is a Python-2-only idiom
                # (py3 filter returns an iterator).
                if filter(lambda x:x != "", tmp) != []:
                    if new == []:
                        # This is not necessary, but we want the output to be
                        # as similar as posible to the lyx format
                        lines[i-1] = lines[i-1]+" "
                    else:
                        new = new+[" "]
            else:
                new = new+ert_begin+tmp+["\\end_inset ", ""]

            if inset:
                k3 = find_end_of_inset(lines, k2)
                new = new+[""]+lines[k2:k3+1]+[""] # Put an empty line after \end_inset
                k = k3+1
                # Skip the empty line after \end_inset
                if not is_nonempty_line(lines[k]):
                    k = k+1
                    new.append("")
            elif hfill:
                new = new + ["\\hfill", ""]
                k = k2
            elif specialchar:
                if new == []:
                    # This is not necessary, but we want the output to be
                    # as similar as posible to the lyx format
                    lines[i-1] = lines[i-1]+specialchar_str
                    new = [""]
                else:
                    new = new+[specialchar_str, ""]
                k = k2
            else:
                break

        new = new+new2
        if not check_token(lines[j], "\\latex "):
            new = new+[""]+[lines[j]]
        lines[i:j+1] = new
        i = i+1

    # Delete remaining "\latex xxx" tokens
    i = 0
    while 1:
        i = find_token(lines, "\\latex ", i)
        if i == -1:
            break
        del lines[i]
|
2002-08-21 07:33:25 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_oldertinset(document):
    " ERT insert are hidden feature of lyx 1.1.6. This might be removed in the future."
    lines = document.body
    pos = 0
    while 1:
        pos = find_token(lines, "\\begin_inset ERT", pos)
        if pos == -1:
            break
        inset_end = find_end_of_inset(lines, pos)
        layout = find_token(lines, "\\layout", pos+1)
        par = get_paragraph(lines, pos, document.format + 1)
        if lines[layout] == lines[par]:
            # Same layout as the surrounding paragraph: drop the
            # redundant \layout line as well.
            layout = layout+1
        lines[pos:inset_end+1] = lines[layout:inset_end]
        pos = pos+1
|
2002-08-06 12:10:09 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def is_ert_paragraph(document, i):
    """Return whether the paragraph starting at line i is a pure ERT paragraph."""
    lines = document.body
    if not check_token(lines[i], "\\layout"):
        return 0
    layout = get_layout(lines[i], document.default_layout)
    if not document.is_default_layout(layout):
        return 0

    # The first non-blank line must open an ERT inset ...
    start = find_nonempty_line(lines, i+1)
    if not check_token(lines[start], "\\begin_inset ERT"):
        return 0

    # ... and the first non-blank line after the inset must start
    # a new paragraph, i.e. the inset is the whole paragraph.
    end = find_end_of_inset(lines, start)
    after = find_nonempty_line(lines, end+1)
    return check_token(lines[after], "\\layout")
|
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def combine_ert(document):
    " Combine ERT paragraphs."
    # Consecutive paragraphs that each consist solely of an ERT inset are
    # merged into a single ERT inset.
    lines = document.body
    i = 0
    while 1:
        i = find_token(lines, "\\begin_inset ERT", i)
        if i == -1:
            break
        # j walks over consecutive ERT paragraphs starting at the one
        # that contains line i.
        j = get_paragraph(lines, i, document.format + 1)
        count = 0
        # text accumulates the inner contents of every combined inset.
        text = []
        while is_ert_paragraph(document, j):

            count = count+1
            i2 = find_token(lines, "\\layout", j+1)
            k = find_token(lines, "\\end_inset", i2+1)
            text = text+lines[i2:k]
            j = find_token(lines, "\\layout", k+1)
            if j == -1:
                break

        if count >= 2:
            # k still points at the \end_inset of the last combined
            # paragraph (count >= 2 guarantees the loop ran).
            j = find_token(lines, "\\layout", i+1)
            lines[j:k] = text

        i = i+1
|
2003-10-13 09:50:10 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2002-08-02 19:25:14 +00:00
|
|
|
# Units of the old length encoding, indexed by the stored integer.
oldunits = ["pt", "cm", "in", "text%", "col%"]


def get_length(lines, name, start, end):
    " Get length."
    idx = find_token(lines, name, start, end)
    if idx == -1:
        return ""
    # Old format: "<name> <unit-index> <value>".
    parts = lines[idx].split()
    return parts[2] + oldunits[int(parts[1])]
|
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2002-08-28 13:26:39 +00:00
|
|
|
def write_attribute(x, token, value):
    """Append a tab-indented "token value" line to x, unless value is empty."""
    if value == "":
        return
    x.append("\t%s %s" % (token, value))
|
2002-08-02 19:25:14 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_figinset(document):
    " Remove figinset."
    # Converts old "\begin_inset Figure" insets into new Graphics insets.
    lines = document.body
    i = 0
    while 1:
        i = find_token(lines, "\\begin_inset Figure", i)
        if i == -1:
            break
        j = find_end_of_inset(lines, i)

        # Old header optionally carried the on-screen size:
        # "\begin_inset Figure size <w> <h>".
        if ( len(lines[i].split()) > 2 ):
            lyxwidth = lines[i].split()[3]+"pt"
            lyxheight = lines[i].split()[4]+"pt"
        else:
            lyxwidth = ""
            lyxheight = ""

        filename = get_value(lines, "file", i+1, j)

        width = get_length(lines, "width", i+1, j)
        # what does width=5 mean ?
        height = get_length(lines, "height", i+1, j)
        rotateAngle = get_value(lines, "angle", i+1, j)
        if width == "" and height == "":
            size_type = "0"
        else:
            size_type = "1"

        # The low two bits of "flags" encoded the display mode.
        flags = get_value(lines, "flags", i+1, j)
        x = int(flags)%4
        if x == 1:
            display = "monochrome"
        elif x == 2:
            display = "gray"
        else:
            display = "color"

        subcaptionText = ""
        subcaptionLine = find_token(lines, "subcaption", i+1, j)
        if subcaptionLine != -1:
            # Strip the leading 'subcaption ' keyword (11 characters).
            subcaptionText = lines[subcaptionLine][11:]
            if subcaptionText != "":
                subcaptionText = '"'+subcaptionText+'"'

        k = find_token(lines, "subfigure", i+1,j)
        if k == -1:
            subcaption = 0
        else:
            subcaption = 1

        # Emit the new Graphics inset, skipping empty attributes.
        new = ["\\begin_inset Graphics FormatVersion 1"]
        write_attribute(new, "filename", filename)
        write_attribute(new, "display", display)
        if subcaption:
            new.append("\tsubcaption")
        write_attribute(new, "subcaptionText", subcaptionText)
        write_attribute(new, "size_type", size_type)
        write_attribute(new, "width", width)
        write_attribute(new, "height", height)
        if rotateAngle != "":
            new.append("\trotate")
            write_attribute(new, "rotateAngle", rotateAngle)
            write_attribute(new, "rotateOrigin", "leftBaseline")
        write_attribute(new, "lyxsize_type", "1")
        write_attribute(new, "lyxwidth", lyxwidth)
        write_attribute(new, "lyxheight", lyxheight)
        new = new + ["\\end_inset"]
        lines[i:j+1] = new
|
2002-08-02 19:25:14 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2002-08-28 10:45:21 +00:00
|
|
|
# Attributes whose value is a default ("false", "0" or empty) — dropped
# when converting tabular format 2 to 3.
attr_re = re.compile(r' \w*="(false|0|)"')
# Tabular lines on which such attributes are cleaned up.
line_re = re.compile(r'<(features|column|row|cell)')
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def update_tabular(document):
    " Convert tabular format 2 to 3."
    tabular_rexp = re.compile(r'^\\begin_inset\s+Tabular')
    body = document.body
    i = 0
    while 1:
        i = find_re(body, tabular_rexp, i)
        if i == -1:
            break

        for k in get_tabular_lines(body, i):
            line = body[k]
            if check_token(line, "<lyxtabular"):
                line = line.replace('version="2"', 'version="3"')
            elif check_token(line, "<column"):
                line = line.replace('width=""', 'width="0pt"')

            # Drop attributes that carry a default value.
            if line_re.match(line):
                line = re.sub(attr_re, "", line)
            body[k] = line

        i = i+1
|
2002-08-28 10:45:21 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2004-12-03 18:33:19 +00:00
|
|
|
##
|
|
|
|
# Convert tabular format 2 to 3
|
|
|
|
#
|
|
|
|
# compatibility read for old longtable options. Now we can make any
|
|
|
|
# row part of the header/footer type we want before it was strict
|
|
|
|
# sequential from the first row down (as LaTeX does it!). So now when
|
|
|
|
# we find a header/footer line we have to go up the rows and set it
|
|
|
|
# on all preceding rows till the first or one with already a h/f option
|
|
|
|
# set. If we find a firstheader on the same line as a header or a
|
|
|
|
# lastfooter on the same line as a footer then this should be set empty.
|
|
|
|
# (Jug 20011220)
|
|
|
|
|
|
|
|
# just for compatibility with old python versions
# python >= 2.3 has real booleans (False and True)
false = 0  # stands in for False
true = 1   # stands in for True
|
|
|
|
|
|
|
|
class row:
    " Simple data structure to deal with long table info."
    def __init__(self):
        # All flags start cleared; 0/1 stand in for False/True
        # (kept for compatibility with pre-2.3 Python).
        self.endhead = 0        # header row
        self.endfirsthead = 0   # first header row
        self.endfoot = 0        # footer row
        self.endlastfoot = 0    # last footer row
2004-12-03 18:33:19 +00:00
|
|
|
|
|
|
|
|
|
|
|
def haveLTFoot(row_info):
    """Return whether any row in row_info is marked as a footer row."""
    for entry in row_info:
        if entry.endfoot:
            return 1
    return 0
|
|
|
|
|
|
|
|
|
|
|
|
def setHeaderFooterRows(hr, fhr, fr, lfr, rows_, row_info):
    """Set Header/Footer rows.

    hr, fhr, fr, lfr are the old-style longtable row numbers of the last
    header, first-header, footer and last-footer row; rows_ is the total
    row count.  row_info is a list of `row` objects mutated in place.
    Returns (endfirsthead_empty, endlastfoot_empty).

    NOTE(review): the numeric comparisons below require the row numbers
    to be ints — confirm callers convert the parsed strings.
    """
    endfirsthead_empty = false
    endlastfoot_empty = false
    # set header info: rows 0 .. hr-1 are header rows.
    while (hr > 0):
        hr = hr - 1
        row_info[hr].endhead = true

    # set firstheader info
    if fhr and fhr < rows_:
        if row_info[fhr].endhead:
            # first header overlaps the header: reassign those rows.
            while fhr > 0:
                fhr = fhr - 1
                row_info[fhr].endfirsthead = true
                row_info[fhr].endhead = false
        elif row_info[fhr - 1].endhead:
            endfirsthead_empty = true
        else:
            # take the rows between the header block and fhr.
            while fhr > 0 and not row_info[fhr - 1].endhead:
                fhr = fhr - 1
                row_info[fhr].endfirsthead = true

    # set footer info
    if fr and fr < rows_:
        if row_info[fr].endhead and row_info[fr - 1].endhead:
            while fr > 0 and not row_info[fr - 1].endhead:
                fr = fr - 1
                row_info[fr].endfoot = true
                row_info[fr].endhead = false
        elif row_info[fr].endfirsthead and row_info[fr - 1].endfirsthead:
            while fr > 0 and not row_info[fr - 1].endfirsthead:
                fr = fr - 1
                row_info[fr].endfoot = true
                row_info[fr].endfirsthead = false
        elif not row_info[fr - 1].endhead and not row_info[fr - 1].endfirsthead:
            while fr > 0 and not row_info[fr - 1].endhead and not row_info[fr - 1].endfirsthead:
                fr = fr - 1
                row_info[fr].endfoot = true

    # set lastfooter info
    if lfr and lfr < rows_:
        if row_info[lfr].endhead and row_info[lfr - 1].endhead:
            while lfr > 0 and not row_info[lfr - 1].endhead:
                lfr = lfr - 1
                row_info[lfr].endlastfoot = true
                row_info[lfr].endhead = false
        elif row_info[lfr].endfirsthead and row_info[lfr - 1].endfirsthead:
            while lfr > 0 and not row_info[lfr - 1].endfirsthead:
                lfr = lfr - 1
                row_info[lfr].endlastfoot = true
                row_info[lfr].endfirsthead = false
        elif row_info[lfr].endfoot and row_info[lfr - 1].endfoot:
            while lfr > 0 and not row_info[lfr - 1].endfoot:
                lfr = lfr - 1
                row_info[lfr].endlastfoot = true
                row_info[lfr].endfoot = false
        # NOTE(review): this branch tests `fr`, not `lfr` — looks like a
        # copy-paste slip inherited from the original; confirm upstream.
        elif not row_info[fr - 1].endhead and not row_info[fr - 1].endfirsthead and not row_info[fr - 1].endfoot:
            while lfr > 0 and not row_info[lfr - 1].endhead and not row_info[lfr - 1].endfirsthead and not row_info[lfr - 1].endfoot:
                lfr = lfr - 1
                row_info[lfr].endlastfoot = true
        elif haveLTFoot(row_info):
            endlastfoot_empty = true

    return endfirsthead_empty, endlastfoot_empty
|
|
|
|
|
|
|
|
|
|
|
|
def insert_attribute(lines, i, attribute):
    """Insert attribute just before the closing '>' of the tag in lines[i]."""
    pos = lines[i].find('>')
    lines[i] = "%s %s%s" % (lines[i][:pos], attribute, lines[i][pos:])
|
|
|
|
|
|
|
|
|
|
|
|
# Extracts the row count from a <lyxtabular ...> tag.
rows_re = re.compile(r'rows="(\d*)"')
# Extracts the islongtable flag from a <features ...> tag.
longtable_re = re.compile(r'islongtable="(\w)"')
# Captures the four old-style header/footer row numbers individually.
ltvalues_re = re.compile(r'endhead="(-?\d*)" endfirsthead="(-?\d*)" endfoot="(-?\d*)" endlastfoot="(-?\d*)"')
# Captures the whole old-style header/footer attribute run for removal.
lt_features_re = re.compile(r'(endhead="-?\d*" endfirsthead="-?\d*" endfoot="-?\d*" endlastfoot="-?\d*")')
|
2006-08-02 14:19:22 +00:00
|
|
|
def update_longtables(document):
    """Update longtables to new format.

    Converts the old sequential endhead/endfirsthead/endfoot/endlastfoot
    row numbers on <features> into per-row attributes on each <row> tag.
    """
    regexp = re.compile(r'^\\begin_inset\s+Tabular')
    body = document.body
    i = 0
    while 1:
        i = find_re(body, regexp, i)
        if i == -1:
            break
        i = i + 1
        i = find_token(body, "<lyxtabular", i)
        if i == -1:
            break

        # get number of rows in the table
        rows = int(rows_re.search(body[i]).group(1))

        i = i + 1
        i = find_token(body, '<features', i)
        if i == -1:
            break

        # is this a longtable?
        longtable = longtable_re.search(body[i])

        if not longtable:
            # islongtable is missing add it
            body[i] = body[i][:10] + 'islongtable="false" ' + body[i][10:]

        if not longtable or longtable.group(1) != "true":
            # remove longtable elements from features
            features = lt_features_re.search(body[i])
            if features:
                body[i] = body[i].replace(features.group(1), "")
            continue

        # FIX: was `row_info = row() * rows`, a TypeError — build one
        # fresh row object per table row instead.
        row_info = [row() for j in range(rows)]
        res = ltvalues_re.search(body[i])
        if not res:
            continue

        # FIX: regex groups are strings; setHeaderFooterRows compares the
        # row numbers numerically, so convert them to int first.
        endfirsthead_empty, endlastfoot_empty = setHeaderFooterRows(
            int(res.group(1)), int(res.group(2)),
            int(res.group(3)), int(res.group(4)), rows, row_info)

        if endfirsthead_empty:
            insert_attribute(body, i, 'firstHeadEmpty="true"')

        # FIX: was testing endfirsthead_empty a second time.
        if endlastfoot_empty:
            insert_attribute(body, i, 'lastFootEmpty="true"')

        # Tag every row with the header/footer attributes computed above.
        # (Stray `self.endfoot = ...` lines that had leaked into this loop
        # were removed — `self` does not exist here.)
        i = i + 1
        for j in range(rows):
            i = find_token(body, '<row', i)

            if row_info[j].endhead:
                insert_attribute(body, i, 'endhead="true"')

            if row_info[j].endfirsthead:
                insert_attribute(body, i, 'endfirsthead="true"')

            if row_info[j].endfoot:
                insert_attribute(body, i, 'endfoot="true"')

            if row_info[j].endlastfoot:
                insert_attribute(body, i, 'endlastfoot="true"')

            i = i + 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def fix_oldfloatinset(document):
    " Figure insert are hidden feature of lyx 1.1.6. This might be removed in the future."
    lines = document.body
    pos = 0
    while 1:
        pos = find_token(lines, "\\begin_inset Float ", pos)
        if pos == -1:
            break
        # Old float insets lack the "wide" attribute; add it before
        # the "collapsed" line.
        mark = find_token(lines, "collapsed", pos)
        if mark != -1:
            lines[mark:mark] = ["wide false"]
        pos = pos+1
|
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def change_listof(document):
    " Change listof insets."
    lines = document.body
    i = 0
    while 1:
        i = find_token(lines, "\\begin_inset LatexCommand \\listof", i)
        if i == -1:
            break
        # "listoffigures" -> "figure", "listoftables" -> "table", etc.
        # ([:-1] strips the plural "s"; local renamed to avoid shadowing
        # the builtin `type`.)
        kind = re.search(r"listof(\w*)", lines[i]).group(1)[:-1]
        lines[i] = "\\begin_inset FloatList "+kind
        i = i+1
|
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def change_infoinset(document):
    """Change info inset.

    Converts old "\\begin_inset Info" insets into Note insets, escaping
    every backslash in the note text as "\\backslash ".
    """
    lines = document.body
    i = 0
    while 1:
        i = find_token(lines, "\\begin_inset Info", i)
        if i == -1:
            break
        # Inline text that followed the inset header on the same line.
        txt = lines[i][18:].lstrip()
        new = ["\\begin_inset Note", "collapsed true", ""]
        j = find_token(lines, "\\end_inset", i)
        if j == -1:
            break

        note_lines = lines[i+1:j]
        if len(txt) > 0:
            note_lines = [txt]+note_lines

        for line in note_lines:
            # FIX: was '\layout %s' — '\l' is an invalid escape sequence
            # under Python 3; '\\layout' is byte-identical on Python 2.
            new = new + ['\\layout %s' % document.default_layout, ""]
            tmp = line.split('\\')
            new = new + [tmp[0]]
            for x in tmp[1:]:
                new = new + ["\\backslash ", x]
        lines[i:j] = new
        i = i+5
|
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def change_header(document):
    """Update header: add the natbib settings after \\use_amsmath."""
    lines = document.header
    i = find_token(lines, "\\use_amsmath", 0)
    if i == -1:
        # No amsmath setting found: nothing to anchor the insertion to.
        return
    # FIX: was "\use_numerical_citations 0" — "\u" is a hard SyntaxError
    # on Python 3; "\\use..." produces the exact same bytes on Python 2.
    lines[i+1:i+1] = ["\\use_natbib 0",
                      "\\use_numerical_citations 0"]
|
2002-08-02 19:25:14 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
# LyX versions whose files this module accepts.
supported_versions = ["1.2.%d" % i for i in range(5)] + ["1.2"]
# Conversion chain to file format 220; the functions run in this order.
convert = [[220, [change_header, change_listof, fix_oldfloatinset,
                  update_tabular, update_longtables, remove_pextra,
                  remove_oldfloat, remove_figinset, remove_oldertinset,
                  remove_oldert, combine_ert, change_infoinset]]]
# No backwards conversions are provided.
revert = []
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2002-08-01 15:26:32 +00:00
|
|
|
|
|
|
|
if __name__ == "__main__":
    # This module only supplies conversion routines to the lyx2lyx driver;
    # it is not meant to be executed directly.
    pass
|