# This file is part of lyx2lyx
# Copyright (C) 2002 Dekel Tsur <dekel@lyx.org>
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
2024-06-15 09:06:06 +00:00
|
|
|
"""Convert files to the file format generated by lyx 1.2"""
|
2006-08-02 14:19:22 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
import re
|
2002-08-01 15:26:32 +00:00
|
|
|
|
2024-06-15 09:06:06 +00:00
|
|
|
from parser_tools import (
|
2024-06-18 09:25:52 +00:00
|
|
|
check_token,
|
|
|
|
find_beginning_of,
|
|
|
|
find_end_of,
|
|
|
|
find_nonempty_line,
|
|
|
|
find_re,
|
2024-06-15 09:06:06 +00:00
|
|
|
find_token,
|
|
|
|
find_token_backwards,
|
|
|
|
find_tokens,
|
|
|
|
find_tokens_backwards,
|
|
|
|
get_value,
|
2024-06-18 09:25:52 +00:00
|
|
|
is_nonempty_line,
|
2024-06-15 09:06:06 +00:00
|
|
|
)
|
2006-07-27 18:30:13 +00:00
|
|
|
|
|
|
|
####################################################################
|
|
|
|
# Private helper functions
|
|
|
|
|
2024-06-15 09:06:06 +00:00
|
|
|
|
2006-07-27 18:30:13 +00:00
|
|
|
def get_layout(line, default_layout):
    "Return the layout name given on a \\layout line, or default_layout if absent."
    parts = line.split()
    return parts[1] if len(parts) > 1 else default_layout
|
|
|
|
|
|
|
|
|
|
|
|
def get_paragraph(lines, i, format):
    "Return the index of the \\layout line opening the paragraph containing line i, or -1."
    while i != -1:
        i = find_tokens_backwards(lines, ["\\end_inset", "\\layout"], i)
        if i == -1:
            break
        if check_token(lines[i], "\\layout"):
            return i
        # We ran into the end of a nested inset: jump to its opening
        # line and continue the backwards search from there.
        i = find_beginning_of_inset(lines, i)
    return -1
|
|
|
|
|
|
|
|
|
|
|
|
def get_next_paragraph(lines, i, format):
    "Return the index of the first paragraph boundary at or after line i, skipping insets."
    markers = ["\\begin_inset", "\\layout", "\\end_float", "\\the_end"]
    while i != -1:
        i = find_tokens(lines, markers, i)
        if check_token(lines[i], "\\begin_inset"):
            # A nested inset is not a paragraph boundary: resume the
            # search after its matching \end_inset.
            i = find_end_of_inset(lines, i)
        else:
            return i
    return -1
|
|
|
|
|
|
|
|
|
|
|
|
def find_beginning_of_inset(lines, i):
    "Find beginning of inset, where lines[i] is included."
    # Delegates to the generic nesting-aware matcher in parser_tools.
    return find_beginning_of(lines, i, "\\begin_inset", "\\end_inset")
|
|
|
|
|
|
|
|
|
|
|
|
def find_end_of_inset(lines, i):
    r"Finds the matching \end_inset for the inset opened at or containing lines[i]."
    # Delegates to the generic nesting-aware matcher in parser_tools.
    return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
|
|
|
|
|
|
|
|
|
|
|
|
def find_end_of_tabular(lines, i):
    "Finds the matching </lyxtabular> for the <lyxtabular at or containing lines[i]."
    return find_end_of(lines, i, "<lyxtabular", "</lyxtabular")
|
|
|
|
|
|
|
|
|
|
|
|
def get_tabular_lines(lines, i):
    "Return the indices of the tabular's own lines, skipping nested insets."
    start = i + 1
    stop = find_end_of_tabular(lines, start)
    if stop == -1:
        return []
    indices = []
    pos = start
    while pos <= stop:
        if check_token(lines[pos], "\\begin_inset"):
            # Content inside a nested inset does not belong to the
            # tabular itself; jump past the whole inset.
            pos = find_end_of_inset(lines, pos) + 1
        else:
            indices.append(pos)
            pos += 1
    return indices
|
|
|
|
|
2024-06-15 09:06:06 +00:00
|
|
|
|
2006-07-27 18:30:13 +00:00
|
|
|
# End of helper functions
|
|
|
|
####################################################################
|
|
|
|
|
2002-08-01 15:26:32 +00:00
|
|
|
|
|
|
|
# Replacement header lines for each old float type: the opening line of
# the new inset plus its status flags ("wide"/"collapsed").
floats = {
    "footnote": ["\\begin_inset Foot", "collapsed true"],
    "margin": ["\\begin_inset Marginal", "collapsed true"],
    "fig": ["\\begin_inset Float figure", "wide false", "collapsed false"],
    "tab": ["\\begin_inset Float table", "wide false", "collapsed false"],
    "alg": ["\\begin_inset Float algorithm", "wide false", "collapsed false"],
    "wide-fig": ["\\begin_inset Float figure", "wide true", "collapsed false"],
    "wide-tab": ["\\begin_inset Float table", "wide true", "collapsed false"],
}
|
|
|
|
|
2024-06-15 09:06:06 +00:00
|
|
|
# Font-attribute tokens; remove_oldfloat() re-emits a "<token> default"
# line for each of these found set before a converted float.
font_tokens = [
    "\\family",
    "\\series",
    "\\shape",
    "\\size",
    "\\emph",
    "\\bar",
    "\\noun",
    "\\color",
    "\\lang",
    "\\latex",
]
|
2002-08-03 14:29:12 +00:00
|
|
|
|
2002-09-12 12:02:54 +00:00
|
|
|
# Matches a line carrying a paragraph-extra token of type 3 (floating figure).
pextra_type3_rexp = re.compile(r".*\\pextra_type\s+3")
# Captures the whole \pextra_* option run: type (group 1), alignment (3),
# hfill (5), start_minipage (7), width keyword (9) and width value (10).
pextra_rexp = re.compile(
    r"\\pextra_type\s+(\S+)"
    + r"(\s+\\pextra_alignment\s+(\S+))?"
    + r"(\s+\\pextra_hfill\s+(\S+))?"
    + r"(\s+\\pextra_start_minipage\s+(\S+))?"
    + r"(\s+(\\pextra_widthp?)\s+(\S*))?"
)
|
2002-09-12 12:02:54 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2002-09-12 12:02:54 +00:00
|
|
|
def get_width(mo):
    "Return the width captured by a pextra_rexp match, defaulting to the full column."
    value = mo.group(10)
    if not value:
        # No explicit width was given: use the whole column.
        return "100col%"
    if mo.group(9) == "\\pextra_widthp":
        # \pextra_widthp stores a percentage of the column width.
        return value + "col%"
    return value
|
2002-09-12 12:02:54 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_oldfloat(document):
    r"Change \begin_float .. \end_float into \begin_inset Float .. \end_inset"
    lines = document.body
    i = 0
    while True:
        i = find_token(lines, "\\begin_float", i)
        if i == -1:
            break
        # There are no nested floats, so finding the end of the float is simple.
        j = find_token(lines, "\\end_float", i + 1)

        floattype = lines[i].split()[1]
        if floattype not in floats:
            document.warning("Error! Unknown float type " + floattype)
            floattype = "fig"

        # Skip \end_deeper tokens directly after \begin_float; they are
        # re-inserted after the new inset below so nesting stays balanced.
        i2 = i + 1
        while check_token(lines[i2], "\\end_deeper"):
            i2 = i2 + 1
        if i2 > i + 1:
            j2 = get_next_paragraph(lines, j + 1, document.format + 1)
            lines[j2:j2] = ["\\end_deeper "] * (i2 - (i + 1))

        new = floats[floattype] + [""]

        # Check if the float is a floatingfigure (\pextra_type 3): those
        # become a Wrap figure inset instead of a Float inset.
        k = find_re(lines, pextra_type3_rexp, i, j)
        if k != -1:
            mo = pextra_rexp.search(lines[k])
            width = get_width(mo)
            lines[k] = re.sub(pextra_rexp, "", lines[k])
            new = [
                "\\begin_inset Wrap figure",
                'width "%s"' % width,
                "collapsed false",
                "",
            ]

        new = new + lines[i2:j] + ["\\end_inset ", ""]

        # After a float, all font attributes are reset.
        # We need to output '\foo default' for every attribute foo
        # whose value is not default before the float.
        # The check here is not accurate, but it doesn't matter
        # as extra '\foo default' commands are ignored.
        # In fact, it might be safer to output '\foo default' for all
        # font attributes.
        k = get_paragraph(lines, i, document.format + 1)
        flag = 0
        for token in font_tokens:
            if find_token(lines, token, k, i) != -1:
                if not flag:
                    # This is not necessary, but we want the output to be
                    # as similar as possible to the lyx format
                    flag = 1
                    new.append("")
                if token == "\\lang":
                    # \lang is restored to the document language, not "default".
                    new.append(token + " " + document.language)
                else:
                    new.append(token + " default ")

        # Splice the converted inset over the old float region.
        lines[i : j + 1] = new
        i = i + 1
|
2002-08-01 15:26:32 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2002-10-08 15:37:52 +00:00
|
|
|
# Matches paragraphs carrying \pextra_type 1 or 2 (indented paragraph / minipage).
pextra_type2_rexp = re.compile(r".*\\pextra_type\s+[12]")
# Matches either a new paragraph start or a further \pextra_type 2 continuation.
pextra_type2_rexp2 = re.compile(r".*(\\layout|\\pextra_type\s+2)")
# Detects a \pextra_widthp token that spilled onto its own line.
pextra_widthp = re.compile(r"\\pextra_widthp")
|
2002-09-12 12:02:54 +00:00
|
|
|
|
2024-06-15 09:06:06 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_pextra(document):
    "Remove pextra token."
    lines = document.body
    i = 0
    flag = 0
    while True:
        i = find_re(lines, pextra_type2_rexp, i)
        if i == -1:
            break

        # Sometimes the \pextra_widthp argument comes in its own
        # line. If that happens insert it back in this line.
        if pextra_widthp.search(lines[i + 1]):
            lines[i] = lines[i] + " " + lines[i + 1]
            del lines[i + 1]

        mo = pextra_rexp.search(lines[i])
        width = get_width(mo)

        if mo.group(1) == "1":
            # handle \pextra_type 1 (indented paragraph)
            lines[i] = re.sub(pextra_rexp, "\\leftindent " + width + " ", lines[i])
            i = i + 1
            continue

        # handle \pextra_type 2 (minipage)
        position = mo.group(3)
        hfill = mo.group(5)
        lines[i] = re.sub(pextra_rexp, "", lines[i])

        start = [
            "\\begin_inset Minipage",
            "position " + position,
            "inner_position 0",
            'height "0pt"',
            'width "%s"' % width,
            "collapsed false",
        ]
        if flag:
            # A previous iteration saw \pextra_start_minipage 1 on this
            # paragraph: continue on the same line, optionally hfilled.
            flag = 0
            if hfill:
                start = ["", r"\hfill", ""] + start
        else:
            start = ["\\layout %s" % document.default_layout, ""] + start

        j0 = find_token_backwards(lines, "\\layout", i - 1)
        j = get_next_paragraph(lines, i, document.format + 1)

        count = 0
        while True:
            # collect more paragraphs to the minipage
            count = count + 1
            if j == -1 or not check_token(lines[j], "\\layout"):
                break
            i = find_re(lines, pextra_type2_rexp2, j + 1)
            if i == -1:
                break
            mo = pextra_rexp.search(lines[i])
            if not mo:
                break
            if mo.group(7) == "1":
                # \pextra_start_minipage 1: a new minipage starts here;
                # remember it for the next outer iteration.
                flag = 1
                break
            lines[i] = re.sub(pextra_rexp, "", lines[i])
            j = find_tokens(lines, ["\\layout", "\\end_float"], i + 1)

        mid = lines[j0:j]
        end = ["\\end_inset "]

        # Wrap the collected paragraphs into the Minipage inset.
        lines[j0:j] = start + mid + end
        i = i + 1
|
2002-08-01 15:26:32 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2002-08-02 19:25:14 +00:00
|
|
|
def is_empty(lines):
    "Are all the lines empty?"
    return not any(is_nonempty_line(line) for line in lines)
|
2002-08-02 19:25:14 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2024-06-15 09:06:06 +00:00
|
|
|
# Font/nesting tokens that must be moved outside an ERT inset.
move_rexp = re.compile(r"\\(family|series|shape|size|emph|numeric|bar|noun|end_deeper)")
# Lines that interrupt a plain ERT text run: insets, hfills and special chars.
ert_rexp = re.compile(r"\\begin_inset|\\hfill|.*\\SpecialChar")
# Splits a line into the text before \SpecialChar and the \SpecialChar tail.
spchar_rexp = re.compile(r"(.*)(\\SpecialChar.*)")
|
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_oldert(document):
    "Remove old ERT inset."
    # Header lines that open each newly created ERT inset.
    ert_begin = [
        "\\begin_inset ERT",
        "status Collapsed",
        "",
        "\\layout %s" % document.default_layout,
        "",
    ]
    lines = document.body
    i = 0
    while True:
        # An old ERT run starts at "\latex latex" or a whole LaTeX paragraph.
        i = find_tokens(lines, ["\\latex latex", "\\layout LaTeX"], i)
        if i == -1:
            break
        j = i + 1
        while True:
            # \end_inset is for ert inside a tabular cell. The other tokens
            # are obvious.
            j = find_tokens(
                lines,
                [
                    "\\latex default",
                    "\\layout",
                    "\\begin_inset",
                    "\\end_inset",
                    "\\end_float",
                    "\\the_end",
                ],
                j,
            )
            if check_token(lines[j], "\\begin_inset"):
                j = find_end_of_inset(lines, j) + 1
            else:
                break

        if check_token(lines[j], "\\layout"):
            # Pull any \begin_deeper lines preceding the next paragraph
            # out of the ERT region.
            while j - 1 >= 0 and check_token(lines[j - 1], "\\begin_deeper"):
                j = j - 1

        # We need to remove insets, special chars & font commands from ERT text
        new = []
        new2 = []
        if check_token(lines[i], "\\layout LaTeX"):
            new = [r"\layout %s" % document.default_layout, "", ""]

        k = i + 1
        while True:
            # Find the next interruption (inset / hfill / special char) in [k, j).
            k2 = find_re(lines, ert_rexp, k, j)
            inset = hfill = specialchar = 0
            if k2 == -1:
                k2 = j
            elif check_token(lines[k2], "\\begin_inset"):
                inset = 1
            elif check_token(lines[k2], "\\hfill"):
                hfill = 1
                del lines[k2]
                j = j - 1
            else:
                specialchar = 1
                mo = spchar_rexp.match(lines[k2])
                lines[k2] = mo.group(1)
                specialchar_str = mo.group(2)
                k2 = k2 + 1

            tmp = []
            for line in lines[k:k2]:
                # Move some lines outside the ERT inset:
                if move_rexp.match(line):
                    if new2 == []:
                        # This is not necessary, but we want the output to be
                        # as similar as possible to the lyx format
                        new2 = [""]
                    new2.append(line)
                elif not check_token(line, "\\latex"):
                    tmp.append(line)

            if is_empty(tmp):
                # Whitespace-only ERT text collapses to a single space.
                if [x for x in tmp if x != ""] != []:
                    if new == []:
                        # This is not necessary, but we want the output to be
                        # as similar as possible to the lyx format
                        lines[i - 1] = lines[i - 1] + " "
                    else:
                        new = new + [" "]
            else:
                new = new + ert_begin + tmp + ["\\end_inset ", ""]

            if inset:
                k3 = find_end_of_inset(lines, k2)
                new = (
                    new + [""] + lines[k2 : k3 + 1] + [""]
                )  # Put an empty line after \end_inset
                k = k3 + 1
                # Skip the empty line after \end_inset
                if not is_nonempty_line(lines[k]):
                    k = k + 1
                    new.append("")
            elif hfill:
                new = new + ["\\hfill", ""]
                k = k2
            elif specialchar:
                if new == []:
                    # This is not necessary, but we want the output to be
                    # as similar as possible to the lyx format
                    lines[i - 1] = lines[i - 1] + specialchar_str
                    new = [""]
                else:
                    new = new + [specialchar_str, ""]
                k = k2
            else:
                break

        # Append the tokens moved outside the ERT, then the terminator
        # line itself unless it was a "\latex ..." token.
        new = new + new2
        if not check_token(lines[j], "\\latex "):
            new = new + [""] + [lines[j]]
        lines[i : j + 1] = new
        i = i + 1

    # Delete remaining "\latex xxx" tokens
    i = 0
    while True:
        i = find_token(lines, "\\latex ", i)
        if i == -1:
            break
        del lines[i]
|
2002-08-21 07:33:25 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_oldertinset(document):
    "ERT insert are hidden feature of lyx 1.1.6. This might be removed in the future."
    lines = document.body
    i = 0
    while True:
        i = find_token(lines, "\\begin_inset ERT", i)
        if i == -1:
            break
        j = find_end_of_inset(lines, i)
        k = find_token(lines, "\\layout", i + 1)
        l = get_paragraph(lines, i, document.format + 1)
        if lines[k] == lines[l]:  # same layout
            # Drop the redundant inner \layout line as well.
            k = k + 1
        # Keep only the inset's content; the inset wrapper itself goes away.
        new = lines[k:j]
        lines[i : j + 1] = new
        i = i + 1
|
2002-08-06 12:10:09 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def is_ert_paragraph(document, i):
    "Is this a ert paragraph?"
    # True only for a default-layout paragraph whose sole content is a
    # single ERT inset. Returns 0/1 (historical integer booleans).
    lines = document.body
    if not check_token(lines[i], "\\layout"):
        return 0
    if not document.is_default_layout(get_layout(lines[i], document.default_layout)):
        return 0

    i = find_nonempty_line(lines, i + 1)
    if not check_token(lines[i], "\\begin_inset ERT"):
        return 0

    j = find_end_of_inset(lines, i)
    k = find_nonempty_line(lines, j + 1)
    # Nothing may follow the inset before the next paragraph.
    return check_token(lines[k], "\\layout")
|
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def combine_ert(document):
    "Combine ERT paragraphs."
    # Merges runs of two or more consecutive ERT-only paragraphs into the
    # first paragraph's single ERT inset.
    lines = document.body
    i = 0
    while True:
        i = find_token(lines, "\\begin_inset ERT", i)
        if i == -1:
            break
        j = get_paragraph(lines, i, document.format + 1)
        count = 0
        text = []
        while is_ert_paragraph(document, j):
            # Collect the inner content of each ERT paragraph in the run.
            count = count + 1
            i2 = find_token(lines, "\\layout", j + 1)
            k = find_token(lines, "\\end_inset", i2 + 1)
            text = text + lines[i2:k]
            j = find_token(lines, "\\layout", k + 1)
            if j == -1:
                break

        if count >= 2:
            # Replace everything from the first inner \layout up to the
            # last collected \end_inset with the combined content.
            j = find_token(lines, "\\layout", i + 1)
            lines[j:k] = text

        i = i + 1
|
2003-10-13 09:50:10 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2002-08-02 19:25:14 +00:00
|
|
|
# Length unit names, indexed by the integer unit code of the old format.
oldunits = ["pt", "cm", "in", "text%", "col%"]
|
|
|
|
|
2024-06-15 09:06:06 +00:00
|
|
|
|
2002-08-02 19:25:14 +00:00
|
|
|
def get_length(lines, name, start, end):
    "Return the length stored for token `name` in lines[start:end] (e.g. '5cm'), or ''."
    pos = find_token(lines, name, start, end)
    if pos == -1:
        return ""
    # Old format stores "<name> <unit-code> <value>".
    fields = lines[pos].split()
    return fields[2] + oldunits[int(fields[1])]
|
2002-08-02 19:25:14 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2002-08-28 13:26:39 +00:00
|
|
|
def write_attribute(x, token, value):
    "Append a tab-indented '<token> <value>' line to x, unless value is empty."
    if value != "":
        x.append(f"\t{token} {value}")
|
2002-08-02 19:25:14 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_figinset(document):
    "Remove figinset."
    # Converts old "\begin_inset Figure" insets into the newer
    # "\begin_inset Graphics FormatVersion 1" representation.
    lines = document.body
    i = 0
    while True:
        i = find_token(lines, "\\begin_inset Figure", i)
        if i == -1:
            break
        j = find_end_of_inset(lines, i)

        # The Figure line may carry on-screen size: "... Figure size <w> <h>".
        if len(lines[i].split()) > 2:
            lyxwidth = lines[i].split()[3] + "pt"
            lyxheight = lines[i].split()[4] + "pt"
        else:
            lyxwidth = ""
            lyxheight = ""

        filename = get_value(lines, "file", i + 1, j)

        width = get_length(lines, "width", i + 1, j)
        # what does width=5 mean ?
        height = get_length(lines, "height", i + 1, j)
        rotateAngle = get_value(lines, "angle", i + 1, j)
        if width == "" and height == "":
            size_type = "0"
        else:
            size_type = "1"

        # Low two bits of the old "flags" value encode the display mode.
        flags = get_value(lines, "flags", i + 1, j)
        x = int(flags) % 4
        if x == 1:
            display = "monochrome"
        elif x == 2:
            display = "gray"
        else:
            display = "color"

        subcaptionText = ""
        subcaptionLine = find_token(lines, "subcaption", i + 1, j)
        if subcaptionLine != -1:
            # Strip the "subcaption " prefix (11 characters) from the line.
            subcaptionText = lines[subcaptionLine][11:]
            if subcaptionText != "":
                subcaptionText = '"' + subcaptionText + '"'

        k = find_token(lines, "subfigure", i + 1, j)
        if k == -1:
            subcaption = 0
        else:
            subcaption = 1

        # Assemble the replacement Graphics inset.
        new = ["\\begin_inset Graphics FormatVersion 1"]
        write_attribute(new, "filename", filename)
        write_attribute(new, "display", display)
        if subcaption:
            new.append("\tsubcaption")
        write_attribute(new, "subcaptionText", subcaptionText)
        write_attribute(new, "size_type", size_type)
        write_attribute(new, "width", width)
        write_attribute(new, "height", height)
        if rotateAngle != "":
            new.append("\trotate")
            write_attribute(new, "rotateAngle", rotateAngle)
        write_attribute(new, "rotateOrigin", "leftBaseline")
        write_attribute(new, "lyxsize_type", "1")
        write_attribute(new, "lyxwidth", lyxwidth)
        write_attribute(new, "lyxheight", lyxheight)
        new = new + ["\\end_inset"]
        lines[i : j + 1] = new
|
2002-08-02 19:25:14 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2002-08-28 10:45:21 +00:00
|
|
|
# An attribute holding a default value ("false", "0" or empty) — droppable.
attr_re = re.compile(r' \w*="(false|0|)"')
# Tabular tags whose default-valued attributes get stripped.
line_re = re.compile(r"<(features|column|row|cell)")
|
|
|
|
|
2002-08-28 10:45:21 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def update_tabular(document):
    "Convert tabular format 2 to 3."
    regexp = re.compile(r"^\\begin_inset\s+Tabular")
    lines = document.body
    i = 0
    while True:
        i = find_re(lines, regexp, i)
        if i == -1:
            break

        for k in get_tabular_lines(lines, i):
            if check_token(lines[k], "<lyxtabular"):
                lines[k] = lines[k].replace('version="2"', 'version="3"')
            elif check_token(lines[k], "<column"):
                # Empty widths are spelled "0pt" in format 3.
                lines[k] = lines[k].replace('width=""', 'width="0pt"')

            if line_re.match(lines[k]):
                # Drop attributes that are at their default value.
                lines[k] = re.sub(attr_re, "", lines[k])

        i = i + 1
|
2002-08-28 10:45:21 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2004-12-03 18:33:19 +00:00
|
|
|
##
# Convert tabular format 2 to 3
#
# compatibility read for old longtable options. Now we can make any
# row part of the header/footer type we want before it was strict
# sequential from the first row down (as LaTeX does it!). So now when
# we find a header/footer line we have to go up the rows and set it
# on all preceding rows till the first or one with already a h/f option
# set. If we find a firstheader on the same line as a header or a
# lastfooter on the same line as a footer then this should be set empty.
# (Jug 20011220)

# just for compatibility with old python versions
# python >= 2.3 has real booleans (False and True)
false = 0
true = 1
|
|
|
|
|
2024-06-15 09:06:06 +00:00
|
|
|
|
2004-12-03 18:33:19 +00:00
|
|
|
class row:
    "Simple data structure to deal with long table info."

    def __init__(self):
        # All flags use the module-level false/true (0/1) compatibility ints.
        self.endhead = false  # header row
        self.endfirsthead = false  # first header row
        self.endfoot = false  # footer row
        self.endlastfoot = false  # last footer row
|
2004-12-03 18:33:19 +00:00
|
|
|
|
|
|
|
|
|
|
|
def haveLTFoot(row_info):
    "Return true if any row in row_info is marked as a footer row."
    if any(entry.endfoot for entry in row_info):
        return true
    return false
|
|
|
|
|
|
|
|
|
|
|
|
def setHeaderFooterRows(hr, fhr, fr, lfr, rows_, row_info):
    "Set Header/Footer rows."
    # hr/fhr/fr/lfr are the old sequential end-row numbers for header,
    # firsthead, footer and lastfooter. Flags are propagated upward onto
    # all preceding rows, mirroring the old C++ compatibility read.
    # Returns (endfirsthead_empty, endlastfoot_empty).
    endfirsthead_empty = false
    endlastfoot_empty = false
    # set header info
    while hr > 0:
        hr = hr - 1
        row_info[hr].endhead = true

    # set firstheader info
    if fhr and fhr < rows_:
        if row_info[fhr].endhead:
            # firsthead overlaps the header: convert those rows.
            while fhr > 0:
                fhr = fhr - 1
                row_info[fhr].endfirsthead = true
                row_info[fhr].endhead = false
        elif row_info[fhr - 1].endhead:
            endfirsthead_empty = true
        else:
            while fhr > 0 and not row_info[fhr - 1].endhead:
                fhr = fhr - 1
                row_info[fhr].endfirsthead = true

    # set footer info
    if fr and fr < rows_:
        # NOTE(review): the first branch's loop condition contradicts its
        # guard (it requires endhead on fr-1, then loops while it is NOT
        # set) — preserved verbatim from the original compatibility code.
        if row_info[fr].endhead and row_info[fr - 1].endhead:
            while fr > 0 and not row_info[fr - 1].endhead:
                fr = fr - 1
                row_info[fr].endfoot = true
                row_info[fr].endhead = false
        elif row_info[fr].endfirsthead and row_info[fr - 1].endfirsthead:
            while fr > 0 and not row_info[fr - 1].endfirsthead:
                fr = fr - 1
                row_info[fr].endfoot = true
                row_info[fr].endfirsthead = false
        elif not row_info[fr - 1].endhead and not row_info[fr - 1].endfirsthead:
            while fr > 0 and not row_info[fr - 1].endhead and not row_info[fr - 1].endfirsthead:
                fr = fr - 1
                row_info[fr].endfoot = true

    # set lastfooter info
    if lfr and lfr < rows_:
        if row_info[lfr].endhead and row_info[lfr - 1].endhead:
            while lfr > 0 and not row_info[lfr - 1].endhead:
                lfr = lfr - 1
                row_info[lfr].endlastfoot = true
                row_info[lfr].endhead = false
        elif row_info[lfr].endfirsthead and row_info[lfr - 1].endfirsthead:
            while lfr > 0 and not row_info[lfr - 1].endfirsthead:
                lfr = lfr - 1
                row_info[lfr].endlastfoot = true
                row_info[lfr].endfirsthead = false
        elif row_info[lfr].endfoot and row_info[lfr - 1].endfoot:
            while lfr > 0 and not row_info[lfr - 1].endfoot:
                lfr = lfr - 1
                row_info[lfr].endlastfoot = true
                row_info[lfr].endfoot = false
        # NOTE(review): this guard tests `fr`, not `lfr`, unlike the rest
        # of this section — looks like a long-standing quirk; verify
        # against the original C++ tabular compatibility read.
        elif (
            not row_info[fr - 1].endhead
            and not row_info[fr - 1].endfirsthead
            and not row_info[fr - 1].endfoot
        ):
            while (
                lfr > 0
                and not row_info[lfr - 1].endhead
                and not row_info[lfr - 1].endfirsthead
                and not row_info[lfr - 1].endfoot
            ):
                lfr = lfr - 1
                row_info[lfr].endlastfoot = true
        elif haveLTFoot(row_info):
            endlastfoot_empty = true

    return endfirsthead_empty, endlastfoot_empty
|
|
|
|
|
|
|
|
|
|
|
|
def insert_attribute(lines, i, attribute):
    "Insert attribute in lines[i]."
    # Splice the attribute (preceded by a single space) immediately
    # before the first '>' of the tag stored in lines[i].
    line = lines[i]
    pos = line.find(">")
    lines[i] = "".join([line[:pos], " ", attribute, line[pos:]])
|
2004-12-03 18:33:19 +00:00
|
|
|
|
|
|
|
|
|
|
|
# Patterns used by update_longtables() below to parse old-format tables.

# Number of rows declared on a <lyxtabular ...> line.
rows_re = re.compile(r'rows="(\d*)"')

# The islongtable flag on a <features ...> line.
longtable_re = re.compile(r'islongtable="(\w)"')

# The four old-style longtable row numbers, captured individually ...
ltvalues_re = re.compile(
    r'endhead="(-?\d*)" endfirsthead="(-?\d*)" endfoot="(-?\d*)" endlastfoot="(-?\d*)"'
)

# ... and the same attribute run captured as a single group, so it can
# be stripped from non-longtable <features> lines.
lt_features_re = re.compile(
    r'(endhead="-?\d*" endfirsthead="-?\d*" endfoot="-?\d*" endlastfoot="-?\d*")'
)
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def update_longtables(document):
    """Update longtables to new format.

    The old format stored the header/footer layout of a longtable as four
    row numbers on the <features> line; the new format marks the affected
    rows themselves (endhead/endfirsthead/endfoot/endlastfoot attributes)
    and records empty first-head/last-foot state on <features>.
    """
    regexp = re.compile(r"^\\begin_inset\s+Tabular")
    body = document.body
    i = 0
    while True:
        i = find_re(body, regexp, i)
        if i == -1:
            break
        i = i + 1
        i = find_token(body, "<lyxtabular", i)
        if i == -1:
            break

        # get number of rows in the table
        rows = int(rows_re.search(body[i]).group(1))

        i = i + 1
        i = find_token(body, "<features", i)
        if i == -1:
            break

        # is this a longtable?
        longtable = longtable_re.search(body[i])

        if not longtable:
            # islongtable is missing, add it right after "<features "
            # (len("<features ") == 10).
            body[i] = body[i][:10] + 'islongtable="false" ' + body[i][10:]

        if not longtable or longtable.group(1) != "true":
            # remove longtable elements from features
            features = lt_features_re.search(body[i])
            if features:
                body[i] = body[i].replace(features.group(1), "")
            continue

        # One independent flag holder per row.  (The old `row() * rows`
        # either raised TypeError or aliased a single shared instance, so
        # setting a flag on one row clobbered them all.)
        row_info = [row() for j in range(rows)]
        res = ltvalues_re.search(body[i])
        if not res:
            continue

        endfirsthead_empty, endlastfoot_empty = setHeaderFooterRows(
            res.group(1), res.group(2), res.group(3), res.group(4), rows, row_info
        )

        if endfirsthead_empty:
            insert_attribute(body, i, 'firstHeadEmpty="true"')

        # Bug fix: this used to re-test endfirsthead_empty, so
        # lastFootEmpty="true" could never be written on its own.
        if endlastfoot_empty:
            insert_attribute(body, i, 'lastFootEmpty="true"')

        i = i + 1
        # Tag each row with the flags computed by setHeaderFooterRows.
        # (Two vestigial lines that cleared row_info[i] — indexing by the
        # body *line* number instead of the row number — were removed:
        # they would have raised IndexError or wiped the footer flags
        # before they were read.)
        for j in range(rows):
            i = find_token(body, "<row", i)

            if row_info[j].endhead:
                insert_attribute(body, i, 'endhead="true"')

            if row_info[j].endfirsthead:
                insert_attribute(body, i, 'endfirsthead="true"')

            if row_info[j].endfoot:
                insert_attribute(body, i, 'endfoot="true"')

            if row_info[j].endlastfoot:
                insert_attribute(body, i, 'endlastfoot="true"')

            i = i + 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def fix_oldfloatinset(document):
    "Figure insert are hidden feature of lyx 1.1.6. This might be removed in the future."
    body = document.body
    i = 0
    while True:
        i = find_token(body, "\\begin_inset Float ", i)
        if i == -1:
            break
        # Give every float inset an explicit "wide false" line directly
        # before its "collapsed" status line.
        k = find_token(body, "collapsed", i)
        if k != -1:
            body.insert(k, "wide false")
        i = i + 1
|
2002-10-01 14:17:31 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def change_listof(document):
    "Change listof insets."
    body = document.body
    i = 0
    while True:
        i = find_token(body, "\\begin_inset LatexCommand \\listof", i)
        if i == -1:
            break
        # e.g. "\listoffigures" -> captured "figures" -> singular "figure".
        plural = re.search(r"listof(\w*)", body[i]).group(1)
        body[i] = "\\begin_inset FloatList " + plural[:-1]
        i = i + 1
|
2002-10-01 14:17:31 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def change_infoinset(document):
    "Change info inset."
    body = document.body
    i = 0
    while True:
        i = find_token(body, "\\begin_inset Info", i)
        if i == -1:
            break
        # Text that follows the tag on the same line (offset 18 skips
        # "\begin_inset Info" plus one separator character).
        inline = body[i][18:].lstrip()
        j = find_token(body, "\\end_inset", i)
        if j == -1:
            break

        content = body[i + 1 : j]
        if inline:
            content.insert(0, inline)

        # Rebuild the inset as a collapsed Note, one paragraph per line,
        # with every literal backslash spelled out as "\backslash ".
        replacement = ["\\begin_inset Note", "collapsed true", ""]
        for line in content:
            replacement.append(r"\layout %s" % document.default_layout)
            replacement.append("")
            parts = line.split("\\")
            replacement.append(parts[0])
            for piece in parts[1:]:
                replacement.append("\\backslash ")
                replacement.append(piece)
        body[i:j] = replacement
        i = i + 5
|
2003-03-11 14:47:57 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def change_header(document):
    "Update header."
    header = document.header
    pos = find_token(header, "\\use_amsmath", 0)
    if pos == -1:
        return
    # The two new 1.2 settings go directly after \use_amsmath, disabled.
    header.insert(pos + 1, "\\use_numerical_citations 0")
    header.insert(pos + 1, "\\use_natbib 0")
|
2002-08-02 19:25:14 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
# LyX version strings this module accepts: 1.2 plus its point releases.
supported_versions = ["1.2.%d" % i for i in range(5)] + ["1.2"]
# Single conversion step to file format 220; the functions are applied
# to the document in the listed order.
convert = [
    [
        220,
        [
            change_header,
            change_listof,
            fix_oldfloatinset,
            update_tabular,
            update_longtables,
            remove_pextra,
            remove_oldfloat,
            remove_figinset,
            remove_oldertinset,
            remove_oldert,
            combine_ert,
            change_infoinset,
        ],
    ]
]
# No downgrade path back to the pre-220 format is provided.
revert = []
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2002-08-01 15:26:32 +00:00
|
|
|
|
|
|
|
if __name__ == "__main__":
    # This module is only meant to be imported by the lyx2lyx driver;
    # running it directly is a no-op.
    pass
|