# This file is part of lyx2lyx
# Copyright (C) 2002 Dekel Tsur <dekel@lyx.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
|
|
|
|
|
|
|
|
2002-08-02 19:25:14 +00:00
|
|
|
import sys,string,re
|
2002-08-01 15:26:32 +00:00
|
|
|
from parser_tools import *
|
|
|
|
|
|
|
|
# Map from the old "\begin_float <type>" float names to the header lines of
# the new inset that replaces the float (see remove_oldfloat below).
floats = {
    "footnote": ["\\begin_inset Foot",
                 "collapsed true"],
    "margin": ["\\begin_inset Marginal",
               "collapsed true"],
    "fig": ["\\begin_inset Float figure",
            "wide false",
            "collapsed false"],
    "tab": ["\\begin_inset Float table",
            "wide false",
            "collapsed false"],
    "alg": ["\\begin_inset Float algorithm",
            "wide false",
            "collapsed false"],
    "wide-fig": ["\\begin_inset Float figure",
                 "wide true",
                 "collapsed false"],
    "wide-tab": ["\\begin_inset Float table",
                 "wide true",
                 "collapsed false"]
}
|
|
|
|
|
2002-08-03 14:29:12 +00:00
|
|
|
# Character-style tokens that a float resets; remove_oldfloat re-emits
# "<token> default" (or "\lang <language>") for any of these that appear
# in the paragraph before the float.
font_tokens = ["\\family", "\\series", "\\shape", "\\size", "\\emph",
               "\\bar", "\\noun", "\\color", "\\lang", "\\latex"]
|
2002-08-03 14:29:12 +00:00
|
|
|
|
2002-09-12 12:02:54 +00:00
|
|
|
# Matches a paragraph carrying "\pextra_type 3" (old floating figure).
pextra_type3_rexp = re.compile(r".*\\pextra_type\s+3")

# Matches the full run of \pextra_* attributes on one line.  Callers depend
# on the group numbers: 1 = type, 3 = alignment, 5 = hfill,
# 7 = start_minipage, 9 = width keyword, 10 = width value.
pextra_rexp = re.compile(r"\\pextra_type\s+(\S+)"+\
                         r"(\s+\\pextra_alignment\s+(\S+))?"+\
                         r"(\s+\\pextra_hfill\s+(\S+))?"+\
                         r"(\s+\\pextra_start_minipage\s+(\S+))?"+\
                         r"(\s+(\\pextra_widthp?)\s+(\S*))?")


def get_width(mo):
    "Return the width from a pextra_rexp match, defaulting to the full column."
    value = mo.group(10)
    if not value:
        # No width was given at all.
        return "100col%"
    if mo.group(9) == "\\pextra_widthp":
        # \pextra_widthp stores a bare percentage of the column width.
        return value + "col%"
    return value
|
|
|
|
|
2002-10-08 15:37:52 +00:00
|
|
|
#
# Change \begin_float .. \end_float into \begin_inset Float .. \end_inset
#
def remove_oldfloat(lines, language):
    "Rewrite every old \\begin_float block in lines as the new inset form."
    i = 0
    while 1:
        i = find_token(lines, "\\begin_float", i)
        if i == -1:
            break
        # There are no nested floats, so finding the end of the float is simple
        j = find_token(lines, "\\end_float", i+1)

        floattype = string.split(lines[i])[1]
        if not floats.has_key(floattype):
            sys.stderr.write("Error! Unknown float type "+floattype+"\n")
            # Fall back to a plain figure float so conversion can continue.
            floattype = "fig"

        # skip \end_deeper tokens
        i2 = i+1
        while check_token(lines[i2], "\\end_deeper"):
            i2 = i2+1
        if i2 > i+1:
            # Re-insert the skipped \end_deeper lines after the float, at the
            # start of the next paragraph.
            j2 = get_next_paragraph(lines, j+1)
            lines[j2:j2] = ["\\end_deeper "]*(i2-(i+1))

        new = floats[floattype]+[""]

        # Check if the float is floatingfigure
        k = find_re(lines, pextra_type3_rexp, i, j)
        if k != -1:
            # \pextra_type 3 marks an old floatingfigure; it becomes a
            # Wrap figure inset instead of a plain Float.
            mo = pextra_rexp.search(lines[k])
            width = get_width(mo)
            lines[k] = re.sub(pextra_rexp, "", lines[k])
            new = ["\\begin_inset Wrap figure",
                   'width "%s"' % width,
                   "collapsed false",
                   ""]

        new = new+lines[i2:j]+["\\end_inset ", ""]

        # After a float, all font attributes are reseted.
        # We need to output '\foo default' for every attribute foo
        # whose value is not default before the float.
        # The check here is not accurate, but it doesn't matter
        # as extra '\foo default' commands are ignored.
        # In fact, it might be safer to output '\foo default' for all
        # font attributes.
        k = get_paragraph(lines, i)
        flag = 0
        for token in font_tokens:
            if find_token(lines, token, k, i) != -1:
                if not flag:
                    # This is not necessary, but we want the output to be
                    # as similar as posible to the lyx format
                    flag = 1
                    new.append("")
                if token == "\\lang":
                    # \lang must be restored to the document language,
                    # not to "default".
                    new.append(token+" "+language)
                else:
                    new.append(token+" default ")

        lines[i:j+1] = new
        i = i+1
|
|
|
|
|
2002-10-08 15:37:52 +00:00
|
|
|
# Matches a paragraph with \pextra_type 1 (indented) or 2 (minipage).
pextra_type2_rexp = re.compile(r".*\\pextra_type\s+[12]")
# Matches either the next paragraph start or a continuation minipage;
# used to collect consecutive minipage paragraphs.
pextra_type2_rexp2 = re.compile(r".*(\\layout|\\pextra_type\s+2)")
|
|
|
|
|
2002-10-08 15:37:52 +00:00
|
|
|
def remove_pextra(lines):
    """Replace the old \\pextra_* paragraph attributes.

    \\pextra_type 1 becomes a \\leftindent attribute; \\pextra_type 2
    paragraphs are wrapped into a Minipage inset.  Consecutive minipage
    paragraphs are collected into a single inset; ``flag`` carries the
    "start a new minipage" state from one outer iteration to the next.
    """
    i = 0
    flag = 0
    while 1:
        i = find_re(lines, pextra_type2_rexp, i)
        if i == -1:
            break

        mo = pextra_rexp.search(lines[i])
        width = get_width(mo)

        if mo.group(1) == "1":
            # handle \pextra_type 1 (indented paragraph)
            lines[i] = re.sub(pextra_rexp, "\\leftindent "+width+" ", lines[i])
            i = i+1
            continue

        # handle \pextra_type 2 (minipage)
        position = mo.group(3)
        hfill = mo.group(5)
        lines[i] = re.sub(pextra_rexp, "", lines[i])

        start = ["\\begin_inset Minipage",
                 "position " + position,
                 "inner_position 0",
                 'height "0pt"',
                 'width "%s"' % width,
                 "collapsed false"
                 ]
        if flag:
            # This minipage continues the previous one (set below when a
            # \pextra_start_minipage 1 paragraph was found).
            flag = 0
            if hfill:
                start = ["","\hfill",""]+start
        else:
            start = ["\\layout Standard"] + start

        # j0..j is the span of paragraphs that goes inside the inset.
        j0 = find_token_backwards(lines,"\\layout", i-1)
        j = get_next_paragraph(lines, i)

        count = 0
        while 1:
            # collect more paragraphs to the minipage
            count = count+1
            if j == -1 or not check_token(lines[j], "\\layout"):
                break
            i = find_re(lines, pextra_type2_rexp2, j+1)
            if i == -1:
                break
            mo = pextra_rexp.search(lines[i])
            if not mo:
                break
            if mo.group(7) == "1":
                # \pextra_start_minipage 1: a new minipage starts here,
                # so stop collecting and remember it for the next round.
                flag = 1
                break
            lines[i] = re.sub(pextra_rexp, "", lines[i])
            j = find_tokens(lines, ["\\layout", "\\end_float"], i+1)

        mid = lines[j0:j]
        end = ["\\end_inset "]

        lines[j0:j] = start+mid+end
        i = i+1
|
|
|
|
|
2002-08-02 19:25:14 +00:00
|
|
|
def is_empty(lines):
    "Return true if lines contains no non-blank line."
    # The old form, filter(is_nonempty_line, lines) == [], relies on
    # filter() returning a list and always scans the whole input.  An
    # explicit loop short-circuits and does not depend on that behavior.
    for line in lines:
        if is_nonempty_line(line):
            return 0
    return 1
|
2002-08-02 19:25:14 +00:00
|
|
|
|
2002-10-01 08:21:47 +00:00
|
|
|
# Font/nesting commands that must be moved out of an ERT inset.
move_rexp = re.compile(r"\\(family|series|shape|size|emph|numeric|bar|noun|end_deeper)")
# Content that cannot remain verbatim inside the new ERT inset.
ert_rexp = re.compile(r"\\begin_inset|\\hfill|.*\\SpecialChar")
# Splits a line into (text before, the \SpecialChar command and the rest).
spchar_rexp = re.compile(r"(.*)(\\SpecialChar.*)")
# Header lines of each ERT inset emitted by remove_oldert.
ert_begin = ["\\begin_inset ERT",
             "status Collapsed",
             "",
             "\\layout Standard"]
|
2002-08-02 19:25:14 +00:00
|
|
|
|
|
|
|
def remove_oldert(lines):
    """Convert old-style LaTeX text (\\latex latex spans and \\layout LaTeX
    paragraphs) into ERT insets, moving insets, special characters and
    font commands out of the verbatim text."""
    i = 0
    while 1:
        i = find_tokens(lines, ["\\latex latex", "\\layout LaTeX"], i)
        if i == -1:
            break
        # Find j, the end of the LaTeX span starting at i.
        j = i+1
        while 1:
            # \end_inset is for ert inside a tabular cell. The other tokens
            # are obvious.
            j = find_tokens(lines, ["\\latex default", "\\layout", "\\begin_inset", "\\end_inset", "\\end_float", "\\the_end"],
                            j)
            if check_token(lines[j], "\\begin_inset"):
                # Skip over a whole nested inset and keep searching.
                j = find_end_of_inset(lines, j)+1
            else:
                break

        if check_token(lines[j], "\\layout"):
            # Keep any \begin_deeper lines outside the ERT.
            while j-1 >= 0 and check_token(lines[j-1], "\\begin_deeper"):
                j = j-1

        # We need to remove insets, special chars & font commands from ERT text
        new = []
        new2 = []
        if check_token(lines[i], "\\layout LaTeX"):
            new = ["\layout Standard", "", ""]
            # We have a problem with classes in which Standard is not the default layout!

        # Walk the span [i+1, j), cutting it at each inset/hfill/SpecialChar
        # and wrapping each plain-text piece in its own ERT inset.
        k = i+1
        while 1:
            k2 = find_re(lines, ert_rexp, k, j)
            inset = hfill = specialchar = 0
            if k2 == -1:
                k2 = j
            elif check_token(lines[k2], "\\begin_inset"):
                inset = 1
            elif check_token(lines[k2], "\\hfill"):
                hfill = 1
                del lines[k2]
                j = j-1
            else:
                # A \SpecialChar: split the line before it.
                specialchar = 1
                mo = spchar_rexp.match(lines[k2])
                lines[k2] = mo.group(1)
                specialchar_str = mo.group(2)
                k2 = k2+1

            tmp = []
            for line in lines[k:k2]:
                # Move some lines outside the ERT inset:
                if move_rexp.match(line):
                    if new2 == []:
                        # This is not necessary, but we want the output to be
                        # as similar as posible to the lyx format
                        new2 = [""]
                    new2.append(line)
                elif not check_token(line, "\\latex"):
                    tmp.append(line)

            if is_empty(tmp):
                # Nothing but blank lines; preserve a single space if any
                # line was a bare space.
                if filter(lambda x:x != "", tmp) != []:
                    if new == []:
                        # This is not necessary, but we want the output to be
                        # as similar as posible to the lyx format
                        lines[i-1] = lines[i-1]+" "
                    else:
                        new = new+[" "]
            else:
                new = new+ert_begin+tmp+["\\end_inset ", ""]

            if inset:
                # Copy the nested inset through unchanged.
                k3 = find_end_of_inset(lines, k2)
                new = new+[""]+lines[k2:k3+1]+[""] # Put an empty line after \end_inset
                k = k3+1
                # Skip the empty line after \end_inset
                if not is_nonempty_line(lines[k]):
                    k = k+1
                    new.append("")
            elif hfill:
                new = new+["\hfill", ""]
                k = k2
            elif specialchar:
                if new == []:
                    # This is not necessary, but we want the output to be
                    # as similar as posible to the lyx format
                    lines[i-1] = lines[i-1]+specialchar_str
                    new = [""]
                else:
                    new = new+[specialchar_str, ""]
                k = k2
            else:
                break

        new = new+new2
        if not check_token(lines[j], "\\latex "):
            # Keep the terminating token (it was not a \latex change).
            new = new+[""]+[lines[j]]
        lines[i:j+1] = new
        i = i+1

    # Delete remaining "\latex xxx" tokens
    i = 0
    while 1:
        i = find_token(lines, "\\latex ", i)
        if i == -1:
            break
        del lines[i]
|
|
|
|
|
2002-10-01 14:17:31 +00:00
|
|
|
# ERT insert are hidden feature of lyx 1.1.6. This might be removed in the future.
def remove_oldertinset(lines):
    "Unwrap old ERT insets, keeping only their contents in place."
    pos = 0
    while 1:
        pos = find_token(lines, "\\begin_inset ERT", pos)
        if pos == -1:
            break
        inset_end = find_end_of_inset(lines, pos)
        body = find_token(lines, "\\layout", pos+1)
        # If the inset's first layout repeats the layout of the enclosing
        # paragraph, drop that redundant \layout line too.
        if lines[body] == lines[get_paragraph(lines, pos)]:
            body = body+1
        lines[pos:inset_end+1] = lines[body:inset_end]
        pos = pos+1
|
|
|
|
|
2002-08-02 20:34:20 +00:00
|
|
|
def is_ert_paragraph(lines, i):
    "True if line i starts a Standard paragraph containing only an ERT inset."
    if not check_token(lines[i], "\\layout Standard"):
        return 0

    # The first non-blank line of the paragraph must open an ERT inset...
    first = find_nonempty_line(lines, i+1)
    if not check_token(lines[first], "\\begin_inset ERT"):
        return 0

    # ...and the first non-blank line after the inset must start a new
    # paragraph, i.e. the inset is the paragraph's only content.
    after = find_nonempty_line(lines, find_end_of_inset(lines, first)+1)
    return check_token(lines[after], "\\layout")
|
|
|
|
|
|
|
|
def combine_ert(lines):
    "Merge runs of consecutive ERT-only paragraphs into a single ERT inset."
    i = 0
    while 1:
        i = find_token(lines, "\\begin_inset ERT", i)
        if i == -1:
            break
        j = get_paragraph(lines, i)
        count = 0
        text = []
        # Collect the bodies of consecutive ERT-only paragraphs; after the
        # loop, k still points at the \end_inset of the last one collected.
        while is_ert_paragraph(lines, j):

            count = count+1
            i2 = find_token(lines, "\\layout", j+1)
            k = find_token(lines, "\\end_inset", i2+1)
            text = text+lines[i2:k]
            j = find_token(lines, "\\layout", k+1)
            if j == -1:
                break

        if count >= 2:
            # Replace everything from the first inset's body to the last
            # inset's \end_inset with the combined text.
            j = find_token(lines, "\\layout", i+1)
            lines[j:k] = text

        i = i+1
|
|
|
|
|
2002-08-02 19:25:14 +00:00
|
|
|
# In the old figure format a unit is stored as an index into this table.
oldunits = ["pt", "cm", "in", "text%", "col%"]


def get_length(lines, name, start, end):
    "Read a '<name> <unit-index> <value>' line between start and end; '' if absent."
    pos = find_token(lines, name, start, end)
    if pos == -1:
        return ""
    fields = string.split(lines[pos])
    # Combine the numeric value with its decoded unit, e.g. "5" + "cm".
    return fields[2]+oldunits[int(fields[1])]
|
|
|
|
|
2002-08-28 13:26:39 +00:00
|
|
|
def write_attribute(x, token, value):
    "Append a tab-indented 'token value' line to x, unless value is empty."
    if value == "":
        return
    x.append("\t%s %s" % (token, value))
|
|
|
|
|
|
|
|
def remove_figinset(lines):
    "Convert old Figure insets into Graphics FormatVersion 1 insets."
    i = 0
    while 1:
        i = find_token(lines, "\\begin_inset Figure", i)
        if i == -1:
            break
        j = find_end_of_inset(lines, i)

        # The screen size may be given on the \begin_inset line itself.
        if ( len(string.split(lines[i])) > 2 ):
            lyxwidth = string.split(lines[i])[3]+"pt"
            lyxheight = string.split(lines[i])[4]+"pt"
        else:
            lyxwidth = ""
            lyxheight = ""

        filename = get_value(lines, "file", i+1, j)

        width = get_length(lines, "width", i+1, j)
        # what does width=5 mean ?
        height = get_length(lines, "height", i+1, j)
        rotateAngle = get_value(lines, "angle", i+1, j)
        if width == "" and height == "":
            size_type = "0"
        else:
            size_type = "1"

        # The low two bits of the old flags select the display mode.
        flags = get_value(lines, "flags", i+1, j)
        x = int(flags)%4
        if x == 1:
            display = "monochrome"
        elif x == 2:
            display = "gray"
        else:
            display = "color"

        subcaptionText = ""
        subcaptionLine = find_token(lines, "subcaption", i+1, j)
        if subcaptionLine != -1:
            subcaptionText = lines[subcaptionLine][11:]
            if subcaptionText != "":
                subcaptionText = '"'+subcaptionText+'"'

        k = find_token(lines, "subfigure", i+1,j)
        if k == -1:
            subcaption = 0
        else:
            subcaption = 1

        # Emit the new inset; write_attribute skips empty values.
        new = ["\\begin_inset Graphics FormatVersion 1"]
        write_attribute(new, "filename", filename)
        write_attribute(new, "display", display)
        if subcaption:
            new.append("\tsubcaption")
        write_attribute(new, "subcaptionText", subcaptionText)
        write_attribute(new, "size_type", size_type)
        write_attribute(new, "width", width)
        write_attribute(new, "height", height)
        if rotateAngle != "":
            new.append("\trotate")
            write_attribute(new, "rotateAngle", rotateAngle)
            write_attribute(new, "rotateOrigin", "leftBaseline")
        write_attribute(new, "lyxsize_type", "1")
        write_attribute(new, "lyxwidth", lyxwidth)
        write_attribute(new, "lyxheight", lyxheight)
        new = new + ["\end_inset"]
        lines[i:j+1] = new
|
|
|
|
|
2002-08-28 10:45:21 +00:00
|
|
|
# Attributes whose value is a default ("false", "0" or empty) and can be dropped.
attr_re = re.compile(r' \w*="(false|0|)"')
# Tabular lines on which such default attributes are removed.
line_re = re.compile(r'<(features|column|row|cell)')


def update_tabular(lines):
    "Upgrade Tabular insets from format version 2 to version 3."
    i = 0
    while 1:
        i = find_token(lines, '\\begin_inset Tabular', i)
        if i == -1:
            break

        for k in get_tabular_lines(lines, i):
            if check_token(lines[k], "<lyxtabular"):
                lines[k] = string.replace(lines[k], 'version="2"', 'version="3"')
            elif check_token(lines[k], "<column"):
                # An empty width is now written explicitly as 0pt.
                lines[k] = string.replace(lines[k], 'width=""', 'width="0pt"')

            if line_re.match(lines[k]):
                # Strip attributes that just restate their default value.
                lines[k] = re.sub(attr_re, "", lines[k])

        i = i+1
|
2002-08-28 10:45:21 +00:00
|
|
|
|
2002-10-01 14:17:31 +00:00
|
|
|
# Figure insert are hidden feature of lyx 1.1.6. This might be removed in the future.
def fix_oldfloatinset(lines):
    "Add the missing 'wide false' line to Float insets written by lyx 1.1.6."
    pos = 0
    while 1:
        pos = find_token(lines, "\\begin_inset Float ", pos)
        if pos == -1:
            break
        collapsed = find_token(lines, "collapsed", pos)
        if collapsed != -1:
            # The new format expects "wide false" just before "collapsed".
            lines.insert(collapsed, "wide false")
        pos = pos+1
|
|
|
|
|
|
|
|
def change_listof(lines):
    "Turn \\listof* LatexCommand insets into FloatList insets."
    pos = 0
    while 1:
        pos = find_token(lines, "\\begin_inset LatexCommand \\listof", pos)
        if pos == -1:
            break
        # e.g. "listoffigures" -> "figure": take the word after "listof"
        # and drop the plural 's'.
        kind = re.search(r"listof(\w*)", lines[pos]).group(1)[:-1]
        lines[pos] = "\\begin_inset FloatList "+kind
        pos = pos+1
|
|
|
|
|
2003-03-11 14:47:57 +00:00
|
|
|
def change_infoinset(lines):
    "Convert Info insets into collapsed Note insets."
    i = 0
    while 1:
        i = find_token(lines, "\\begin_inset Info", i)
        if i == -1:
            break
        # Text may follow directly on the \begin_inset line (after 18 chars).
        txt = string.lstrip(lines[i][18:])
        new = ["\\begin_inset Note", "collapsed true", ""]
        j = find_token(lines, "\\end_inset", i)
        if j == -1:
            break

        note_lines = lines[i+1:j]
        if len(txt) > 0:
            note_lines = [txt]+note_lines

        for line in note_lines:
            new = new + ["\layout Standard", ""]
            # Backslashes in the note text must be escaped as \backslash.
            tmp = string.split(line, '\\')
            new = new + [tmp[0]]
            for x in tmp[1:]:
                new = new + ["\\backslash ", x]
        lines[i:j] = new
        # Skip past the inserted note header before searching again.
        i = i+5
|
|
|
|
|
2002-08-02 19:25:14 +00:00
|
|
|
def change_preamble(lines):
|
|
|
|
i = find_token(lines, "\\use_amsmath", 0)
|
|
|
|
if i == -1:
|
|
|
|
return
|
|
|
|
lines[i+1:i+1] = ["\\use_natbib 0",
|
|
|
|
"\use_numerical_citations 0"]
|
|
|
|
|
|
|
|
def convert(header, body):
    "Run all conversion passes on a document's header and body lines."
    # remove_oldfloat needs the document language to restore \lang
    # attributes after a float.
    language = get_value(header, "\\language", 0)
    if language == "":
        language = "english"

    change_preamble(header)
    # NOTE: the order of the body passes matters; later passes assume the
    # earlier rewrites (e.g. ERT cleanup runs after float/figure insets
    # have been converted).
    change_listof(body)
    fix_oldfloatinset(body)
    update_tabular(body)
    remove_pextra(body)
    remove_oldfloat(body, language)
    remove_figinset(body)
    remove_oldertinset(body)
    remove_oldert(body)
    combine_ert(body)
    change_infoinset(body)
|
2002-08-01 15:26:32 +00:00
|
|
|
|
|
|
|
if __name__ == "__main__":
    # This module is only driven by the surrounding lyx2lyx machinery;
    # it has no standalone command-line behavior.
    pass
|