mirror of
https://git.lyx.org/repos/lyx.git
synced 2024-12-24 21:55:29 +00:00
d4ca8d7404
This is the same as the parbreak separator and is represented on screen as the old parbreak. Old parbreak separators are converted to latexpar separators when they are used for introducing blank lines in the latex output rather than for separating environments. Instead, parbreak separators are now represented on screen by a double line. In essence, latexpar and parbreak separators produce the same output but are represented differently on screen. The context menu does not account for latexpar separators and only "true" separators can be turned each into the other one.
2373 lines
88 KiB
Python
2373 lines
88 KiB
Python
# -*- coding: utf-8 -*-
|
|
# This file is part of lyx2lyx
|
|
# -*- coding: utf-8 -*-
|
|
# Copyright (C) 2015 The LyX team
|
|
#
|
|
# This program is free software; you can redistribute it and/or
|
|
# modify it under the terms of the GNU General Public License
|
|
# as published by the Free Software Foundation; either version 2
|
|
# of the License, or (at your option) any later version.
|
|
#
|
|
# This program is distributed in the hope that it will be useful,
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
# GNU General Public License for more details.
|
|
#
|
|
# You should have received a copy of the GNU General Public License
|
|
# along with this program; if not, write to the Free Software
|
|
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
|
|
|
""" Convert files to the file format generated by lyx 2.2"""
|
|
|
|
import re, string
|
|
import unicodedata
|
|
import sys, os
|
|
|
|
# Uncomment only what you need to import, please.
|
|
|
|
#from parser_tools import find_token, find_end_of, find_tokens, \
|
|
# find_token_exact, find_end_of_inset, find_end_of_layout, \
|
|
# find_token_backwards, is_in_inset, get_value, get_quoted_value, \
|
|
# del_token, check_token, get_option_value
|
|
|
|
from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert, get_ert, lyx2latex, \
|
|
lyx2verbatim, length_in_bp, convert_info_insets
|
|
# insert_to_preamble, latex_length, revert_flex_inset, \
|
|
# revert_font_attrs, hex2ratio, str2bool
|
|
|
|
from parser_tools import find_token, find_token_backwards, find_re, \
|
|
find_end_of_inset, find_end_of_layout, find_nonempty_line, \
|
|
get_containing_layout, get_value, check_token
|
|
|
|
####################################################################
|
|
# Private helper functions
|
|
|
|
def revert_Argument_to_TeX_brace(document, line, endline, n, nmax, environment, opt, nolastopt):
    '''
    Reverts an InsetArgument to TeX-code
    usage:
    revert_Argument_to_TeX_brace(document, LineOfBegin, LineOfEnd, StartArgument, EndArgument, isEnvironment, isOpt, notLastOpt)
    LineOfBegin is the line of the \begin_layout or \begin_inset statement
    LineOfEnd is the line of the \end_layout or \end_inset statement, if "0" is given, the end of the file is used instead
    StartArgument is the number of the first argument that needs to be converted
    EndArgument is the number of the last argument that needs to be converted or the last defined one
    isEnvironment must be true, if the layout is for a LaTeX environment
    isOpt must be true, if the argument is an optional one
    notLastOpt must be true if the argument is mandatory and followed by optional ones
    '''
    lineArg = 0
    # Tracks whether the most recently converted argument was optional;
    # reported back to the caller.
    wasOpt = False
    # Convert arguments n .. nmax in turn; stop early when none is found.
    while lineArg != -1 and n < nmax + 1:
        lineArg = find_token(document.body, "\\begin_inset Argument " + str(n), line)
        if lineArg > endline and endline != 0:
            # The next Argument inset lies beyond the layout/inset we were
            # asked to process; leave it alone.
            return wasOpt
        if lineArg != -1:
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", lineArg)
            # we have to assure that no other inset is in the Argument
            beginInset = find_token(document.body, "\\begin_inset", beginPlain)
            endInset = find_token(document.body, "\\end_inset", beginPlain)
            k = beginPlain + 1
            l = k
            # Skip over any nested insets so that endInset finally points at
            # the \end_inset closing the Argument inset itself.
            while beginInset < endInset and beginInset != -1:
                beginInset = find_token(document.body, "\\begin_inset", k)
                endInset = find_token(document.body, "\\end_inset", l)
                k = beginInset + 1
                l = endInset + 1
            if environment == False:
                # Command-style layout: arguments become {...} or [...]
                if opt == False:
                    if nolastopt == False:
                        # mandatory argument followed by optional ones: keep
                        # the brace group open for the next argument
                        document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}{")
                    else:
                        document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}")
                    del(document.body[lineArg : beginPlain + 1])
                    wasOpt = False
                else:
                    document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("]")
                    document.body[lineArg : beginPlain + 1] = put_cmd_in_ert("[")
                    wasOpt = True
            else:
                # Environment-style layout: wrap argument content directly
                if opt == False:
                    document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}")
                    document.body[lineArg : beginPlain + 1] = put_cmd_in_ert("{")
                    wasOpt = False
                else:
                    document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("]")
                    document.body[lineArg : beginPlain + 1] = put_cmd_in_ert("[")
                    wasOpt = True
        n += 1
    return wasOpt
|
|
|
|
|
|
###############################################################################
|
|
###
|
|
### Conversion and reversion routines
|
|
###
|
|
###############################################################################
|
|
|
|
def convert_longtable_label_internal(document, forward):
    """Rename longtable caption insets between old and new spellings.

    When forward is True, "Caption LongTableNoNumber" becomes
    "Caption Unnumbered"; when forward is False the renaming is undone.
    """
    if forward:
        source = "\\begin_inset Caption LongTableNoNumber"
        target = "\\begin_inset Caption Unnumbered"
    else:
        source = "\\begin_inset Caption Unnumbered"
        target = "\\begin_inset Caption LongTableNoNumber"

    pos = 0
    while True:
        pos = find_token(document.body, source, pos)
        if pos == -1:
            return
        document.body[pos] = target
|
|
|
|
|
|
def convert_longtable_label(document):
    """Forward conversion of longtable caption insets to "Unnumbered"."""
    convert_longtable_label_internal(document, True)
|
|
|
|
|
|
def revert_longtable_label(document):
    """Revert longtable caption insets to "LongTableNoNumber"."""
    convert_longtable_label_internal(document, False)
|
|
|
|
|
|
def convert_separator(document):
    """
    Convert layout separators to separator insets and add (LaTeX) paragraph
    breaks in order to mimic previous LaTeX export.
    """

    # Inset run appended at the end of an existing paragraph.
    parins = ["\\begin_inset Separator parbreak", "\\end_inset", ""]
    # A complete one-paragraph layout that holds only a separator inset.
    parlay = ["\\begin_layout Standard", "\\begin_inset Separator parbreak",
              "\\end_inset", "", "\\end_layout", ""]
    # Text-style properties that must be reset to their defaults before the
    # separator inset is appended to a paragraph that changed them.
    sty_dict = {
        "family" : "default",
        "series" : "default",
        "shape" : "default",
        "size" : "default",
        "bar" : "default",
        "color" : "inherit"
        }

    # Pass 1: append a parbreak inset to the paragraph that precedes each
    # \begin_deeper (start of a nested block).
    i = 0
    while 1:
        i = find_token(document.body, "\\begin_deeper", i)
        if i == -1:
            break

        j = find_token_backwards(document.body, "\\end_layout", i-1)
        if j != -1:
            # reset any text style before inserting the inset
            lay = get_containing_layout(document.body, j-1)
            if lay != False:
                content = "\n".join(document.body[lay[1]:lay[2]])
                for val in list(sty_dict.keys()):
                    if content.find("\\%s" % val) != -1:
                        document.body[j:j] = ["\\%s %s" % (val, sty_dict[val])]
                        # keep i and j pointing at the same lines after insert
                        i = i + 1
                        j = j + 1
            document.body[j:j] = parins
            i = i + len(parins) + 1
        else:
            i = i + 1

    # Pass 2: append a parbreak inset to a plain Standard paragraph (one
    # without its own \align or vertical space) that precedes an aligned
    # paragraph, so the alignment change still starts a new LaTeX paragraph.
    i = 0
    while 1:
        i = find_token(document.body, "\\align", i)
        if i == -1:
            break

        lay = get_containing_layout(document.body, i)
        if lay != False and lay[0] == "Plain Layout":
            # \align inside a Plain Layout (e.g. a table cell) is irrelevant
            i = i + 1
            continue

        j = find_token_backwards(document.body, "\\end_layout", i-1)
        if j != -1:
            lay = get_containing_layout(document.body, j-1)
            if lay != False and lay[0] == "Standard" \
               and find_token(document.body, "\\align", lay[1], lay[2]) == -1 \
               and find_token(document.body, "\\begin_inset VSpace", lay[1], lay[2]) == -1:
                # reset any text style before inserting the inset
                content = "\n".join(document.body[lay[1]:lay[2]])
                for val in list(sty_dict.keys()):
                    if content.find("\\%s" % val) != -1:
                        document.body[j:j] = ["\\%s %s" % (val, sty_dict[val])]
                        i = i + 1
                        j = j + 1
                document.body[j:j] = parins
                i = i + len(parins) + 1
            else:
                i = i + 1
        else:
            i = i + 1

    # Pass 3: replace explicit separator layouts ("Separator",
    # "--Separator--", "EndOfSlide") with a Standard paragraph containing
    # only a parbreak separator inset.
    regexp = re.compile(r'^\\begin_layout (?:(-*)|(\s*))(Separator|EndOfSlide)(?:(-*)|(\s*))$', re.IGNORECASE)

    i = 0
    while 1:
        i = find_re(document.body, regexp, i)
        if i == -1:
            return

        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Missing `\\end_layout'.")
            return

        # carry over any paragraph parameters of the separator layout
        lay = get_containing_layout(document.body, j-1)
        if lay != False:
            lines = document.body[lay[3]:lay[2]]
        else:
            lines = []

        document.body[i:j+1] = parlay
        if len(lines) > 0:
            document.body[i+1:i+1] = lines

        i = i + len(parlay) + len(lines) + 1
|
|
|
|
|
|
def revert_separator(document):
    " Revert separator insets to layout separators "

    # Beamer classes have their own "Separator" layout; all other classes
    # use the generic "--Separator--" layout.
    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass in beamer_classes:
        beglaysep = "\\begin_layout Separator"
    else:
        beglaysep = "\\begin_layout --Separator--"

    # Replacement snippets: a separator paragraph, an ERT holding "%"
    # (comment, i.e. invisible output) and an ERT holding a single space.
    parsep = [beglaysep, "", "\\end_layout", ""]
    comert = ["\\begin_inset ERT", "status collapsed", "",
              "\\begin_layout Plain Layout", "%", "\\end_layout",
              "", "\\end_inset", ""]
    empert = ["\\begin_inset ERT", "status collapsed", "",
              "\\begin_layout Plain Layout", " ", "\\end_layout",
              "", "\\end_inset", ""]

    i = 0
    while 1:
        i = find_token(document.body, "\\begin_inset Separator", i)
        if i == -1:
            return

        lay = get_containing_layout(document.body, i)
        if lay == False:
            document.warning("Malformed LyX document: Can't convert separator inset at line " + str(i))
            i = i + 1
            continue

        layoutname = lay[0]
        beg = lay[1]
        end = lay[2]
        # "plain", "parbreak" or "latexpar" (default "plain" if missing)
        kind = get_value(document.body, "\\begin_inset Separator", i, i+1, "plain").split()[1]
        before = document.body[beg+1:i]
        something_before = len(before) > 0 and len("".join(before)) > 0
        j = find_end_of_inset(document.body, i)
        after = document.body[j+1:end]
        something_after = len(after) > 0 and len("".join(after)) > 0
        if kind == "plain":
            # keep surrounding content; point beg at the inset itself
            beg = beg + len(before) + 1
        elif something_before:
            # close the current paragraph before the separator
            document.body[i:i] = ["\\end_layout", ""]
            i = i + 2
            j = j + 2
            beg = i
            end = end + 2

        if kind == "plain":
            # A plain separator becomes an ERT: a space if text follows,
            # otherwise a LaTeX comment ending the line.
            if something_after:
                document.body[beg:j+1] = empert
                i = i + len(empert)
            else:
                document.body[beg:j+1] = comert
                i = i + len(comert)
        else:
            if something_after:
                if layoutname == "Standard":
                    if not something_before:
                        # separator paragraph followed by a fresh Standard
                        document.body[beg:j+1] = parsep
                        i = i + len(parsep)
                        document.body[i:i] = ["", "\\begin_layout Standard"]
                        i = i + 2
                    else:
                        # just restart a Standard paragraph after the break
                        document.body[beg:j+1] = ["\\begin_layout Standard"]
                        i = i + 1
                else:
                    # non-Standard layout: nest the remainder one level deeper
                    document.body[beg:j+1] = ["\\begin_deeper"]
                    i = i + 1
                    end = end + 1 - (j + 1 - beg)
                    if not something_before:
                        document.body[i:i] = parsep
                        i = i + len(parsep)
                        end = end + len(parsep)
                    document.body[i:i] = ["\\begin_layout Standard"]
                    document.body[end+2:end+2] = ["", "\\end_deeper", ""]
                    i = i + 4
            else:
                # Nothing after the inset in this paragraph: decide whether a
                # separator layout is needed at all by looking at what follows.
                next_par_is_aligned = False
                k = find_nonempty_line(document.body, end+1)
                if k != -1 and check_token(document.body[k], "\\begin_layout"):
                    lay = get_containing_layout(document.body, k)
                    next_par_is_aligned = lay != False and \
                        find_token(document.body, "\\align", lay[1], lay[2]) != -1
                if k != -1 and not next_par_is_aligned \
                   and not check_token(document.body[k], "\\end_deeper") \
                   and not check_token(document.body[k], "\\begin_deeper"):
                    if layoutname == "Standard":
                        document.body[beg:j+1] = [beglaysep]
                        i = i + 1
                    else:
                        document.body[beg:j+1] = ["\\begin_deeper", beglaysep]
                        end = end + 2 - (j + 1 - beg)
                        document.body[end+1:end+1] = ["", "\\end_deeper", ""]
                        i = i + 3
                else:
                    # separator is redundant here; drop it (and, when the
                    # paragraph held nothing else, most of the paragraph too)
                    if something_before:
                        del document.body[i:end+1]
                    else:
                        del document.body[i:end-1]

        i = i + 1
|
|
|
|
|
|
def convert_parbreak(document):
    """
    Convert parbreak separators not specifically used to separate
    environments to latexpar separators.

    A parbreak inside a Standard paragraph is converted only when the
    paragraph contains other content as well; a parbreak alone in a
    Standard paragraph really separates environments and keeps its kind.
    """
    parbreakinset = "\\begin_inset Separator parbreak"
    i = 0
    while 1:
        i = find_token(document.body, parbreakinset, i)
        if i == -1:
            return
        lay = get_containing_layout(document.body, i)
        if lay == False:
            document.warning("Malformed LyX document: Can't convert separator inset at line " + str(i))
            i += 1
            continue
        if lay[0] == "Standard":
            # Convert only if not alone in the paragraph.
            # (The original condition also tested
            # "not check_token(document.body[i], parbreakinset)", which is
            # always False right after a successful find_token and therefore
            # dead code; it has been removed.)
            k1 = find_nonempty_line(document.body, lay[1] + 1, i + 1)
            k2 = find_nonempty_line(document.body, i + 1, lay[2])
            if (k1 < i) or (k2 > i + 1):
                document.body[i] = document.body[i].replace("parbreak", "latexpar")
        else:
            document.body[i] = document.body[i].replace("parbreak", "latexpar")
        i += 1
|
|
|
|
|
|
def revert_parbreak(document):
    """Rename all latexpar separator insets back to parbreak."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Separator latexpar", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("latexpar", "parbreak")
        pos += 1
|
|
|
|
|
|
def revert_smash(document):
    " Set amsmath to on if smash commands are used "

    # Commands that older LyX only renders correctly when amsmath is
    # loaded explicitly.
    commands = ["smash[t]", "smash[b]", "notag"]
    i = find_token(document.header, "\\use_package amsmath", 0)
    if i == -1:
        document.warning("Malformed LyX document: Can't find \\use_package amsmath.")
        return
    value = get_value(document.header, "\\use_package amsmath", i).split()[1]
    if value != "1":
        # nothing to do if package is not auto but on or off
        return
    j = 0
    while True:
        j = find_token(document.body, '\\begin_inset Formula', j)
        if j == -1:
            return
        k = find_end_of_inset(document.body, j)
        if k == -1:
            document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(j))
            j += 1
            continue
        code = "\n".join(document.body[j:k])
        for c in commands:
            if code.find("\\%s" % c) != -1:
                # set amsmath to on, since it is loaded by the newer format
                document.header[i] = "\\use_package amsmath 2"
                return
        # continue scanning after this formula
        j = k
|
|
|
|
|
|
def revert_swissgerman(document):
    """Replace the language german-ch-old with plain german."""
    # Document-wide language setting in the header.
    if document.language == "german-ch-old":
        document.language = "german"
        hdr = find_token(document.header, "\\language", 0)
        if hdr != -1:
            document.header[hdr] = "\\language german"
    # Per-paragraph language switches in the body.
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang german-ch-old", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace(
            "\\lang german-ch-old", "\\lang german")
        pos += 1
|
|
|
|
|
|
def revert_use_package(document, pkg, commands, oldauto, supported):
    """Revert the \\use_package header setting for `pkg`, loading the
    package via the preamble where the older format needs it.

    `commands` lists math commands whose presence in a Formula inset means
    the package is actually required.
    """
    # oldauto defines how the version we are reverting to behaves:
    # if it is true, the old version uses the package automatically.
    # if it is false, the old version never uses the package.
    # If "supported" is true, the target version also supports this
    # package natively.
    regexp = re.compile(r'(\\use_package\s+%s)' % pkg)
    p = find_re(document.header, regexp, 0)
    value = "1" # default is auto
    if p != -1:
        value = get_value(document.header, "\\use_package" , p).split()[1]
        if not supported:
            # the target format has no \use_package line for this package
            del document.header[p]
    if value == "2" and not supported: # on
        add_to_preamble(document, ["\\usepackage{" + pkg + "}"])
    elif value == "1" and not oldauto: # auto
        # Only load the package if one of its commands really occurs in
        # a formula.
        i = 0
        while True:
            i = find_token(document.body, '\\begin_inset Formula', i)
            if i == -1:
                return
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
                i += 1
                continue
            code = "\n".join(document.body[i:j])
            for c in commands:
                if code.find("\\%s" % c) != -1:
                    if supported:
                        document.header[p] = "\\use_package " + pkg + " 2"
                    else:
                        add_to_preamble(document, ["\\usepackage{" + pkg + "}"])
                    return
            i = j
|
|
|
|
|
|
# Extensible-arrow commands provided by the mathtools package; used by
# revert_xarrow() to decide whether mathtools must be loaded explicitly.
mathtools_commands = ["xhookrightarrow", "xhookleftarrow", "xRightarrow", \
                      "xrightharpoondown", "xrightharpoonup", "xrightleftharpoons", \
                      "xLeftarrow", "xleftharpoondown", "xleftharpoonup", \
                      "xleftrightarrow", "xLeftrightarrow", "xleftrightharpoons", \
                      "xmapsto"]
|
|
|
|
def revert_xarrow(document):
    """Drop native mathtools support; load the package explicitly when
    one of its extensible arrow commands is used in a formula."""
    revert_use_package(document, "mathtools", mathtools_commands, False, True)
|
|
|
|
|
|
def revert_beamer_lemma(document):
    " Reverts beamer lemma layout to ERT "

    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    # True while the previous paragraph was also a Lemma, so begin/end
    # commands are emitted only around whole runs of Lemma paragraphs.
    consecutive = False
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout Lemma", i)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Lemma layout")
            i += 1
            continue
        # Argument 1 = overlay specification <...>, Argument 2 = title [...]
        arg1 = find_token(document.body, "\\begin_inset Argument 1", i, j)
        endarg1 = find_end_of_inset(document.body, arg1)
        arg2 = find_token(document.body, "\\begin_inset Argument 2", i, j)
        endarg2 = find_end_of_inset(document.body, arg2)
        subst1 = []
        subst2 = []
        if arg1 != -1:
            beginPlain1 = find_token(document.body, "\\begin_layout Plain Layout", arg1, endarg1)
            if beginPlain1 == -1:
                document.warning("Malformed LyX document: Can't find arg1 plain Layout")
                i += 1
                continue
            endPlain1 = find_end_of_inset(document.body, beginPlain1)
            content1 = document.body[beginPlain1 + 1 : endPlain1 - 2]
            subst1 = put_cmd_in_ert("<") + content1 + put_cmd_in_ert(">")
        if arg2 != -1:
            beginPlain2 = find_token(document.body, "\\begin_layout Plain Layout", arg2, endarg2)
            if beginPlain2 == -1:
                document.warning("Malformed LyX document: Can't find arg2 plain Layout")
                i += 1
                continue
            endPlain2 = find_end_of_inset(document.body, beginPlain2)
            content2 = document.body[beginPlain2 + 1 : endPlain2 - 2]
            subst2 = put_cmd_in_ert("[") + content2 + put_cmd_in_ert("]")

        # remove Arg insets (later one first so earlier indices stay valid;
        # note -1 compares smaller than any real index)
        if arg1 < arg2:
            del document.body[arg2 : endarg2 + 1]
            if arg1 != -1:
                del document.body[arg1 : endarg1 + 1]
        if arg2 < arg1:
            del document.body[arg1 : endarg1 + 1]
            if arg2 != -1:
                del document.body[arg2 : endarg2 + 1]

        # index of end layout has probably changed
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Lemma layout")
            i += 1
            continue

        begcmd = []

        # if this is not a consecutive env, add start command
        if not consecutive:
            begcmd = put_cmd_in_ert("\\begin{lemma}")

        # has this a consecutive lemma?
        consecutive = document.body[j + 2] == "\\begin_layout Lemma"

        # if this is not followed by a consecutive env, add end command
        if not consecutive:
            document.body[j : j + 1] = put_cmd_in_ert("\\end{lemma}") + ["\\end_layout"]

        document.body[i : i + 1] = ["\\begin_layout Standard", ""] + begcmd + subst1 + subst2

        i = j
|
|
|
|
|
|
|
|
def revert_question_env(document):
    """
    Reverts question and question* environments of
    theorems-ams-extended-bytype module to ERT
    """

    # Do we use theorems-ams-extended-bytype module?
    if not "theorems-ams-extended-bytype" in document.get_module_list():
        return

    # True while the previous paragraph was a Question of the same kind,
    # so begin/end commands wrap whole runs of Question paragraphs.
    consecutive = False
    i = 0
    while True:
        # matches both "Question" and "Question*"
        i = find_token(document.body, "\\begin_layout Question", i)
        if i == -1:
            return

        starred = document.body[i] == "\\begin_layout Question*"

        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Question layout")
            i += 1
            continue

        # if this is not a consecutive env, add start command
        begcmd = []
        if not consecutive:
            if starred:
                begcmd = put_cmd_in_ert("\\begin{question*}")
            else:
                begcmd = put_cmd_in_ert("\\begin{question}")

        # has this a consecutive theorem of same type?
        consecutive = False
        if starred:
            consecutive = document.body[j + 2] == "\\begin_layout Question*"
        else:
            consecutive = document.body[j + 2] == "\\begin_layout Question"

        # if this is not followed by a consecutive env, add end command
        if not consecutive:
            if starred:
                document.body[j : j + 1] = put_cmd_in_ert("\\end{question*}") + ["\\end_layout"]
            else:
                document.body[j : j + 1] = put_cmd_in_ert("\\end{question}") + ["\\end_layout"]

        document.body[i : i + 1] = ["\\begin_layout Standard", ""] + begcmd

        # provide the theorem environment the ERT now refers to
        add_to_preamble(document, "\\providecommand{\questionname}{Question}")

        if starred:
            add_to_preamble(document, "\\theoremstyle{plain}\n" \
                            "\\newtheorem*{question*}{\\protect\\questionname}")
        else:
            add_to_preamble(document, "\\theoremstyle{plain}\n" \
                            "\\newtheorem{question}{\\protect\\questionname}")

        i = j
|
|
|
|
|
|
def convert_dashes(document):
    "convert -- and --- to \\twohyphens and \\threehyphens"

    if document.backend != "latex":
        return

    i = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           words[1] in ["CommandInset", "ERT", "External", "Formula", "Graphics", "IPA", "listings"]:
            # must not replace anything in insets that store LaTeX contents in .lyx files
            # (math and command insets withut overridden read() and write() methods
            # filtering out IPA makes Text::readParToken() more simple
            # skip ERT as well since it is not needed there
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " + words[1] + " inset at line " + str(i))
                i += 1
            else:
                i = j
            continue
        while True:
            j = document.body[i].find("--")
            if j == -1:
                break
            front = document.body[i][:j]
            back = document.body[i][j+2:]
            # We can have an arbitrary number of consecutive hyphens.
            # These must be split into the corresponding number of two and three hyphens
            # We must match what LaTeX does: First try emdash, then endash, then single hyphen
            if back.find("-") == 0:
                # three hyphens in a row: emdash
                back = back[1:]
                if len(back) > 0:
                    # rest of the line moves to a new body line; it is
                    # re-examined on the next outer iteration
                    document.body.insert(i+1, back)
                document.body[i] = front + "\\threehyphens"
            else:
                if len(back) > 0:
                    document.body.insert(i+1, back)
                document.body[i] = front + "\\twohyphens"
        i += 1
|
|
|
|
|
|
def revert_dashes(document):
    "convert \\twohyphens and \\threehyphens to -- and ---"

    i = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           words[1] in ["CommandInset", "ERT", "External", "Formula", "Graphics", "IPA", "listings"]:
            # see convert_dashes
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " + words[1] + " inset at line " + str(i))
                i += 1
            else:
                i = j
            continue
        replaced = False
        if document.body[i].find("\\twohyphens") >= 0:
            document.body[i] = document.body[i].replace("\\twohyphens", "--")
            replaced = True
        if document.body[i].find("\\threehyphens") >= 0:
            document.body[i] = document.body[i].replace("\\threehyphens", "---")
            replaced = True
        # Rejoin the line with its successor (undoing convert_dashes' line
        # split), unless the successor starts a command of its own or the
        # joined line would become overly long.
        if replaced and i+1 < len(document.body) and \
           (document.body[i+1].find("\\") != 0 or \
            document.body[i+1].find("\\twohyphens") == 0 or
            document.body[i+1].find("\\threehyphens") == 0) and \
           len(document.body[i]) + len(document.body[i+1]) <= 80:
            document.body[i] = document.body[i] + document.body[i+1]
            document.body[i+1:i+2] = []
        else:
            i += 1
|
|
|
|
|
|
# order is important for the last three!
phrases = ["LyX", "LaTeX2e", "LaTeX", "TeX"]


def is_part_of_converted_phrase(line, j, phrase):
    """Return True when the occurrence of `phrase` at index `j` of `line`
    is just the tail of a phrase already converted to a
    \\SpecialCharNoPassThru macro (e.g. "TeX" inside "...\\LaTeX")."""
    tail = j + len(phrase)
    for candidate in phrases:
        macro = "\\SpecialCharNoPassThru \\" + candidate
        start = tail - len(macro)
        if start >= 0 and line[start:tail] == macro:
            return True
    return False
|
|
|
|
|
|
def convert_phrases(document):
    "convert special phrases from plain text to \\SpecialCharNoPassThru"

    if document.backend != "latex":
        return

    for phrase in phrases:
        i = 0
        while i < len(document.body):
            words = document.body[i].split()
            if len(words) > 1 and words[0] == "\\begin_inset" and \
               words[1] in ["CommandInset", "External", "Formula", "Graphics", "listings"]:
                # must not replace anything in insets that store LaTeX contents in .lyx files
                # (math and command insets withut overridden read() and write() methods
                j = find_end_of_inset(document.body, i)
                if j == -1:
                    document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
                    i += 1
                else:
                    i = j
                continue
            if document.body[i].find("\\") == 0:
                # skip lines that are commands themselves
                i += 1
                continue
            j = document.body[i].find(phrase)
            if j == -1:
                i += 1
                continue
            if not is_part_of_converted_phrase(document.body[i], j, phrase):
                front = document.body[i][:j]
                back = document.body[i][j+len(phrase):]
                if len(back) > 0:
                    # the remainder of the line is re-examined next iteration
                    document.body.insert(i+1, back)
                # We cannot use SpecialChar since we do not know whether we are outside passThru
                document.body[i] = front + "\\SpecialCharNoPassThru \\" + phrase
            i += 1
|
|
|
|
|
|
def revert_phrases(document):
    "convert special phrases to plain text"

    i = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           words[1] in ["CommandInset", "External", "Formula", "Graphics", "listings"]:
            # see convert_phrases
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
                i += 1
            else:
                i = j
            continue
        replaced = False
        for phrase in phrases:
            # we can replace SpecialChar since LyX ensures that it cannot be inserted into passThru parts
            if document.body[i].find("\\SpecialChar \\" + phrase) >= 0:
                document.body[i] = document.body[i].replace("\\SpecialChar \\" + phrase, phrase)
                replaced = True
            if document.body[i].find("\\SpecialCharNoPassThru \\" + phrase) >= 0:
                document.body[i] = document.body[i].replace("\\SpecialCharNoPassThru \\" + phrase, phrase)
                replaced = True
        # Rejoin the line with its successor (convert_phrases split them),
        # unless the successor is a command or the result would be too long.
        if replaced and i+1 < len(document.body) and \
           (document.body[i+1].find("\\") != 0 or \
            document.body[i+1].find("\\SpecialChar") == 0) and \
           len(document.body[i]) + len(document.body[i+1]) <= 80:
            document.body[i] = document.body[i] + document.body[i+1]
            document.body[i+1:i+2] = []
            # re-examine the joined line for further phrases
            i -= 1
        i += 1
|
|
|
|
|
|
def convert_specialchar_internal(document, forward):
    """Translate \\SpecialChar arguments between the old LaTeX-like
    spellings and the new symbolic names (e.g. "\\-" <-> "softhyphen").

    forward=True converts old -> new; forward=False reverts new -> old.
    Insets that store raw LaTeX in the .lyx file are skipped.
    """
    # NOTE: for the reverse direction the entry order matters: "\\LaTeX2e"
    # must be handled before "\\LaTeX" (dicts preserve insertion order).
    specialchars = {"\\-":"softhyphen", "\\textcompwordmark{}":"ligaturebreak", \
        "\\@.":"endofsentence", "\\ldots{}":"ldots", \
        "\\menuseparator":"menuseparator", "\\slash{}":"breakableslash", \
        "\\nobreakdash-":"nobreakdash", "\\LyX":"LyX", \
        "\\TeX":"TeX", "\\LaTeX2e":"LaTeX2e", \
        "\\LaTeX":"LaTeX" # must be after LaTeX2e
    }

    i = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           words[1] in ["CommandInset", "External", "Formula", "Graphics", "listings"]:
            # see convert_phrases
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
                i += 1
            else:
                i = j
            continue
        # Bug fix: the original used dict.iteritems(), which only exists on
        # Python 2; items() works on both 2 and 3.
        for key, value in specialchars.items():
            if forward:
                document.body[i] = document.body[i].replace("\\SpecialChar " + key, "\\SpecialChar " + value)
                document.body[i] = document.body[i].replace("\\SpecialCharNoPassThru " + key, "\\SpecialCharNoPassThru " + value)
            else:
                document.body[i] = document.body[i].replace("\\SpecialChar " + value, "\\SpecialChar " + key)
                document.body[i] = document.body[i].replace("\\SpecialCharNoPassThru " + value, "\\SpecialCharNoPassThru " + key)
        i += 1
|
|
|
|
|
|
def convert_specialchar(document):
    """Rewrite \\SpecialChar arguments using the new symbolic names."""
    convert_specialchar_internal(document, True)
|
|
|
|
|
|
def revert_specialchar(document):
    """Rewrite \\SpecialChar arguments back to the old spellings."""
    convert_specialchar_internal(document, False)
|
|
|
|
|
|
def revert_georgian(document):
    """Set the document language to English but assure Georgian output."""
    if document.language != "georgian":
        return
    document.language = "english"
    lang = find_token(document.header, "\\language georgian", 0)
    if lang != -1:
        document.header[lang] = "\\language english"
    pkg = find_token(document.header, "\\language_package default", 0)
    if pkg != -1:
        document.header[pkg] = "\\language_package babel"
    # Hand "georgian" to babel via the document class options.
    opts = find_token(document.header, "\\options", 0)
    if opts != -1:
        document.header[opts] = document.header[opts].replace("\\options", "\\options georgian,")
    else:
        anchor = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(anchor + 1, "\\options georgian")
|
|
|
|
|
|
def revert_sigplan_doi(document):
    " Reverts sigplanconf DOI layout to ERT "

    if document.textclass != "sigplanconf":
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout DOI", i)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of DOI layout")
            i += 1
            continue

        # move the DOI content into a \doi{} preamble command
        content = lyx2latex(document, document.body[i:j + 1])
        add_to_preamble(document, ["\\doi{" + content + "}"])
        del document.body[i:j + 1]
        # no need to reset i
|
|
|
|
|
|
def revert_ex_itemargs(document):
    " Reverts \\item arguments of the example environments (Linguistics module) to TeX-code "

    if not "linguistics" in document.get_module_list():
        return

    i = 0
    example_layouts = ["Numbered Examples (consecutive)", "Subexample"]
    while True:
        i = find_token(document.body, "\\begin_inset Argument item:", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            i += 1
            continue
        # start line of the paragraph's content
        parbeg = parent[3]
        layoutname = parent[0]
        if layoutname in example_layouts:
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endPlain = find_end_of_layout(document.body, beginPlain)
            content = document.body[beginPlain + 1 : endPlain]
            # replace the inset by "[content]" ERT at paragraph start
            del document.body[i:j+1]
            subst = put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
            document.body[parbeg : parbeg] = subst
        i += 1
|
|
|
|
|
|
def revert_forest(document):
    " Reverts the forest environment (Linguistics module) to TeX-code "

    if not "linguistics" in document.get_module_list():
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Flex Structure Tree", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Structure Tree inset")
            i += 1
            continue

        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = lyx2latex(document, document.body[beginPlain : endPlain])

        # the forest package is needed for the reverted ERT
        add_to_preamble(document, ["\\usepackage{forest}"])

        document.body[i:j + 1] = put_cmd_in_ert("\\begin{forest}" + content + "\\end{forest}")
        # no need to reset i
|
|
|
|
|
|
def revert_glossgroup(document):
    " Reverts the GroupGlossedWords inset (Linguistics module) to TeX-code "

    if "linguistics" not in document.get_module_list():
        return

    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Flex GroupGlossedWords", pos)
        if pos == -1:
            return
        inset_end = find_end_of_inset(document.body, pos)
        if inset_end == -1:
            document.warning("Malformed LyX document: Can't find end of GroupGlossedWords inset")
            pos += 1
            continue

        # Grab the Plain Layout contents as verbatim text and replace the
        # whole inset by the grouped form "{ content }".
        plain = find_token(document.body, "\\begin_layout Plain Layout", pos)
        plain_end = find_end_of_layout(document.body, plain)
        verbatim = lyx2verbatim(document, document.body[plain : plain_end])

        document.body[pos : inset_end + 1] = ["{", "", verbatim, "", "}"]
        # no need to reset the search position
|
|
|
|
|
|
def revert_newgloss(document):
    " Reverts the new Glosse insets (Linguistics module) to the old format "

    if not "linguistics" in document.get_module_list():
        return

    glosses = ("\\begin_inset Flex Glosse", "\\begin_inset Flex Tri-Glosse")
    for glosse in glosses:
        i = 0
        while True:
            i = find_token(document.body, glosse, i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Glosse inset")
                i += 1
                continue

            # Argument 1 holds the gloss translation; move it back into the
            # inset body as an old-style "\glt" line.
            arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            argcontent = ""
            if arg != -1:
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg plain Layout")
                    i += 1
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                argcontent = lyx2verbatim(document, document.body[argbeginPlain : argendPlain - 2])

                # Append the translation paragraph just before the inset end.
                document.body[j:j] = ["", "\\begin_layout Plain Layout","\\backslash", "glt ",
                    argcontent, "\\end_layout"]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # Flatten the remaining gloss body into a single verbatim line.
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endPlain = find_end_of_layout(document.body, beginPlain)
            content = lyx2verbatim(document, document.body[beginPlain : endPlain])

            document.body[beginPlain + 1:endPlain] = [content]
            i = beginPlain + 1

    # Dissolve ERT insets
    for glosse in glosses:
        i = 0
        while True:
            i = find_token(document.body, glosse, i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Glosse inset")
                i += 1
                continue
            # Replace every ERT inset inside this gloss by its plain text.
            while True:
                ert = find_token(document.body, "\\begin_inset ERT", i, j)
                if ert == -1:
                    break
                ertend = find_end_of_inset(document.body, ert)
                if ertend == -1:
                    document.warning("Malformed LyX document: Can't find end of ERT inset")
                    ert += 1
                    continue
                ertcontent = get_ert(document.body, ert, True)
                document.body[ert : ertend + 1] = [ertcontent]
            i += 1
|
|
|
|
|
|
def convert_newgloss(document):
    " Converts Glosse insets (Linguistics module) to the new format "

    if not "linguistics" in document.get_module_list():
        return

    glosses = ("\\begin_inset Flex Glosse", "\\begin_inset Flex Tri-Glosse")
    for glosse in glosses:
        i = 0
        while True:
            i = find_token(document.body, glosse, i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Glosse inset")
                i += 1
                continue

            # Walk all Plain Layout paragraphs inside this gloss; k is the
            # scan position, j is re-computed after each edit because the
            # insertions below shift the inset end.
            k = i
            while True:
                argcontent = []
                beginPlain = find_token(document.body, "\\begin_layout Plain Layout", k, j)
                if beginPlain == -1:
                    break
                endPlain = find_end_of_layout(document.body, beginPlain)
                if endPlain == -1:
                    document.warning("Malformed LyX document: Can't find end of Glosse layout")
                    i += 1
                    continue

                # An old-style "\glt" translation line becomes Argument 1;
                # any other paragraph is wrapped in an ERT inset.
                glt = find_token(document.body, "\\backslash", beginPlain, endPlain)
                if glt != -1 and document.body[glt + 1].startswith("glt"):
                    document.body[glt + 1] = document.body[glt + 1].lstrip("glt").lstrip()
                    argcontent = document.body[glt + 1 : endPlain]
                    document.body[beginPlain + 1 : endPlain] = ["\\begin_inset Argument 1", "status open", "",
                        "\\begin_layout Plain Layout", "\\begin_inset ERT", "status open", "",
                        "\\begin_layout Plain Layout", ""] + argcontent + ["\\end_layout", "", "\\end_inset", "",
                        "\\end_layout", "", "\\end_inset"]
                else:
                    content = document.body[beginPlain + 1 : endPlain]
                    document.body[beginPlain + 1 : endPlain] = ["\\begin_inset ERT", "status open", "",
                        "\\begin_layout Plain Layout"] + content + ["\\end_layout", "", "\\end_inset"]

                # Re-locate the paragraph end and inset end after editing.
                endPlain = find_end_of_layout(document.body, beginPlain)
                k = endPlain
                j = find_end_of_inset(document.body, i)

            i = endPlain + 1
|
|
|
|
|
|
def convert_BoxFeatures(document):
    " adds new box features "

    pos = 0
    new_params = ['thickness "0.4pt"', 'separation "3pt"', 'shadowsize "4pt"']
    while True:
        pos = find_token(document.body, "height_special", pos)
        if pos == -1:
            return
        # Insert the defaults for the three new box parameters directly
        # after the height_special line, then skip past them.
        document.body[pos + 1 : pos + 1] = list(new_params)
        pos += 4
|
|
|
|
|
|
def revert_BoxFeatures(document):
    " outputs new box features as TeX code "

    i = 0
    defaultSep = "3pt"
    defaultThick = "0.4pt"
    defaultShadow = "4pt"
    while True:
        i = find_token(document.body, "height_special", i)
        if i == -1:
            return
        # read out the values (the three parameter lines follow directly)
        beg = document.body[i+1].find('"');
        end = document.body[i+1].rfind('"');
        thickness = document.body[i+1][beg+1:end];
        beg = document.body[i+2].find('"');
        end = document.body[i+2].rfind('"');
        separation = document.body[i+2][beg+1:end];
        beg = document.body[i+3].find('"');
        end = document.body[i+3].rfind('"');
        shadowsize = document.body[i+3][beg+1:end];
        # delete the specification
        del document.body[i+1:i+4]
        # output ERT
        # first output the closing brace
        # NOTE(review): the i+10 / i-10 / i-5 offsets assume a fixed number
        # of lines between the box parameters and the surrounding layout;
        # verify against the box inset format written by this file version.
        if shadowsize != defaultShadow or separation != defaultSep or thickness != defaultThick:
            document.body[i + 10 : i + 10] = put_cmd_in_ert("}")
        # now output the lengths; exactly one of the following branches
        # fires, covering every combination of non-default values.
        if shadowsize != defaultShadow or separation != defaultSep or thickness != defaultThick:
            document.body[i - 10 : i - 10] = put_cmd_in_ert("{")
        if thickness != defaultThick:
            document.body[i - 5 : i - 4] = ["{\\backslash fboxrule " + thickness]
        if separation != defaultSep and thickness == defaultThick:
            document.body[i - 5 : i - 4] = ["{\\backslash fboxsep " + separation]
        if separation != defaultSep and thickness != defaultThick:
            document.body[i - 5 : i - 4] = ["{\\backslash fboxrule " + thickness + "\\backslash fboxsep " + separation]
        if shadowsize != defaultShadow and separation == defaultSep and thickness == defaultThick:
            document.body[i - 5 : i - 4] = ["{\\backslash shadowsize " + shadowsize]
        if shadowsize != defaultShadow and separation != defaultSep and thickness == defaultThick:
            document.body[i - 5 : i - 4] = ["{\\backslash fboxsep " + separation + "\\backslash shadowsize " + shadowsize]
        if shadowsize != defaultShadow and separation == defaultSep and thickness != defaultThick:
            document.body[i - 5 : i - 4] = ["{\\backslash fboxrule " + thickness + "\\backslash shadowsize " + shadowsize]
        if shadowsize != defaultShadow and separation != defaultSep and thickness != defaultThick:
            document.body[i - 5 : i - 4] = ["{\\backslash fboxrule " + thickness + "\\backslash fboxsep " + separation + "\\backslash shadowsize " + shadowsize]
        i = i + 11
|
|
|
|
|
|
def convert_origin(document):
    " Insert the origin tag "

    i = find_token(document.header, "\\textclass ", 0)
    if i == -1:
        document.warning("Malformed LyX document: No \\textclass!!")
        return
    if document.dir == "":
        # Document came from stdin, so there is no directory to record.
        origin = "stdin"
    else:
        # If the document lives below the system LyX directory, record the
        # origin relative to it so the tag stays valid across installations.
        relpath = ''
        if document.systemlyxdir and document.systemlyxdir != '':
            try:
                if os.path.isabs(document.dir):
                    absdir = os.path.normpath(document.dir)
                else:
                    absdir = os.path.normpath(os.path.abspath(document.dir))
                if os.path.isabs(document.systemlyxdir):
                    abssys = os.path.normpath(document.systemlyxdir)
                else:
                    abssys = os.path.normpath(os.path.abspath(document.systemlyxdir))
                relpath = os.path.relpath(absdir, abssys)
                # A path starting with ".." is outside the system dir.
                if relpath.find('..') == 0:
                    relpath = ''
            except:
                # NOTE(review): bare except — deliberately treats any
                # path-computation failure (e.g. different drives on
                # Windows) as "not below the system dir".
                relpath = ''
        if relpath == '':
            origin = document.dir.replace('\\', '/') + '/'
        else:
            origin = os.path.join("/systemlyxdir", relpath).replace('\\', '/') + '/'
        if os.name != 'nt':
            # NOTE(review): unicode() is Python-2-only; this module is
            # written for the Python 2 lyx2lyx runtime.
            origin = unicode(origin, sys.getfilesystemencoding())
    document.header[i:i] = ["\\origin " + origin]
|
|
|
|
|
|
def revert_origin(document):
    " Remove the origin tag "

    pos = find_token(document.header, "\\origin ", 0)
    if pos != -1:
        # Drop the header line; nothing else references it.
        del document.header[pos]
    else:
        document.warning("Malformed LyX document: No \\origin!!")
|
|
|
|
|
|
# Color names newly supported by \color in LyX 2.2; revert_textcolor
# below turns occurrences of these back into \textcolor TeX code.
color_names = ["brown", "darkgray", "gray", \
        "lightgray", "lime", "olive", "orange", \
        "pink", "purple", "teal", "violet"]
|
|
|
|
def revert_textcolor(document):
    """revert new \\textcolor colors to TeX code

    Every "\\color <name>" line using one of the new color_names is replaced
    by ERT "\\textcolor{name}{", with a matching closing brace inserted
    before the next \\color or the end of the layout.  xcolor is added to
    the preamble once, on the first hit.
    """

    i = 0
    j = 0
    xcolor = False
    while True:
        i = find_token(document.body, "\\color ", i)
        if i == -1:
            return
        for color in list(color_names):
            if document.body[i] == "\\color " + color:
                # register that xcolor must be loaded in the preamble
                if not xcolor:
                    xcolor = True
                    # FIX: was "{\usepackage{xcolor}}" — the stray "\u"
                    # escape is a SyntaxError on Python 3; on Python 2 the
                    # escaped form below produces the identical string.
                    add_to_preamble(document, ["\\@ifundefined{rangeHsb}{\\usepackage{xcolor}}{}"])
                # find the next \color and/or the next \end_layout
                j = find_token(document.body, "\\color", i + 1)
                k = find_token(document.body, "\\end_layout", i + 1)
                if j == -1 and k != -1:
                    j = k + 1
                # output TeX code: first the closing brace at whichever
                # boundary comes first ...
                if k < j:
                    document.body[k: k] = put_cmd_in_ert("}")
                else:
                    document.body[j: j] = put_cmd_in_ert("}")
                # ... then the \textcolor command replacing the \color line
                document.body[i : i + 1] = put_cmd_in_ert("\\textcolor{" + color + "}{")
        i = i + 1
|
|
|
|
|
|
def convert_colorbox(document):
    " adds color settings for boxes "

    pos = 0
    while True:
        pos = find_token(document.body, "shadowsize", pos)
        if pos == -1:
            return
        # Insert the defaults for the two new color parameters right after
        # the shadowsize line, then continue the scan past them.
        defaults = ['framecolor "black"', 'backgroundcolor "none"']
        document.body[pos + 1 : pos + 1] = defaults
        pos += 3
|
|
|
|
|
|
def revert_colorbox(document):
    """outputs color settings for boxes as TeX code

    Strips the framecolor/backgroundcolor lines from each Box inset and,
    when either differs from the default, wraps the inset in ERT
    \\fcolorbox / \\colorbox commands (loading xcolor in the preamble).
    """

    binset = 0
    defaultframecolor = "black"
    defaultbackcolor = "none"
    while True:
        binset = find_token(document.body, "\\begin_inset Box", binset)
        if binset == -1:
            return

        einset = find_end_of_inset(document.body, binset)
        if einset == -1:
            document.warning("Malformed LyX document: Can't find end of box inset!")
            binset += 1
            continue

        blay = find_token(document.body, "\\begin_layout", binset, einset)
        if blay == -1:
            document.warning("Malformed LyX document: Can't find start of layout!")
            binset = einset
            continue

        # doing it this way, we make sure only to find a framecolor option
        frame = find_token(document.body, "framecolor", binset, blay)
        if frame == -1:
            binset = einset
            continue

        beg = document.body[frame].find('"')
        end = document.body[frame].rfind('"')
        framecolor = document.body[frame][beg + 1 : end]

        # the background color should be on the next line
        bgcolor = frame + 1
        beg = document.body[bgcolor].find('"')
        end = document.body[bgcolor].rfind('"')
        backcolor = document.body[bgcolor][beg + 1 : end]

        # delete both parameter lines and adjust the end of the inset
        del document.body[frame : frame + 2]
        einset -= 2

        # a non-default frame is drawn by \fcolorbox itself, so the box
        # must not also draw its own frame
        if document.body[binset] == "\\begin_inset Box Boxed" and \
           framecolor != defaultframecolor:
            document.body[binset] = "\\begin_inset Box Frameless"

        # output TeX code only when a non-default color is involved
        if framecolor != defaultframecolor or backcolor != defaultbackcolor:
            # we also need to load xcolor in the preamble, but only once
            # FIX: was "{\usepackage{xcolor}}" — the stray "\u" escape is a
            # SyntaxError on Python 3; the escaped form below produces the
            # identical string on Python 2.
            add_to_preamble(document, ["\\@ifundefined{rangeHsb}{\\usepackage{xcolor}}{}"])
            # first the closing brace after the inset ...
            document.body[einset + 1 : einset + 1] = put_cmd_in_ert("}")
            # ... then the opening command before it
            if framecolor != defaultframecolor:
                document.body[binset:binset] = put_cmd_in_ert("\\fcolorbox{" + framecolor + "}{" + backcolor + "}{")
            else:
                document.body[binset:binset] = put_cmd_in_ert("\\colorbox{" + backcolor + "}{")

        binset = einset
|
|
|
|
|
|
def revert_mathmulticol(document):
    " Convert formulas to ERT if they contain multicolumns "

    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Formula', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
            i += 1
            continue
        # Reassemble the formula source as one string for searching.
        lines = document.body[i:j]
        lines[0] = lines[0].replace('\\begin_inset Formula', '').lstrip()
        code = "\n".join(lines)
        converted = False
        k = 0
        n = 0
        while n >= 0:
            n = code.find("\\multicolumn", k)
            # no need to convert degenerated multicolumn cells,
            # they work in old LyX versions as "math ERT"
            if n != -1 and code.find("\\multicolumn{1}", k) != n:
                ert = put_cmd_in_ert(code)
                document.body[i:j+1] = ert
                converted = True
                break
            else:
                # advance past this occurrence (12 == len("\multicolumn"))
                k = n + 12
        if converted:
            # skip past the ERT inset that replaced the formula
            i = find_end_of_inset(document.body, i)
        else:
            i = j
|
|
|
|
|
|
def revert_jss(document):
    " Reverts JSS In_Preamble commands to ERT in preamble "

    if document.textclass != "jss":
        return

    # Cursors for the five inline Flex insets handled below; a value of -1
    # means that inset type is exhausted.
    h = 0
    m = 0
    j = 0
    k = 0
    n = 0
    while True:
        # at first revert the inset layouts because they can be part of the In_Preamble layouts
        while m != -1 or j != -1 or h != -1 or k != -1 or n != -1:
            # \pkg
            if h != -1:
                h = find_token(document.body, "\\begin_inset Flex Pkg", h)
                if h != -1:
                    endh = find_end_of_inset(document.body, h)
                    document.body[endh - 2 : endh + 1] = put_cmd_in_ert("}")
                    document.body[h : h + 4] = put_cmd_in_ert("\\pkg{")
                    h = h + 5
            # \proglang
            if m != -1:
                m = find_token(document.body, "\\begin_inset Flex Proglang", m)
                if m != -1:
                    endm = find_end_of_inset(document.body, m)
                    document.body[endm - 2 : endm + 1] = put_cmd_in_ert("}")
                    document.body[m : m + 4] = put_cmd_in_ert("\\proglang{")
                    m = m + 5
            # \code
            if j != -1:
                j = find_token(document.body, "\\begin_inset Flex Code", j)
                if j != -1:
                    # assure that we are not in a Code Chunk inset
                    # ("Flex Code" ends in 'e', "Flex Code Chunk" does not)
                    if document.body[j][-1] == "e":
                        endj = find_end_of_inset(document.body, j)
                        document.body[endj - 2 : endj + 1] = put_cmd_in_ert("}")
                        document.body[j : j + 4] = put_cmd_in_ert("\\code{")
                        j = j + 5
                    else:
                        j = j + 1
            # \email
            if k != -1:
                k = find_token(document.body, "\\begin_inset Flex E-mail", k)
                if k != -1:
                    endk = find_end_of_inset(document.body, k)
                    document.body[endk - 2 : endk + 1] = put_cmd_in_ert("}")
                    document.body[k : k + 4] = put_cmd_in_ert("\\email{")
                    k = k + 5
            # \url
            if n != -1:
                n = find_token(document.body, "\\begin_inset Flex URL", n)
                if n != -1:
                    endn = find_end_of_inset(document.body, n)
                    document.body[endn - 2 : endn + 1] = put_cmd_in_ert("}")
                    document.body[n : n + 4] = put_cmd_in_ert("\\url{")
                    n = n + 5
        # now revert the In_Preamble layouts
        # (each section moves one layout into the preamble and deletes it;
        # the function returns once a layout type is no longer found)
        # \title
        i = find_token(document.body, "\\begin_layout Title", 0)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Title layout")
            i += 1
            continue
        content = lyx2latex(document, document.body[i:j + 1])
        add_to_preamble(document, ["\\title{" + content + "}"])
        del document.body[i:j + 1]
        # \author
        i = find_token(document.body, "\\begin_layout Author", 0)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Author layout")
            i += 1
            continue
        content = lyx2latex(document, document.body[i:j + 1])
        add_to_preamble(document, ["\\author{" + content + "}"])
        del document.body[i:j + 1]
        # \Plainauthor
        i = find_token(document.body, "\\begin_layout Plain Author", 0)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Plain Author layout")
            i += 1
            continue
        content = lyx2latex(document, document.body[i:j + 1])
        add_to_preamble(document, ["\\Plainauthor{" + content + "}"])
        del document.body[i:j + 1]
        # \Plaintitle
        i = find_token(document.body, "\\begin_layout Plain Title", 0)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Plain Title layout")
            i += 1
            continue
        content = lyx2latex(document, document.body[i:j + 1])
        add_to_preamble(document, ["\\Plaintitle{" + content + "}"])
        del document.body[i:j + 1]
        # \Shorttitle
        i = find_token(document.body, "\\begin_layout Short Title", 0)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Short Title layout")
            i += 1
            continue
        content = lyx2latex(document, document.body[i:j + 1])
        add_to_preamble(document, ["\\Shorttitle{" + content + "}"])
        del document.body[i:j + 1]
        # \Abstract
        i = find_token(document.body, "\\begin_layout Abstract", 0)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Abstract layout")
            i += 1
            continue
        content = lyx2latex(document, document.body[i:j + 1])
        add_to_preamble(document, ["\\Abstract{" + content + "}"])
        del document.body[i:j + 1]
        # \Keywords
        i = find_token(document.body, "\\begin_layout Keywords", 0)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Keywords layout")
            i += 1
            continue
        content = lyx2latex(document, document.body[i:j + 1])
        add_to_preamble(document, ["\\Keywords{" + content + "}"])
        del document.body[i:j + 1]
        # \Plainkeywords
        i = find_token(document.body, "\\begin_layout Plain Keywords", 0)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Plain Keywords layout")
            i += 1
            continue
        content = lyx2latex(document, document.body[i:j + 1])
        add_to_preamble(document, ["\\Plainkeywords{" + content + "}"])
        del document.body[i:j + 1]
        # \Address
        i = find_token(document.body, "\\begin_layout Address", 0)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Address layout")
            i += 1
            continue
        content = lyx2latex(document, document.body[i:j + 1])
        add_to_preamble(document, ["\\Address{" + content + "}"])
        del document.body[i:j + 1]
        # finally handle the code layouts
        h = 0
        m = 0
        j = 0
        k = 0
        while m != -1 or j != -1 or h != -1 or k != -1:
            # \CodeChunk
            if h != -1:
                h = find_token(document.body, "\\begin_inset Flex Code Chunk", h)
                if h != -1:
                    endh = find_end_of_inset(document.body, h)
                    document.body[endh : endh + 1] = put_cmd_in_ert("\\end{CodeChunk}")
                    document.body[h : h + 3] = put_cmd_in_ert("\\begin{CodeChunk}")
                    document.body[h - 1 : h] = ["\\begin_layout Standard"]
                    h = h + 1
            # \CodeInput
            if j != -1:
                j = find_token(document.body, "\\begin_layout Code Input", j)
                if j != -1:
                    endj = find_end_of_layout(document.body, j)
                    document.body[endj : endj + 1] = ["\\end_layout", "", "\\begin_layout Standard"]
                    document.body[endj + 3 : endj + 4] = put_cmd_in_ert("\\end{CodeInput}")
                    document.body[endj + 13 : endj + 13] = ["\\end_layout", "", "\\begin_layout Standard"]
                    document.body[j + 1 : j] = ["\\end_layout", "", "\\begin_layout Standard"]
                    document.body[j : j + 1] = put_cmd_in_ert("\\begin{CodeInput}")
                    j = j + 1
            # \CodeOutput
            if k != -1:
                k = find_token(document.body, "\\begin_layout Code Output", k)
                if k != -1:
                    endk = find_end_of_layout(document.body, k)
                    document.body[endk : endk + 1] = ["\\end_layout", "", "\\begin_layout Standard"]
                    document.body[endk + 3 : endk + 4] = put_cmd_in_ert("\\end{CodeOutput}")
                    document.body[endk + 13 : endk + 13] = ["\\end_layout", "", "\\begin_layout Standard"]
                    document.body[k + 1 : k] = ["\\end_layout", "", "\\begin_layout Standard"]
                    document.body[k : k + 1] = put_cmd_in_ert("\\begin{CodeOutput}")
                    k = k + 1
            # \Code
            if m != -1:
                m = find_token(document.body, "\\begin_layout Code", m)
                if m != -1:
                    endm = find_end_of_layout(document.body, m)
                    document.body[endm : endm + 1] = ["\\end_layout", "", "\\begin_layout Standard"]
                    document.body[endm + 3 : endm + 4] = put_cmd_in_ert("\\end{Code}")
                    document.body[endm + 13 : endm + 13] = ["\\end_layout", "", "\\begin_layout Standard"]
                    document.body[m + 1 : m] = ["\\end_layout", "", "\\begin_layout Standard"]
                    document.body[m : m + 1] = put_cmd_in_ert("\\begin{Code}")
                    m = m + 1
|
|
|
|
|
|
def convert_subref(document):
    " converts sub: ref prefixes to subref: "

    # 1) label insets
    rx = re.compile(r'^name \"sub:(.+)$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset label", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Label inset at line " + str(i))
            i += 1
            continue

        # NOTE(review): unlike the ref loop below, this loop has no break
        # after a match, so every matching line within the inset is rewritten.
        for p in range(i, j):
            m = rx.match(document.body[p])
            if m:
                label = m.group(1)
                document.body[p] = "name \"subsec:" + label
        i += 1

    # 2) xref insets
    rx = re.compile(r'^reference \"sub:(.+)$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Ref inset at line " + str(i))
            i += 1
            continue

        for p in range(i, j):
            m = rx.match(document.body[p])
            if m:
                label = m.group(1)
                document.body[p] = "reference \"subsec:" + label
                break
        i += 1
|
|
|
|
|
|
|
|
def revert_subref(document):
    " reverts subref: ref prefixes to sub: "

    # 1) label insets
    rx = re.compile(r'^name \"subsec:(.+)$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset label", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Label inset at line " + str(i))
            i += 1
            continue

        # A label inset carries a single name line, so stop after the match.
        for p in range(i, j):
            m = rx.match(document.body[p])
            if m:
                label = m.group(1)
                document.body[p] = "name \"sub:" + label
                break
        i += 1

    # 2) xref insets
    rx = re.compile(r'^reference \"subsec:(.+)$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Ref inset at line " + str(i))
            i += 1
            continue

        for p in range(i, j):
            m = rx.match(document.body[p])
            if m:
                label = m.group(1)
                document.body[p] = "reference \"sub:" + label
                break
        i += 1
|
|
|
|
|
|
def convert_nounzip(document):
    " remove the noUnzip parameter of graphics insets "

    no_unzip_re = re.compile(r'\s*noUnzip\s*$')
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Graphics", pos)
        if pos == -1:
            break
        inset_end = find_end_of_inset(document.body, pos)
        if inset_end == -1:
            document.warning("Malformed LyX document: Can't find end of graphics inset at line " + str(pos))
            pos += 1
            continue

        # Drop the parameter line if present; the inset then shrinks by one.
        param = find_re(document.body, no_unzip_re, pos, inset_end)
        if param != -1:
            del document.body[param]
            inset_end -= 1
        pos = inset_end + 1
|
|
|
|
|
|
def convert_revert_external_bbox(document, forward):
    " add units to bounding box of external insets "

    bbox_re = re.compile(r'^\s*boundingBox\s+\S+\s+\S+\s+\S+\s+\S+\s*$')
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset External", pos)
        if pos == -1:
            break
        inset_end = find_end_of_inset(document.body, pos)
        if inset_end == -1:
            document.warning("Malformed LyX document: Can't find end of external inset at line " + str(pos))
            pos += 1
            continue
        bbox = find_re(document.body, bbox_re, pos, inset_end)
        if bbox == -1:
            pos = inset_end + 1
            continue
        # fields[1:5] are the four coordinates; forward appends the "bp"
        # unit, backward strips it down to a bare bp value.
        fields = document.body[bbox].split()
        if forward:
            coords = [c + "bp" for c in fields[1:5]]
        else:
            coords = [length_in_bp(c) for c in fields[1:5]]
        document.body[bbox] = "\tboundingBox " + " ".join(coords)
        pos = inset_end + 1
|
|
|
|
|
|
def convert_external_bbox(document):
    " Add bp units to the bounding box of external insets "
    convert_revert_external_bbox(document, True)
|
|
|
|
|
|
def revert_external_bbox(document):
    " Strip the units from the bounding box of external insets "
    convert_revert_external_bbox(document, False)
|
|
|
|
|
|
def revert_tcolorbox_1(document):
    " Reverts the Flex:Subtitle inset of tcolorbox to TeX-code "
    # Only act if "tcolorbox" appears in the document header.
    i = -1
    while True:
        i = find_token(document.header, "tcolorbox", i)
        if i == -1:
            break
        else:
            # Each pass reverts the first remaining Subtitle inset; the
            # function leaves via the return below once none are left.
            flex = 0
            flexEnd = -1
            flex = find_token(document.body, "\\begin_inset Flex Subtitle", flex)
            if flex == -1:
                return flexEnd
            flexEnd = find_end_of_inset(document.body, flex)
            # Argument 1 is optional ([...]); wasOpt tells whether the
            # opening brace of the content was already emitted with it.
            wasOpt = revert_Argument_to_TeX_brace(document, flex, flexEnd, 1, 1, False, True, False)
            revert_Argument_to_TeX_brace(document, flex, 0, 2, 2, False, False, False)
            flexEnd = find_end_of_inset(document.body, flex)
            if wasOpt == True:
                document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\tcbsubtitle")
            else:
                document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\tcbsubtitle{")
            document.body[flexEnd + 4 : flexEnd + 7] = put_cmd_in_ert("}")
            flex += 1
|
|
|
|
|
|
def revert_tcolorbox_2(document):
    " Reverts the Flex:Raster_Color_Box inset of tcolorbox to TeX-code "
    # Only act if "tcolorbox" appears in the document header.
    i = -1
    while True:
        i = find_token(document.header, "tcolorbox", i)
        if i == -1:
            break
        else:
            # Each pass reverts the first remaining inset; we leave via the
            # return below once none are left.
            flex = 0
            flexEnd = -1
            flex = find_token(document.body, "\\begin_inset Flex Raster Color Box", flex)
            if flex == -1:
                return flexEnd
            flexEnd = find_end_of_inset(document.body, flex)
            # Argument 1 is the optional environment option ([...]).
            revert_Argument_to_TeX_brace(document, flex, flexEnd, 1, 1, True, True, False)
            flexEnd = find_end_of_inset(document.body, flex)
            document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\begin{tcbraster}")
            document.body[flexEnd + 4 : flexEnd + 7] = put_cmd_in_ert("\\end{tcbraster}")
            flex += 1
|
|
|
|
|
|
def revert_tcolorbox_3(document):
    " Reverts the Flex:Custom_Color_Box_1 inset of tcolorbox to TeX-code "
    # Only act if "tcolorbox" appears in the document header.
    i = -1
    while True:
        i = find_token(document.header, "tcolorbox", i)
        if i == -1:
            break
        else:
            # Each pass reverts the first remaining inset; we leave via the
            # return below once none are left.
            flex = 0
            flexEnd = -1
            flex = find_token(document.body, "\\begin_inset Flex Custom Color Box 1", flex)
            if flex == -1:
                return flexEnd
            flexEnd = find_end_of_inset(document.body, flex)
            # Argument 1: optional [...]; argument 2: mandatory {...}.
            revert_Argument_to_TeX_brace(document, flex, flexEnd, 1, 1, True, True, False)
            revert_Argument_to_TeX_brace(document, flex, 0, 2, 2, True, False, False)
            flexEnd = find_end_of_inset(document.body, flex)
            document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\begin{cBoxA}")
            document.body[flexEnd + 4 : flexEnd + 7] = put_cmd_in_ert("{}\\end{cBoxA}")
            flex += 1
|
|
|
|
|
|
def revert_tcolorbox_4(document):
    " Reverts the Flex:Custom_Color_Box_2 inset of tcolorbox to TeX-code "
    # Only act if "tcolorbox" appears in the document header.
    i = -1
    while True:
        i = find_token(document.header, "tcolorbox", i)
        if i == -1:
            break
        else:
            # Each pass reverts the first remaining inset; we leave via the
            # return below once none are left.
            flex = 0
            flexEnd = -1
            flex = find_token(document.body, "\\begin_inset Flex Custom Color Box 2", flex)
            if flex == -1:
                return flexEnd
            flexEnd = find_end_of_inset(document.body, flex)
            # Argument 1: optional [...]; argument 2: mandatory {...}.
            revert_Argument_to_TeX_brace(document, flex, flexEnd, 1, 1, True, True, False)
            revert_Argument_to_TeX_brace(document, flex, 0, 2, 2, True, False, False)
            flexEnd = find_end_of_inset(document.body, flex)
            document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\begin{cBoxB}")
            document.body[flexEnd + 4 : flexEnd + 7] = put_cmd_in_ert("{}\\end{cBoxB}")
            flex += 1
|
|
|
|
|
|
def revert_tcolorbox_5(document):
    " Reverts the Flex:Custom_Color_Box_3 inset of tcolorbox to TeX-code "
    # Only act if "tcolorbox" appears in the document header.
    i = -1
    while True:
        i = find_token(document.header, "tcolorbox", i)
        if i == -1:
            break
        else:
            # Each pass reverts the first remaining inset; we leave via the
            # return below once none are left.
            flex = 0
            flexEnd = -1
            flex = find_token(document.body, "\\begin_inset Flex Custom Color Box 3", flex)
            if flex == -1:
                return flexEnd
            flexEnd = find_end_of_inset(document.body, flex)
            # Argument 1: optional [...]; argument 2: mandatory {...}.
            revert_Argument_to_TeX_brace(document, flex, flexEnd, 1, 1, True, True, False)
            revert_Argument_to_TeX_brace(document, flex, 0, 2, 2, True, False, False)
            flexEnd = find_end_of_inset(document.body, flex)
            document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\begin{cBoxC}")
            document.body[flexEnd + 4 : flexEnd + 7] = put_cmd_in_ert("{}\\end{cBoxC}")
            flex += 1
|
|
|
|
|
|
def revert_tcolorbox_6(document):
    " Reverts the Flex:Custom_Color_Box_4 inset of tcolorbox to TeX-code "
    # Only act if "tcolorbox" appears in the document header.
    i = -1
    while True:
        i = find_token(document.header, "tcolorbox", i)
        if i == -1:
            break
        else:
            # Each pass reverts the first remaining inset; we leave via the
            # return below once none are left.
            flex = 0
            flexEnd = -1
            flex = find_token(document.body, "\\begin_inset Flex Custom Color Box 4", flex)
            if flex == -1:
                return flexEnd
            flexEnd = find_end_of_inset(document.body, flex)
            # Argument 1: optional [...]; argument 2: mandatory {...}.
            revert_Argument_to_TeX_brace(document, flex, flexEnd, 1, 1, True, True, False)
            revert_Argument_to_TeX_brace(document, flex, 0, 2, 2, True, False, False)
            flexEnd = find_end_of_inset(document.body, flex)
            document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\begin{cBoxD}")
            document.body[flexEnd + 4 : flexEnd + 7] = put_cmd_in_ert("{}\\end{cBoxD}")
            flex += 1
|
|
|
|
|
|
def revert_tcolorbox_7(document):
    " Reverts the Flex:Custom_Color_Box_5 inset of tcolorbox to TeX-code "
    # Only act if "tcolorbox" appears in the document header.
    i = -1
    while True:
        i = find_token(document.header, "tcolorbox", i)
        if i == -1:
            break
        else:
            # Each pass reverts the first remaining inset; we leave via the
            # return below once none are left.
            flex = 0
            flexEnd = -1
            flex = find_token(document.body, "\\begin_inset Flex Custom Color Box 5", flex)
            if flex == -1:
                return flexEnd
            flexEnd = find_end_of_inset(document.body, flex)
            # Argument 1: optional [...]; argument 2: mandatory {...}.
            revert_Argument_to_TeX_brace(document, flex, flexEnd, 1, 1, True, True, False)
            revert_Argument_to_TeX_brace(document, flex, 0, 2, 2, True, False, False)
            flexEnd = find_end_of_inset(document.body, flex)
            document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\begin{cBoxE}")
            document.body[flexEnd + 4 : flexEnd + 7] = put_cmd_in_ert("{}\\end{cBoxE}")
            flex += 1
|
|
|
|
|
|
def revert_tcolorbox_8(document):
    " Reverts the layout New Color Box Type of tcolorbox to TeX-code "
    i = 0
    j = 0
    k = 0
    while True:
        if i != -1:
            i = find_token(document.body, "\\begin_layout New Color Box Type", i)
        if i != -1:
            j = find_end_of_layout(document.body, i)
            # Convert the layout's Argument insets to TeX braces.  The return
            # value of the first call is treated as "the optional argument was
            # present" — presumably Argument 1 is the optional one; the exact
            # semantics live in revert_Argument_to_TeX_brace.
            wasOpt = revert_Argument_to_TeX_brace(document, i, j, 1, 1, False, True, False)
            revert_Argument_to_TeX_brace(document, i, 0, 2, 2, False, False, True)
            revert_Argument_to_TeX_brace(document, i, 0, 3, 4, False, True, False)
            document.body[i] = document.body[i].replace("\\begin_layout New Color Box Type", "\\begin_layout Standard")
            # With the optional argument the brace opening the mandatory name
            # was already emitted by the revert above; otherwise open it here.
            if wasOpt == True:
                document.body[i + 1 : i + 1] = put_cmd_in_ert("\\newtcolorbox")
            else:
                document.body[i + 1 : i + 1] = put_cmd_in_ert("\\newtcolorbox{")
            # Skip over the ERT insets just produced to find the insertion
            # point for the opening brace of the definition body (one extra
            # \end_inset to skip when the optional argument was present).
            k = find_end_of_inset(document.body, j)
            k = find_token(document.body, "\\end_inset", k + 1)
            k = find_token(document.body, "\\end_inset", k + 1)
            if wasOpt == True:
                k = find_token(document.body, "\\end_inset", k + 1)
            document.body[k + 2 : j + 2] = put_cmd_in_ert("{")
            # Close the definition body just before the next Standard layout.
            j = find_token(document.body, "\\begin_layout Standard", j + 1)
            document.body[j - 2 : j - 2] = put_cmd_in_ert("}")
            i += 1
        if i == -1:
            return
|
|
|
|
|
|
def revert_moderncv_1(document):
    """Revert the new moderncv layouts to TeX code / obsolete layouts.

    CVIcons and CVColumnWidth layouts are moved into the preamble as
    \\moderncvicons and \\setlength{\\hintscolumnwidth} commands; a Name
    layout is split back into the obsolete FirstName/FamilyName layouts.
    Only documents of class "moderncv" are touched.
    """
    if document.textclass != "moderncv":
        return
    i = 0
    j = 0
    lineArg = 0
    while True:
        # at first revert the new styles
        # \moderncvicons
        i = find_token(document.body, "\\begin_layout CVIcons", 0)
        if i == -1:
            # NOTE(review): returning here also skips the CVColumnWidth and
            # Name handling below whenever no CVIcons layout exists — confirm
            # that this early exit is intended.
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of CVIcons layout")
            i += 1
            continue
        content = lyx2latex(document, document.body[i:j + 1])
        add_to_preamble(document, ["\\moderncvicons{" + content + "}"])
        del document.body[i:j + 1]
        # \hintscolumnwidth
        i = find_token(document.body, "\\begin_layout CVColumnWidth", 0)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of CVColumnWidth layout")
            i += 1
            continue
        content = lyx2latex(document, document.body[i:j + 1])
        # fix: was '{\hintscolumnwidth}' — '\h' is an invalid escape sequence
        # (SyntaxWarning on modern Python); '\\h' yields the same bytes.
        add_to_preamble(document, ["\\setlength{\\hintscolumnwidth}{" + content + "}"])
        del document.body[i:j + 1]
        # now change the new styles to the obsolete ones
        # \name
        i = find_token(document.body, "\\begin_layout Name", 0)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Name layout")
            i += 1
            continue
        lineArg = find_token(document.body, "\\begin_inset Argument 1", i)
        # The Argument found belongs to a later layout -> nothing to convert.
        if lineArg > j and j != 0:
            return
        if lineArg != -1:
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", lineArg)
            # we have to assure that no other inset is in the Argument
            beginInset = find_token(document.body, "\\begin_inset", beginPlain)
            endInset = find_token(document.body, "\\end_inset", beginPlain)
            k = beginPlain + 1
            l = k
            while beginInset < endInset and beginInset != -1:
                beginInset = find_token(document.body, "\\begin_inset", k)
                endInset = find_token(document.body, "\\end_inset", l)
                k = beginInset + 1
                l = endInset + 1
            # presumably the FamilyName content line; the fixed offsets depend
            # on the exact inset layout — verify against a sample document
            Arg2 = document.body[l + 5 : l + 6]
            # rename the style
            document.body[i : i + 1] = ["\\begin_layout FirstName"]
            # delete the Argument inset
            del(document.body[endInset - 2 : endInset + 3])
            del(document.body[lineArg : beginPlain + 1])
            # emit the family name as its own obsolete layout
            document.body[i + 4 : i + 4] = ["\\begin_layout FamilyName"] + Arg2 + ["\\end_layout"] + [""]
|
|
|
|
|
|
def revert_moderncv_2(document):
    " Reverts the phone inset of moderncv to the obsoleted mobile or fax "

    if document.textclass != "moderncv":
        return
    i = 0
    j = 0
    lineArg = 0
    while True:
        # \phone
        i = find_token(document.body, "\\begin_layout Phone", i)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Phone layout")
            i += 1
            # NOTE(review): this aborts the whole conversion on one malformed
            # layout (the increment above is dead); sibling routines use
            # 'continue' here — confirm which is intended.
            return
        lineArg = find_token(document.body, "\\begin_inset Argument 1", i)
        # Argument belongs to a later layout -> plain Phone, keep as is.
        if lineArg > j and j != 0:
            i += 1
            continue
        if lineArg != -1:
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", lineArg)
            # we have to assure that no other inset is in the Argument
            beginInset = find_token(document.body, "\\begin_inset", beginPlain)
            endInset = find_token(document.body, "\\end_inset", beginPlain)
            k = beginPlain + 1
            l = k
            while beginInset < endInset and beginInset != -1:
                beginInset = find_token(document.body, "\\begin_inset", k)
                endInset = find_token(document.body, "\\end_inset", l)
                k = beginInset + 1
                l = endInset + 1
            # one-element slice holding the argument value ("mobile"/"fax")
            Arg = document.body[beginPlain + 1 : beginPlain + 2]
            # rename the style
            if Arg[0] == "mobile":
                document.body[i : i + 1]= ["\\begin_layout Mobile"]
            if Arg[0] == "fax":
                document.body[i : i + 1]= ["\\begin_layout Fax"]
            # delete the Argument inset
            del(document.body[endInset - 2 : endInset + 1])
            del(document.body[lineArg : beginPlain + 3])
        i += 1
|
|
|
|
|
|
def convert_moderncv_phone(document):
    """Convert the moderncv Fax and Mobile layouts to the new Phone layout.

    The old layout name becomes the value of the new layout's Argument 1
    inset ("fax" or "mobile").  Only documents of class "moderncv" are
    touched.
    """
    if document.textclass != "moderncv":
        return
    i = 0
    j = 0

    # old layout name -> argument value of the new Phone layout
    phone_dict = {
        "Mobile" : "mobile",
        "Fax" : "fax",
    }

    rx = re.compile(r'^\\begin_layout (\S+)$')
    while True:
        # substitute \fax and \mobile by \phone[fax] and \phone[mobile], respectively
        i = find_token(document.body, "\\begin_layout", i)
        if i == -1:
            return

        m = rx.match(document.body[i])
        val = ""
        if m:
            val = m.group(1)
        # idiom fix: direct dict membership instead of list(phone_dict.keys())
        if val not in phone_dict:
            i += 1
            continue
        # j only sanity-checks that the layout is properly closed
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Mobile layout")
            i += 1
            # NOTE(review): this aborts the whole conversion on one malformed
            # layout (the increment above is dead); confirm 'continue' was not
            # intended as in the sibling converters.
            return

        document.body[i : i + 1] = ["\\begin_layout Phone", "\\begin_inset Argument 1", "status open", "",
                            "\\begin_layout Plain Layout", phone_dict[val], "\\end_layout", "",
                            "\\end_inset", ""]
|
|
|
|
|
|
def convert_moderncv_name(document):
    " Convert the FirstName and LastName layout of moderncv to the general Name layout "

    if document.textclass != "moderncv":
        return

    fnb = 0 # Begin of FirstName inset
    fne = 0 # End of FirstName inset
    lnb = 0 # Begin of LastName (FamilyName) inset
    lne = 0 # End of LastName (FamilyName) inset
    nb = 0 # Begin of substituting Name inset
    ne = 0 # End of substituting Name inset
    FirstName = [] # FirstName content
    FamilyName = [] # LastName content

    while True:
        # locate FirstName
        fnb = find_token(document.body, "\\begin_layout FirstName", fnb)
        if fnb != -1:
            fne = find_end_of_layout(document.body, fnb)
            if fne == -1:
                document.warning("Malformed LyX document: Can't find end of FirstName layout")
                return
            FirstName = document.body[fnb + 1 : fne]
        # locate FamilyName
        lnb = find_token(document.body, "\\begin_layout FamilyName", lnb)
        if lnb != -1:
            lne = find_end_of_layout(document.body, lnb)
            if lne == -1:
                document.warning("Malformed LyX document: Can't find end of FamilyName layout")
                return
            FamilyName = document.body[lnb + 1 : lne]
        # Determine the region for the substituting Name layout
        if fnb == -1 and lnb == -1: # Neither FirstName nor FamilyName exists -> Do nothing
            return
        elif fnb == -1: # Only FamilyName exists -> New Name insets replaces that
            nb = lnb
            ne = lne
        elif lnb == -1: # Only FirstName exists -> New Name insets replaces that
            nb = fnb
            ne = fne
        elif fne > lne: # FamilyName position before FirstName -> New Name insets spans
            nb = lnb # from FamilyName begin
            ne = fne # to FirstName end
        else: # FirstName position before FamilyName -> New Name insets spans
            nb = fnb # from FirstName begin
            ne = lne # to FamilyName end

        # Insert the substituting layout now. If FirstName exists, use an optional argument.
        if FirstName == []:
            document.body[nb : ne + 1] = ["\\begin_layout Name"] + FamilyName + ["\\end_layout", ""]
        else:
            document.body[nb : ne + 1] = ["\\begin_layout Name", "\\begin_inset Argument 1", "status open", "",
                            "\\begin_layout Plain Layout"] + FirstName + ["\\end_layout", "",
                            "\\end_inset", ""] + FamilyName + ["\\end_layout", ""]
|
|
|
|
|
|
def revert_achemso(document):
    """Replace every 'Flex Latin' inset with ERT '\\latin{...}' (achemso only)."""
    if document.textclass != "achemso":
        return
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Flex Latin", pos)
        if pos == -1:
            return
        inset_end = find_end_of_inset(document.body, pos)
        if inset_end == -1:
            document.warning("Malformed LyX document: Can't find end of flex inset Latin")
            return
        # Extract the inset's content and re-emit it as raw LaTeX.
        plain = find_token(document.body, "\\begin_layout Plain Layout", pos)
        plain_end = find_end_of_layout(document.body, plain)
        latin = lyx2latex(document, document.body[plain : plain_end])
        document.body[pos : inset_end + 1] = put_cmd_in_ert("\\latin{" + latin + "}")
        pos += 1
|
|
|
|
|
|
# Header settings that exist once before format 501 and as a
# (TeX fonts, non-TeX fonts) value pair afterwards.
fontsettings = ["\\font_roman", "\\font_sans", "\\font_typewriter", "\\font_math", \
                "\\font_sf_scale", "\\font_tt_scale"]
# Default value of each setting, index-aligned with 'fontsettings'.
fontdefaults = ["default", "default", "default", "auto", "100", "100"]
# Whether each setting's value is written quoted, index-aligned as well.
fontquotes = [True, True, True, True, False, False]
|
|
|
|
def convert_fontsettings(document):
    """Duplicate each font setting into a (TeX fonts, non-TeX fonts) pair.

    The currently active value keeps its slot (first slot for TeX fonts,
    second for non-TeX fonts, per \\use_non_tex_fonts); the other slot is
    filled with the default.
    """
    pos = find_token(document.header, "\\use_non_tex_fonts ", 0)
    if pos == -1:
        document.warning("Malformed LyX document: No \\use_non_tex_fonts!")
        use_non_tex_fonts = "false"
    else:
        use_non_tex_fonts = get_value(document.header, "\\use_non_tex_fonts", pos)
    for idx, setting in enumerate(fontsettings):
        pos = find_token(document.header, setting + " ", 0)
        default = fontdefaults[idx]
        if pos == -1:
            document.warning("Malformed LyX document: No " + setting + "!")
            # repairable: with pos == -1 the slice assignment below inserts
            # the line near the end of the header
            current = default
        else:
            current = document.header[pos][len(setting):].strip()
        if fontquotes[idx]:
            if use_non_tex_fonts == "true":
                newline = setting + ' "' + default + '" "' + current + '"'
            else:
                newline = setting + ' "' + current + '" "' + default + '"'
        else:
            if use_non_tex_fonts == "true":
                newline = setting + ' ' + default + ' ' + current
            else:
                newline = setting + ' ' + current + ' ' + default
        document.header[pos:pos+1] = [newline]
|
|
|
|
|
|
def revert_fontsettings(document):
    """Merge each duplicated (TeX, non-TeX) font setting back to one value.

    Keeps the value matching \\use_non_tex_fonts (second slot when true,
    first slot otherwise) and drops the other one.
    """
    pos = find_token(document.header, "\\use_non_tex_fonts ", 0)
    if pos == -1:
        document.warning("Malformed LyX document: No \\use_non_tex_fonts!")
        use_non_tex_fonts = "false"
    else:
        use_non_tex_fonts = get_value(document.header, "\\use_non_tex_fonts", pos)
    for idx, setting in enumerate(fontsettings):
        pos = find_token(document.header, setting + " ", 0)
        if pos == -1:
            document.warning("Malformed LyX document: No " + setting + "!")
            continue
        line = get_value(document.header, setting, pos)
        if fontquotes[idx]:
            # quoted pair: "tex value" "non-tex value"
            q1 = line.find('"')
            q2 = line.find('"', q1 + 1)
            q3 = line.find('"', q2 + 1)
            q4 = line.find('"', q3 + 1)
            if -1 in (q1, q2, q3, q4):
                document.warning("Malformed LyX document: Missing quotes!")
                continue
            if use_non_tex_fonts == "true":
                kept = line[q3+1:q4]
            else:
                kept = line[q1+1:q2]
        else:
            # unquoted pair: tex-value non-tex-value
            words = line.split()
            kept = words[1] if use_non_tex_fonts == "true" else words[0]
        document.header[pos:pos+1] = [setting + ' ' + kept]
|
|
|
|
|
|
def revert_solution(document):
    """Revert the Solution/Solution* environment of the theorem modules to TeX.

    Each run of consecutive Solution paragraphs is wrapped in one ERT
    \\begin{sol}...\\end{sol} pair, and a matching \\newtheorem definition
    plus \\solutionname fallback is added to the preamble.
    """
    # Do we use one of the modules that provides Solution?
    have_mod = False
    mods = document.get_module_list()
    for mod in mods:
        if mod == "theorems-std" or mod == "theorems-bytype" \
        or mod == "theorems-ams" or mod == "theorems-ams-bytype":
            have_mod = True
            break
    if not have_mod:
        return

    consecutive = False
    is_starred = False
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout Solution", i)
        if i == -1:
            return

        # "\begin_layout Solution*" also matches the find_token above
        is_starred = document.body[i].startswith("\\begin_layout Solution*")
        if is_starred:
            LaTeXName = "sol*"
            LyXName = "Solution*"
            theoremName = "newtheorem*"
        else:
            LaTeXName = "sol"
            LyXName = "Solution"
            theoremName = "newtheorem"

        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of " + LyXName + " layout")
            i += 1
            continue

        # if this is not a consecutive env, add start command
        begcmd = []
        if not consecutive:
            begcmd = put_cmd_in_ert("\\begin{%s}" % (LaTeXName))

        # has this a consecutive theorem of same type?
        consecutive = document.body[j + 2] == "\\begin_layout " + LyXName

        # if this is not followed by a consecutive env, add end command
        if not consecutive:
            document.body[j : j + 1] = put_cmd_in_ert("\\end{%s}" % (LaTeXName)) + ["\\end_layout"]

        document.body[i : i + 1] = ["\\begin_layout Standard", ""] + begcmd

        add_to_preamble(document, "\\theoremstyle{definition}")
        # number within 'thm' only for plain environments of the non-bytype
        # modules; 'mod' still holds the module matched by the loop above
        if is_starred or mod == "theorems-bytype" or mod == "theorems-ams-bytype":
            add_to_preamble(document, "\\%s{%s}{\\protect\\solutionname}" % \
                (theoremName, LaTeXName))
        else: # mod == "theorems-std" or mod == "theorems-ams" and not is_starred
            add_to_preamble(document, "\\%s{%s}[thm]{\\protect\\solutionname}" % \
                (theoremName, LaTeXName))

        # fix: was '{\solutionname}' — '\s' is an invalid escape sequence
        # (SyntaxWarning on modern Python); '\\s' yields the same bytes.
        add_to_preamble(document, "\\providecommand{\\solutionname}{Solution}")
        i = j
|
|
|
|
|
|
def revert_verbatim_star(document):
    """Revert starred verbatim by delegating to the LyX 2.1 routine."""
    from lyx_2_1 import revert_verbatim as revert_verbatim_2_1
    revert_verbatim_2_1(document, True)
|
|
|
|
|
|
def convert_save_props(document):
    """Add the save_transient_properties header parameter (defaulting to true)."""
    pos = find_token(document.header, '\\begin_header', 0)
    if pos == -1:
        document.warning("Malformed lyx document: Missing '\\begin_header'.")
        return
    # place the new parameter directly after \begin_header
    document.header[pos + 1 : pos + 1] = ['\\save_transient_properties true']
|
|
|
|
|
|
def revert_save_props(document):
    """Remove the save_transient_properties header parameter, if present."""
    pos = find_token(document.header, "\\save_transient_properties", 0)
    if pos != -1:
        del document.header[pos]
|
|
|
|
|
|
def convert_info_tabular_feature(document):
    """Rename 'inset-modify tabular' to 'tabular-feature' in Info insets."""
    def rename(arg):
        return arg.replace("inset-modify tabular", "tabular-feature")
    convert_info_insets(document, "shortcut(s)?|icon", rename)
|
|
|
|
|
|
def revert_info_tabular_feature(document):
    """Rename 'tabular-feature' back to 'inset-modify tabular' in Info insets."""
    def rename(arg):
        return arg.replace("tabular-feature", "inset-modify tabular")
    convert_info_insets(document, "shortcut(s)?|icon", rename)
|
|
|
|
|
|
##
# Conversion hub
#

supported_versions = ["2.2.0", "2.2"]

# Each entry: [target file format, [functions converting from the previous
# format to it]].  An empty list means the format bump needs no content change.
convert = [
           [475, [convert_separator]],
           # nothing to do for 476: We consider it a bug that older versions
           # did not load amsmath automatically for these commands, and do not
           # want to hardcode amsmath off.
           [476, []],
           [477, []],
           [478, []],
           [479, []],
           [480, []],
           [481, [convert_dashes]],
           [482, [convert_phrases]],
           [483, [convert_specialchar]],
           [484, []],
           [485, []],
           [486, []],
           [487, []],
           [488, [convert_newgloss]],
           [489, [convert_BoxFeatures]],
           [490, [convert_origin]],
           [491, []],
           [492, [convert_colorbox]],
           [493, []],
           [494, []],
           [495, [convert_subref]],
           [496, [convert_nounzip]],
           [497, [convert_external_bbox]],
           [498, []],
           [499, [convert_moderncv_phone, convert_moderncv_name]],
           [500, []],
           [501, [convert_fontsettings]],
           [502, []],
           [503, []],
           [504, [convert_save_props]],
           [505, []],
           [506, [convert_info_tabular_feature]],
           [507, [convert_longtable_label]],
           [508, [convert_parbreak]]
          ]

# Each entry: [target file format, [functions reverting from the next higher
# format back down to it]].
revert =  [
           [507, [revert_parbreak]],
           [506, [revert_longtable_label]],
           [505, [revert_info_tabular_feature]],
           [504, []],
           [503, [revert_save_props]],
           [502, [revert_verbatim_star]],
           [501, [revert_solution]],
           [500, [revert_fontsettings]],
           [499, [revert_achemso]],
           [498, [revert_moderncv_1, revert_moderncv_2]],
           [497, [revert_tcolorbox_1, revert_tcolorbox_2,
                  revert_tcolorbox_3, revert_tcolorbox_4, revert_tcolorbox_5,
                  revert_tcolorbox_6, revert_tcolorbox_7, revert_tcolorbox_8]],
           [496, [revert_external_bbox]],
           [495, []], # nothing to do since the noUnzip parameter was optional
           [494, [revert_subref]],
           [493, [revert_jss]],
           [492, [revert_mathmulticol]],
           [491, [revert_colorbox]],
           [490, [revert_textcolor]],
           [489, [revert_origin]],
           [488, [revert_BoxFeatures]],
           [487, [revert_newgloss, revert_glossgroup]],
           [486, [revert_forest]],
           [485, [revert_ex_itemargs]],
           [484, [revert_sigplan_doi]],
           [483, [revert_georgian]],
           [482, [revert_specialchar]],
           [481, [revert_phrases]],
           [480, [revert_dashes]],
           [479, [revert_question_env]],
           [478, [revert_beamer_lemma]],
           [477, [revert_xarrow]],
           [476, [revert_swissgerman]],
           [475, [revert_smash]],
           [474, [revert_separator]]
          ]
|
|
|
|
|
|
if __name__ == "__main__":
    # This module is driven by the lyx2lyx main script; there is nothing
    # useful to do when it is executed standalone.
    pass
|