2011-05-03 13:12:55 +00:00
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
# This file is part of lyx2lyx
|
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
# Copyright (C) 2011 The LyX team
|
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or
|
|
|
|
# modify it under the terms of the GNU General Public License
|
|
|
|
# as published by the Free Software Foundation; either version 2
|
|
|
|
# of the License, or (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program; if not, write to the Free Software
|
2011-08-25 23:10:36 +00:00
|
|
|
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
2011-05-03 13:12:55 +00:00
|
|
|
|
|
|
|
""" Convert files to the file format generated by lyx 2.1"""
|
|
|
|
|
|
|
|
import re, string
|
|
|
|
import unicodedata
|
|
|
|
import sys, os
|
|
|
|
|
|
|
|
# Uncomment only what you need to import, please.
|
|
|
|
|
2012-11-29 14:34:20 +00:00
|
|
|
from parser_tools import del_token, find_token, find_token_backwards, find_end_of, find_end_of_inset, \
|
2012-02-20 02:10:33 +00:00
|
|
|
find_end_of_layout, find_re, get_option_value, get_value, get_quoted_value, \
|
|
|
|
set_option_value
|
2011-07-23 18:40:21 +00:00
|
|
|
|
2011-05-03 13:12:55 +00:00
|
|
|
#from parser_tools import find_token, find_end_of, find_tokens, \
|
|
|
|
#find_token_exact, find_end_of_inset, find_end_of_layout, \
|
2012-11-29 14:34:20 +00:00
|
|
|
#is_in_inset, del_token, check_token
|
2011-07-23 18:40:21 +00:00
|
|
|
|
2012-04-16 19:40:59 +00:00
|
|
|
from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert, get_ert
|
2011-07-23 18:40:21 +00:00
|
|
|
|
2012-01-05 20:53:48 +00:00
|
|
|
#from lyx2lyx_tools import insert_to_preamble, \
|
2012-04-16 19:40:59 +00:00
|
|
|
# lyx2latex, latex_length, revert_flex_inset, \
|
2011-05-03 13:12:55 +00:00
|
|
|
# revert_font_attrs, hex2ratio, str2bool
|
|
|
|
|
|
|
|
####################################################################
|
|
|
|
# Private helper functions
|
|
|
|
|
|
|
|
#def remove_option(lines, m, option):
|
|
|
|
#''' removes option from line m. returns whether we did anything '''
|
|
|
|
#l = lines[m].find(option)
|
|
|
|
#if l == -1:
|
|
|
|
#return False
|
|
|
|
#val = lines[m][l:].split('"')[1]
|
|
|
|
#lines[m] = lines[m][:l - 1] + lines[m][l+len(option + '="' + val + '"'):]
|
|
|
|
#return True
|
|
|
|
|
|
|
|
|
|
|
|
###############################################################################
|
|
|
|
###
|
|
|
|
### Conversion and reversion routines
|
|
|
|
###
|
|
|
|
###############################################################################
|
|
|
|
|
2011-07-23 18:40:21 +00:00
|
|
|
def revert_visible_space(document):
    "Revert InsetSpace visible into its ERT counterpart"
    # Replace every visible-space inset with the raw LaTeX command in ERT.
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset space \\textvisiblespace{}", pos)
        if pos == -1:
            break
        inset_end = find_end_of_inset(document.body, pos)
        document.body[pos:inset_end + 1] = put_cmd_in_ert("\\textvisiblespace{}")
|
2011-05-03 13:12:55 +00:00
|
|
|
|
|
|
|
|
2011-08-10 03:37:33 +00:00
|
|
|
def convert_undertilde(document):
    " Load undertilde automatically "
    # Pick an anchor line among the other \use_* header settings so the new
    # \use_undertilde line lands in the conventional spot.
    anchor = find_token(document.header, "\\use_mathdots", 0)
    if anchor == -1:
        anchor = find_token(document.header, "\\use_mhchem", 0)
    if anchor == -1:
        anchor = find_token(document.header, "\\use_esint", 0)
    if anchor == -1:
        document.warning("Malformed LyX document: Can't find \\use_mathdots.")
        return
    pkg = find_token(document.preamble, "\\usepackage{undertilde}", 0)
    if pkg == -1:
        # Package was not loaded manually: keep it switched off.
        document.header.insert(anchor + 1, "\\use_undertilde 0")
    else:
        # Package was loaded via the preamble: force-load it and drop the
        # now-redundant preamble line.
        document.header.insert(anchor + 1, "\\use_undertilde 2")
        del document.preamble[pkg]
|
2011-08-10 03:37:33 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_undertilde(document):
    """Load undertilde in the preamble if it is used in the document.

    Reads and removes the \\use_undertilde header line (0 = off, 1 = auto,
    2 = force-load).  In the auto case the body formulas are scanned for
    \\utilde and the package is loaded only when needed.
    """
    # FIX: initialize to auto (1) up front.  Previously `usetilde` was only
    # assigned inside the else-branch, so a document without a
    # \use_undertilde line raised NameError below instead of "assuming auto"
    # as the warning promises.
    usetilde = 1
    undertilde = find_token(document.header, "\\use_undertilde", 0)
    if undertilde == -1:
        document.warning("No \\use_undertilde line. Assuming auto.")
    else:
        val = get_value(document.header, "\\use_undertilde", undertilde)
        del document.header[undertilde]
        try:
            usetilde = int(val)
        except:
            document.warning("Invalid \\use_undertilde value: " + val + ". Assuming auto.")
            # probably usedots has not been changed, but be safe.
            usetilde = 1

    if usetilde == 0:
        # do not load case
        return
    if usetilde == 2:
        # force load case
        add_to_preamble(document, ["\\usepackage{undertilde}"])
        return

    # Auto case: load undertilde only if \utilde occurs in some formula.
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Formula', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
            i += 1
            continue
        code = "\n".join(document.body[i:j])
        if code.find("\\utilde") != -1:
            add_to_preamble(document, ["\\@ifundefined{utilde}{\\usepackage{undertilde}}"])
            return
        i = j
|
|
|
|
|
|
|
|
|
2011-08-29 14:07:30 +00:00
|
|
|
def revert_negative_space(document):
    "Revert InsetSpace negmedspace and negthickspace into its TeX-code counterpart"
    # i tracks \negmedspace insets, j tracks \negthickspace insets; the two
    # searches are interleaved deliberately so each loop turn reverts at most
    # one of each kind.
    i = 0
    j = 0
    reverted = False
    while True:
        i = find_token(document.body, "\\begin_inset space \\negmedspace{}", i)
        if i == -1:
            # No more negmedspace insets: sweep up any remaining
            # negthickspace insets, then finish.
            j = find_token(document.body, "\\begin_inset space \\negthickspace{}", j)
            if j == -1:
                # load amsmath in the preamble if not already loaded if we are at the end of checking
                if reverted == True:
                    i = find_token(document.header, "\\use_amsmath 2", 0)
                    if i == -1:
                        add_to_preamble(document, ["\\@ifundefined{negthickspace}{\\usepackage{amsmath}}"])
                return
        if i == -1:
            # NOTE(review): reached when a negthickspace was just found above
            # but no negmedspace remains — that inset is left unreverted;
            # confirm this early return is intended.
            return
        # Revert the negmedspace inset found at i.
        end = find_end_of_inset(document.body, i)
        subst = put_cmd_in_ert("\\negmedspace{}")
        document.body[i:end + 1] = subst
        # Revert one negthickspace inset as well, if any remains.
        j = find_token(document.body, "\\begin_inset space \\negthickspace{}", j)
        if j == -1:
            return
        end = find_end_of_inset(document.body, j)
        subst = put_cmd_in_ert("\\negthickspace{}")
        document.body[j:end + 1] = subst
        reverted = True
|
|
|
|
|
|
|
|
|
|
|
|
def revert_math_spaces(document):
    "Revert formulas with protected custom space and protected hfills to TeX-code"
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Formula", i)
        if i == -1:
            return
        # Only formulas whose first line contains \hspace* are affected.
        j = document.body[i].find("\\hspace*")
        if j != -1:
            end = find_end_of_inset(document.body, i)
            # Slice off the leading "\begin_inset Formula " prefix (21 chars)
            # and dump the remaining formula text into ERT.
            # NOTE(review): assumes the whole formula sits on this one line —
            # confirm multi-line formulas cannot reach this branch.
            subst = put_cmd_in_ert(document.body[i][21:])
            document.body[i:end + 1] = subst
        i = i + 1
|
|
|
|
|
|
|
|
|
2011-11-07 18:36:56 +00:00
|
|
|
def convert_japanese_encodings(document):
    " Rename the japanese encodings to names understood by platex "
    lyx_to_platex = {
        "EUC-JP-pLaTeX": "euc",
        "JIS-pLaTeX": "jis",
        "SJIS-pLaTeX": "sjis",
    }
    line = find_token(document.header, "\\inputencoding", 0)
    if line == -1:
        return
    enc = get_value(document.header, "\\inputencoding", line)
    new_enc = lyx_to_platex.get(enc)
    if new_enc is not None:
        document.header[line] = "\\inputencoding %s" % new_enc
|
|
|
|
|
|
|
|
|
|
|
|
def revert_japanese_encodings(document):
    " Revert the japanese encodings name changes "
    platex_to_lyx = {
        "euc": "EUC-JP-pLaTeX",
        "jis": "JIS-pLaTeX",
        "sjis": "SJIS-pLaTeX",
    }
    line = find_token(document.header, "\\inputencoding", 0)
    if line == -1:
        return
    enc = get_value(document.header, "\\inputencoding", line)
    old_enc = platex_to_lyx.get(enc)
    if old_enc is not None:
        document.header[line] = "\\inputencoding %s" % old_enc
|
|
|
|
|
|
|
|
|
2011-12-07 22:33:25 +00:00
|
|
|
def revert_justification(document):
    " Revert the \\justification buffer param"
    # del_token returns False when the line was not present at all.
    removed = del_token(document.header, '\\justification', 0)
    if not removed:
        document.warning("Malformed LyX document: Missing \\justification.")
|
|
|
|
|
2011-12-08 23:58:30 +00:00
|
|
|
|
|
|
|
def revert_australian(document):
    "Set English language variants Australian and Newzealand to English"
    if document.language not in ("australian", "newzealand"):
        return
    document.language = "english"
    hdr = find_token(document.header, "\\language", 0)
    if hdr != -1:
        document.header[hdr] = "\\language english"
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang australian", pos)
        if pos != -1:
            document.body[pos] = document.body[pos].replace("\\lang australian", "\\lang english")
        else:
            # No further australian tags: look for newzealand ones.  The scan
            # restarts from the top each time, mirroring the original flow.
            pos = find_token(document.body, "\\lang newzealand", 0)
            if pos == -1:
                return
            document.body[pos] = document.body[pos].replace("\\lang newzealand", "\\lang english")
        pos += 1
|
2011-12-18 21:27:17 +00:00
|
|
|
|
2011-12-07 22:33:25 +00:00
|
|
|
|
2011-12-12 14:40:34 +00:00
|
|
|
def convert_biblio_style(document):
    "Add a sensible default for \\biblio_style based on the citation engine."
    pos = find_token(document.header, "\\cite_engine", 0)
    if pos == -1:
        return
    # The engine may carry a "_authoryear"/"_numerical" suffix; only the base
    # name selects the default BibTeX style.
    engine = get_value(document.header, "\\cite_engine", pos).split("_")[0]
    defaults = {"basic": "plain", "natbib": "plainnat", "jurabib": "jurabib"}
    document.header.insert(pos + 1, "\\biblio_style " + defaults[engine])
|
|
|
|
|
|
|
|
|
|
|
|
def revert_biblio_style(document):
    "BibTeX insets with default option use the style defined by \\biblio_style."
    i = find_token(document.header, "\\biblio_style" , 0)
    if i == -1:
        document.warning("No \\biblio_style line. Nothing to do.")
        return

    # Remember the configured default style and drop the header line, since
    # older formats do not know it.
    default_style = get_value(document.header, "\\biblio_style", i)
    del document.header[i]

    # We are looking for bibtex insets having the default option
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of bibtex inset at line " + str(i))
            i += 1
            # NOTE(review): this aborts the whole scan on one malformed
            # inset; a `continue` seems intended — confirm.
            return
        k = find_token(document.body, "options", i, j)
        if k != -1:
            options = get_quoted_value(document.body, "options", k)
            if "default" in options.split(","):
                # Substitute the remembered style for the "default" keyword.
                document.body[k] = 'options "%s"' \
                    % options.replace("default", default_style)
        i = j
|
|
|
|
|
|
|
|
|
2011-12-18 21:27:17 +00:00
|
|
|
def handle_longtable_captions(document, forward):
    """Add (forward=True) or remove (forward=False) head/foot flags on
    longtable caption rows.

    forward: caption rows that carry no end* flag get endfirsthead="true";
    backward: all end* flags are cleared from caption rows.
    """
    begin_table = 0
    while True:
        begin_table = find_token(document.body, '<lyxtabular version=', begin_table)
        if begin_table == -1:
            break
        end_table = find_end_of(document.body, begin_table, '<lyxtabular', '</lyxtabular>')
        if end_table == -1:
            document.warning("Malformed LyX document: Could not find end of table.")
            begin_table += 1
            continue
        fline = find_token(document.body, "<features", begin_table, end_table)
        if fline == -1:
            document.warning("Can't find features for inset at line " + str(begin_table))
            begin_table += 1
            continue
        p = document.body[fline].find("islongtable")
        if p == -1:
            # no longtable
            begin_table += 1
            continue
        # The row count comes from the <lyxtabular ... rows="N"> attribute.
        numrows = get_option_value(document.body[begin_table], "rows")
        try:
            numrows = int(numrows)
        except:
            document.warning(document.body[begin_table])
            document.warning("Unable to determine rows!")
            begin_table = end_table
            continue
        begin_row = begin_table
        for row in range(numrows):
            begin_row = find_token(document.body, '<row', begin_row, end_table)
            if begin_row == -1:
                document.warning("Can't find row " + str(row + 1))
                break
            end_row = find_end_of(document.body, begin_row, '<row', '</row>')
            if end_row == -1:
                document.warning("Can't find end of row " + str(row + 1))
                break
            if forward:
                # A caption row without any end* flag gets endfirsthead (the
                # value string piggybacks the new attribute onto 'caption').
                if (get_option_value(document.body[begin_row], 'caption') == 'true' and
                    get_option_value(document.body[begin_row], 'endfirsthead') != 'true' and
                    get_option_value(document.body[begin_row], 'endhead') != 'true' and
                    get_option_value(document.body[begin_row], 'endfoot') != 'true' and
                    get_option_value(document.body[begin_row], 'endlastfoot') != 'true'):
                    document.body[begin_row] = set_option_value(document.body[begin_row], 'caption', 'true", endfirsthead="true')
            elif get_option_value(document.body[begin_row], 'caption') == 'true':
                # Backward direction: clear every end* flag on caption rows.
                if get_option_value(document.body[begin_row], 'endfirsthead') == 'true':
                    document.body[begin_row] = set_option_value(document.body[begin_row], 'endfirsthead', 'false')
                if get_option_value(document.body[begin_row], 'endhead') == 'true':
                    document.body[begin_row] = set_option_value(document.body[begin_row], 'endhead', 'false')
                if get_option_value(document.body[begin_row], 'endfoot') == 'true':
                    document.body[begin_row] = set_option_value(document.body[begin_row], 'endfoot', 'false')
                if get_option_value(document.body[begin_row], 'endlastfoot') == 'true':
                    document.body[begin_row] = set_option_value(document.body[begin_row], 'endlastfoot', 'false')
            begin_row = end_row
        # since there could be a tabular inside this one, we
        # cannot jump to end.
        begin_table += 1
|
|
|
|
|
|
|
|
|
|
|
|
def convert_longtable_captions(document):
    "Add a firsthead flag to caption rows"
    # Thin wrapper: the shared helper does the work in the forward direction.
    handle_longtable_captions(document, forward=True)
|
|
|
|
|
|
|
|
|
|
|
|
def revert_longtable_captions(document):
    "remove head/foot flag from caption rows"
    # Thin wrapper: the shared helper does the work in the backward direction.
    handle_longtable_captions(document, forward=False)
|
|
|
|
|
|
|
|
|
2012-01-03 21:26:09 +00:00
|
|
|
def convert_use_packages(document):
    "use_xxx yyy => use_package xxx yyy"
    for pkg in ("amsmath", "esint", "mathdots", "mhchem", "undertilde"):
        token = "\\use_%s" % pkg
        line = find_token(document.header, token, 0)
        if line == -1:
            continue
        setting = get_value(document.header, token, line)
        document.header[line] = "\\use_package %s %s" % (pkg, setting)
|
|
|
|
|
|
|
|
|
|
|
|
def revert_use_packages(document):
    "use_package xxx yyy => use_xxx yyy"
    packages = ["amsmath", "esint", "mathdots", "mhchem", "undertilde"]
    # The order is arbitrary for the use_package version and not all packages
    # need to be present; rewrite them as a complete, correctly ordered run of
    # \use_xxx lines (older LyX versions and lyx2lyx rely on that order).
    insert_at = 0
    for pkg in packages:
        pattern = re.compile(r'(\\use_package\s+%s)' % pkg)
        found = find_re(document.header, pattern, insert_at)
        if found == -1:
            continue
        setting = get_value(document.header, "\\use_package %s" % pkg, found).split()[1]
        del document.header[found]
        insert_at = found
        document.header.insert(insert_at, "\\use_%s %s" % (pkg, setting))
        insert_at += 1
|
|
|
|
|
|
|
|
|
|
|
|
def convert_use_mathtools(document):
    "insert use_package mathtools"
    anchor = find_token(document.header, "\\use_package", 0)
    if anchor == -1:
        document.warning("Malformed LyX document: Can't find \\use_package.")
        return
    pkg = find_token(document.preamble, "\\usepackage{mathtools}", 0)
    if pkg == -1:
        # Not loaded manually: keep it switched off.
        document.header.insert(anchor + 1, "\\use_package mathtools 0")
    else:
        # Loaded via the preamble: force-load and drop the preamble line.
        document.header.insert(anchor + 1, "\\use_package mathtools 2")
        del document.preamble[pkg]
|
2012-01-05 20:53:48 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_use_mathtools(document):
    "remove use_package mathtools"
    pattern = re.compile(r'(\\use_package\s+mathtools)')
    pos = find_re(document.header, pattern, 0)
    setting = "1"  # default is auto
    if pos != -1:
        setting = get_value(document.header, "\\use_package", pos).split()[1]
        del document.header[pos]
    if setting == "2":  # on
        add_to_preamble(document, ["\\usepackage{mathtools}"])
    elif setting == "1":  # auto
        # Load mathtools only if one of its commands occurs in a formula.
        mathtools_cmds = ("mathclap", "mathllap", "mathrlap",
                          "lgathered", "rgathered", "vcentcolon", "dblcolon",
                          "coloneqq", "Coloneqq", "coloneq", "Coloneq", "eqqcolon",
                          "Eqqcolon", "eqcolon", "Eqcolon", "colonapprox",
                          "Colonapprox", "colonsim", "Colonsim")
        pos = 0
        while True:
            pos = find_token(document.body, '\\begin_inset Formula', pos)
            if pos == -1:
                return
            end = find_end_of_inset(document.body, pos)
            if end == -1:
                document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(pos))
                pos += 1
                continue
            formula = "\n".join(document.body[pos:end])
            if any("\\%s" % cmd in formula for cmd in mathtools_cmds):
                add_to_preamble(document, ["\\usepackage{mathtools}"])
                return
            pos = end
|
2012-01-03 21:26:09 +00:00
|
|
|
|
|
|
|
|
2012-01-09 13:16:38 +00:00
|
|
|
def convert_cite_engine_type(document):
    "Determine the \\cite_engine_type from the citation engine."
    pos = find_token(document.header, "\\cite_engine", 0)
    if pos == -1:
        return
    engine = get_value(document.header, "\\cite_engine", pos)
    if "_" in engine:
        # e.g. "natbib_authoryear" -> engine "natbib", type "authoryear"
        engine, engine_type = engine.split("_")
    else:
        engine_type = {"basic": "numerical", "jurabib": "authoryear"}[engine]
    document.header[pos] = "\\cite_engine " + engine
    document.header.insert(pos + 1, "\\cite_engine_type " + engine_type)
|
|
|
|
|
|
|
|
|
|
|
|
def revert_cite_engine_type(document):
    "Natbib had the type appended with an underscore."
    engine_type = "numerical"
    pos = find_token(document.header, "\\cite_engine_type", 0)
    if pos == -1:
        document.warning("No \\cite_engine_type line. Assuming numerical.")
    else:
        engine_type = get_value(document.header, "\\cite_engine_type", pos)
        del document.header[pos]

    # Only the natbib engine carried the type as a suffix.
    pos = find_token(document.header, "\\cite_engine natbib", 0)
    if pos == -1:
        return
    document.header[pos] = "\\cite_engine natbib_" + engine_type
|
|
|
|
|
|
|
|
|
2012-01-23 01:49:49 +00:00
|
|
|
def revert_cancel(document):
    "add cancel to the preamble if necessary"
    cancel_cmds = ("cancelto", "cancel", "bcancel", "xcancel")
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Formula', pos)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(pos))
            pos += 1
            continue
        formula = "\n".join(document.body[pos:end])
        # One occurrence anywhere is enough; load the package and stop.
        if any("\\%s" % cmd in formula for cmd in cancel_cmds):
            add_to_preamble(document, ["\\usepackage{cancel}"])
            return
        pos = end
|
|
|
|
|
|
|
|
|
2012-02-20 02:10:33 +00:00
|
|
|
def revert_verbatim(document):
    " Revert verbatim einvironments completely to TeX-code. "
    i = 0
    consecutive = False
    # Replacement for the *end* of a verbatim paragraph: closes the current
    # layout, emits \end{verbatim} inside the ERT, and closes the ERT inset.
    subst_end = ['\end_layout', '', '\\begin_layout Plain Layout',
                 '\end_layout', '',
                 '\\begin_layout Plain Layout', '', '',
                 '\\backslash', '',
                 'end{verbatim}',
                 '\\end_layout', '', '\\end_inset',
                 '', '', '\\end_layout']
    # Replacement for the *start*: opens an ERT inset containing
    # \begin{verbatim}.
    subst_begin = ['\\begin_layout Standard', '\\noindent',
                   '\\begin_inset ERT', 'status collapsed', '',
                   '\\begin_layout Plain Layout', '', '', '\\backslash',
                   'begin{verbatim}',
                   '\\end_layout', '', '\\begin_layout Plain Layout', '']
    while 1:
        i = find_token(document.body, "\\begin_layout Verbatim", i)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Can't find end of Verbatim layout")
            i += 1
            continue
        # delete all line breaks insets (there are no other insets)
        l = i
        while 1:
            n = find_token(document.body, "\\begin_inset Newline newline", l)
            if n == -1:
                n = find_token(document.body, "\\begin_inset Newline linebreak", l)
                if n == -1:
                    break
            m = find_end_of_inset(document.body, n)
            del(document.body[m:m+1])
            # Turn each newline inset into a layout break inside the ERT.
            document.body[n:n+1] = ['\end_layout', '', '\\begin_layout Plain Layout']
            l += 1
            # The replacement grows the body by two lines; track the layout end.
            j += 1
        # consecutive verbatim environments need to be connected
        k = find_token(document.body, "\\begin_layout Verbatim", j)
        if k == j + 2 and consecutive == False:
            # First of a run of adjacent Verbatim layouts: open the ERT here
            # and merge the following one into it.
            consecutive = True
            document.body[j:j+1] = ['\end_layout', '', '\\begin_layout Plain Layout']
            document.body[i:i+1] = subst_begin
            continue
        if k == j + 2 and consecutive == True:
            # Middle of a run: just merge, the ERT is already open.
            document.body[j:j+1] = ['\end_layout', '', '\\begin_layout Plain Layout']
            del(document.body[i:i+1])
            continue
        if k != j + 2 and consecutive == True:
            # Last of a run: close the ERT.
            document.body[j:j+1] = subst_end
            # the next paragraph must not be indented
            # (j+19 is the line right after the inserted subst_end block)
            document.body[j+19:j+19] = ['\\noindent']
            del(document.body[i:i+1])
            consecutive = False
            continue
        else:
            # Stand-alone verbatim environment: wrap it completely.
            document.body[j:j+1] = subst_end
            # the next paragraph must not be indented
            document.body[j+19:j+19] = ['\\noindent']
            document.body[i:i+1] = subst_begin
|
|
|
|
|
|
|
|
|
2012-03-06 07:54:22 +00:00
|
|
|
def revert_tipa(document):
    " Revert native TIPA insets to mathed or ERT. "
    i = 0
    while 1:
        i = find_token(document.body, "\\begin_inset IPA", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Can't find end of IPA inset")
            i += 1
            continue
        Multipar = False
        # Locate the first embedded layout to inspect the inset's content.
        n = find_token(document.body, "\\begin_layout", i, j)
        if n == -1:
            document.warning("Malformed lyx document: IPA inset has no embedded layout")
            i += 1
            continue
        m = find_end_of_layout(document.body, n)
        if m == -1:
            document.warning("Malformed lyx document: Can't find end of embedded layout")
            i += 1
            continue
        content = document.body[n+1:m]
        # A second layout after the first, or a multi-line first layout,
        # means the inset holds more than one paragraph.
        p = find_token(document.body, "\\begin_layout", m, j)
        if p != -1 or len(content) > 1:
            Multipar = True
            content = document.body[i+1:j]
        if Multipar:
            # IPA insets with multiple pars need to be wrapped by \begin{IPA}...\end{IPA}
            document.body[i:j+1] = ['\\end_layout', '', '\\begin_layout Standard'] + put_cmd_in_ert("\\begin{IPA}") + ['\\end_layout'] + content + ['\\begin_layout Standard'] + put_cmd_in_ert("\\end{IPA}")
            add_to_preamble(document, ["\\usepackage{tipa,tipx}"])
        else:
            # single-par IPA insets can be reverted to mathed
            document.body[i:j+1] = ["\\begin_inset Formula $\\text{\\textipa{" + content[0] + "}}$", "\\end_inset"]
        i = j
|
|
|
|
|
|
|
|
|
2012-03-16 01:29:37 +00:00
|
|
|
def revert_cell_rotation(document):
    """Revert cell rotations to TeX-code.

    rotate="0" is dropped, rotate="90" becomes the old rotate="true", and any
    other angle is removed from the cell and wrapped in a turn environment via
    ERT (which requires the rotating package, added in the finally block).
    """
    load_rotating = False
    i = 0
    # One pattern serves all branches (hoisted out of the loop).
    rgx = re.compile(r' rotate="[^"]+?"')
    try:
        while True:
            # first, let's find out if we need to do anything
            i = find_token(document.body, '<cell ', i)
            if i == -1:
                return
            j = document.body[i].find('rotate="')
            if j != -1:
                k = document.body[i].find('"', j + 8)
                value = document.body[i][j + 8 : k]
                if value == "0":
                    # remove rotate option
                    document.body[i] = rgx.sub('', document.body[i])
                elif value == "90":
                    # FIX: keep the leading space in the replacement; the
                    # pattern consumes it, so omitting it fused the rotate
                    # attribute with the preceding one.
                    document.body[i] = rgx.sub(' rotate="true"', document.body[i])
                else:
                    load_rotating = True
                    # remove rotate option
                    document.body[i] = rgx.sub('', document.body[i])
                    # write ERT around the cell content (end first so the
                    # earlier insertion does not shift the offset)
                    document.body[i + 5 : i + 5] = \
                        put_cmd_in_ert("\\end{turn}")
                    document.body[i + 4 : i + 4] = \
                        put_cmd_in_ert("\\begin{turn}{" + value + "}")
            i += 1
    finally:
        if load_rotating:
            # FIX: "\\usepackage" — the original single backslash formed an
            # invalid \u escape (SyntaxError on Python 3).
            add_to_preamble(document, ["\\@ifundefined{turnbox}{\\usepackage{rotating}}{}"])
|
|
|
|
|
|
|
|
|
|
|
|
def convert_cell_rotation(document):
    'Convert cell rotation statements from "true" to "90"'
    attr_re = re.compile(r'rotate="[^"]+?"')
    pos = 0
    while True:
        # first, let's find out if we need to do anything
        pos = find_token(document.body, '<cell ', pos)
        if pos == -1:
            return
        if 'rotate="true"' in document.body[pos]:
            # convert "true" to "90"
            document.body[pos] = attr_re.sub('rotate="90"', document.body[pos])
        pos += 1
|
|
|
|
|
|
|
|
i += 1
|
|
|
|
|
|
|
|
|
2012-03-21 22:04:45 +00:00
|
|
|
def revert_table_rotation(document):
    """Revert table rotations to TeX-code.

    rotate="0" is dropped, rotate="90" becomes the old rotate="true", and any
    other angle is removed and the whole table is wrapped in a turn
    environment via ERT (requires the rotating package, added in finally).
    """
    load_rotating = False
    i = 0
    try:
        while True:
            # first, let's find out if we need to do anything
            i = find_token(document.body, '<features ', i)
            if i == -1:
                return
            j = document.body[i].find('rotate="')
            if j != -1:
                # FIX: search for the end of *this* table starting at the
                # current line index i; the original passed j, which is a
                # column offset within the line, not a line number.
                end_table = find_token(document.body, '</lyxtabular>', i)
                k = document.body[i].find('"', j + 8)
                value = document.body[i][j + 8 : k]
                if value == "0":
                    rgx = re.compile(r' rotate="[^"]+?"')
                    # remove rotate option
                    document.body[i] = rgx.sub('', document.body[i])
                elif value == "90":
                    rgx = re.compile(r'rotate="[^"]+?"')
                    document.body[i] = rgx.sub('rotate="true"', document.body[i])
                else:
                    rgx = re.compile(r' rotate="[^"]+?"')
                    load_rotating = True
                    # remove rotate option
                    document.body[i] = rgx.sub('', document.body[i])
                    # write ERT: \end{turn} after the table, \begin{turn}
                    # before it (end first so offsets stay valid)
                    document.body[end_table + 3 : end_table + 3] = \
                        put_cmd_in_ert("\\end{turn}")
                    document.body[i - 2 : i - 2] = \
                        put_cmd_in_ert("\\begin{turn}{" + value + "}")
            i += 1
    finally:
        if load_rotating:
            # FIX: "\\usepackage" — the original single backslash formed an
            # invalid \u escape (SyntaxError on Python 3).
            add_to_preamble(document, ["\\@ifundefined{turnbox}{\\usepackage{rotating}}{}"])
|
|
|
|
|
|
|
|
|
|
|
|
def convert_table_rotation(document):
    'Convert table rotation statements from "true" to "90"'
    attr_re = re.compile(r'rotate="[^"]+?"')
    pos = 0
    while True:
        # first, let's find out if we need to do anything
        pos = find_token(document.body, '<features ', pos)
        if pos == -1:
            return
        if 'rotate="true"' in document.body[pos]:
            # convert "true" to "90"
            document.body[pos] = attr_re.sub('rotate="90"', document.body[pos])
        pos += 1
|
|
|
|
|
|
|
|
i += 1
|
|
|
|
|
|
|
|
|
2012-04-16 19:40:59 +00:00
|
|
|
def convert_listoflistings(document):
    r'Convert ERT \lstlistoflistings to TOC lstlistoflistings inset'
    # We can support roundtrip because the command is so simple.
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset ERT", pos)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed lyx document: Can't find end of ERT inset")
            pos += 1
            continue
        if get_ert(document.body, pos) == "\\lstlistoflistings{}":
            document.body[pos:end] = ["\\begin_inset CommandInset toc",
                                      "LatexCommand lstlistoflistings", ""]
            pos += 4
        else:
            pos = end + 1
|
|
|
|
|
|
|
|
|
|
|
|
def revert_listoflistings(document):
    r'Convert TOC lstlistoflistings inset to ERT lstlistoflistings'
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset toc", pos)
        if pos == -1:
            return
        if document.body[pos + 1] == "LatexCommand lstlistoflistings":
            end = find_end_of_inset(document.body, pos)
            if end == -1:
                document.warning("Malformed lyx document: Can't find end of TOC inset")
                pos += 1
                continue
            document.body[pos:end + 1] = put_cmd_in_ert("\\lstlistoflistings{}")
            add_to_preamble(document, ["\\usepackage{listings}"])
        pos += 1
|
|
|
|
|
|
|
|
|
2012-05-06 18:48:04 +00:00
|
|
|
def convert_use_amssymb(document):
    "insert use_package amssymb"
    amsmath_re = re.compile(r'(\\use_package\s+amsmath)')
    anchor = find_re(document.header, amsmath_re, 0)
    if anchor == -1:
        document.warning("Malformed LyX document: Can't find \\use_package amsmath.")
        return
    setting = get_value(document.header, "\\use_package", anchor).split()[1]
    try:
        useamsmath = int(setting)
    except:
        document.warning("Invalid \\use_package amsmath: " + setting + ". Assuming auto.")
        useamsmath = 1
    pkg = find_token(document.preamble, "\\usepackage{amssymb}", 0)
    if pkg == -1:
        # Not loaded manually: amssymb follows the amsmath setting.
        document.header.insert(anchor + 1, "\\use_package amssymb %d" % useamsmath)
    else:
        # Loaded via the preamble: force-load and drop the preamble line.
        document.header.insert(anchor + 1, "\\use_package amssymb 2")
        del document.preamble[pkg]
|
|
|
|
|
|
|
|
|
|
|
|
def revert_use_amssymb(document):
    "remove use_package amssymb"
    amsmath_re = re.compile(r'(\\use_package\s+amsmath)')
    amssymb_re = re.compile(r'(\\use_package\s+amssymb)')
    amsmath_pos = find_re(document.header, amsmath_re, 0)
    amssymb_pos = find_re(document.header, amssymb_re, 0)
    amsmath_val = "1"  # default is auto
    amssymb_val = "1"  # default is auto
    if amsmath_pos != -1:
        amsmath_val = get_value(document.header, "\\use_package", amsmath_pos).split()[1]
    if amssymb_pos != -1:
        amssymb_val = get_value(document.header, "\\use_package", amssymb_pos).split()[1]
        del document.header[amssymb_pos]
    # Only force-load amssymb when it was on while amsmath was not.
    if amsmath_val != amssymb_val and amssymb_val == "2":  # on
        add_to_preamble(document, ["\\usepackage{amssymb}"])
|
|
|
|
|
|
|
|
|
2012-06-08 00:37:36 +00:00
|
|
|
def revert_ancientgreek(document):
    "Set the document language for ancientgreek to greek"

    # Downgrade the global document language first ...
    if document.language == "ancientgreek":
        document.language = "greek"
        hpos = find_token(document.header, "\\language", 0)
        if hpos != -1:
            document.header[hpos] = "\\language greek"
    # ... then rewrite every local language switch in the body.
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang ancientgreek", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\lang ancientgreek", "\\lang greek")
        pos += 1
def revert_languages(document):
    "Set the document language for new supported languages to English"

    languages = [
        "coptic", "divehi", "hindi", "kurmanji", "lao", "marathi", "occitan", "sanskrit",
        "syriac", "tamil", "telugu", "urdu"
    ]
    for lang in languages:
        # Replace the global document language if it is one of the new ones.
        if document.language == lang:
            document.language = "english"
            hpos = find_token(document.header, "\\language", 0)
            if hpos != -1:
                document.header[hpos] = "\\language english"
        # Replace all local language switches in the body.
        token = "\\lang " + lang
        bpos = 0
        while True:
            bpos = find_token(document.body, token, bpos)
            if bpos == -1:
                break
            document.body[bpos] = document.body[bpos].replace(token, "\\lang english")
            bpos += 1
def convert_armenian(document):
    "Use polyglossia and thus non-TeX fonts for Armenian"

    if document.language != "armenian":
        return
    pos = find_token(document.header, "\\use_non_tex_fonts", 0)
    if pos != -1:
        # polyglossia requires non-TeX fonts, so force them on.
        document.header[pos] = "\\use_non_tex_fonts true"
def revert_armenian(document):
    "Use ArmTeX and thus TeX fonts for Armenian"

    if document.language != "armenian":
        return
    pos = find_token(document.header, "\\use_non_tex_fonts", 0)
    if pos != -1:
        # ArmTeX works with classic TeX fonts, so switch them back on.
        document.header[pos] = "\\use_non_tex_fonts false"
def revert_libertine(document):
    " Revert native libertine font definition to LaTeX "

    # Only relevant when classic TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    roman = find_token(document.header, "\\font_roman libertine", 0)
    if roman == -1:
        return
    osf_line = find_token(document.header, "\\font_osf true", 0)
    if osf_line != -1:
        # Old-style figures are the package default; just reset the flag.
        document.header[osf_line] = "\\font_osf false"
        pkg = "\\usepackage{libertine-type1}"
    else:
        # Lining figures must be requested explicitly.
        pkg = "\\usepackage[lining]{libertine-type1}"
    add_to_preamble(document, [pkg])
    document.header[roman] = "\\font_roman default"
def revert_txtt(document):
    " Revert native txtt font definition to LaTeX "

    # Only relevant when classic TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    tt = find_token(document.header, "\\font_typewriter txtt", 0)
    if tt == -1:
        return
    add_to_preamble(document, ["\\renewcommand{\\ttdefault}{txtt}"])
    document.header[tt] = "\\font_typewriter default"
def revert_mathdesign(document):
    " Revert native mathdesign font definition to LaTeX "

    # Only relevant when classic TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Map LyX font name -> mathdesign package option.
        mathdesign_dict = {
        "mdbch": "charter",
        "mdput": "utopia",
        "mdugm": "garamond"
        }
        i = find_token(document.header, "\\font_roman", 0)
        if i == -1:
            return
        val = get_value(document.header, "\\font_roman", i)
        if val in mathdesign_dict.keys():
            preamble = "\\usepackage[%s" % mathdesign_dict[val]
            # "expert" option is needed when osf or small caps were requested;
            # both flags are reset since the package now handles them.
            expert = False
            j = find_token(document.header, "\\font_osf true", 0)
            if j != -1:
                expert = True
                document.header[j] = "\\font_osf false"
            l = find_token(document.header, "\\font_sc true", 0)
            if l != -1:
                expert = True
                document.header[l] = "\\font_sc false"
            if expert:
                preamble += ",expert"
            preamble += "]{mathdesign}"
            add_to_preamble(document, [preamble])
            document.header[i] = "\\font_roman default"
def revert_texgyre(document):
    " Revert native TeXGyre font definition to LaTeX "

    # Only relevant when classic TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        texgyre_fonts = ["tgadventor", "tgbonum", "tgchorus", "tgcursor", \
                         "tgheros", "tgpagella", "tgschola", "tgtermes"]
        # The identical revert logic applied to roman, sans and typewriter
        # slots; handle them in one loop instead of three copied blocks.
        for token in ["\\font_roman", "\\font_sans", "\\font_typewriter"]:
            i = find_token(document.header, token, 0)
            if i != -1:
                val = get_value(document.header, token, i)
                if val in texgyre_fonts:
                    # The TeXGyre LaTeX package name equals the LyX font name.
                    preamble = "\\usepackage{%s}" % val
                    add_to_preamble(document, [preamble])
                    document.header[i] = token + " default"
def revert_ipadeco(document):
    " Revert IPA decorations to ERT "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset IPADeco", i)
        if i == -1:
            return
        end = find_end_of_inset(document.body, i)
        if end == -1:
            document.warning("Can't find end of inset at line " + str(i))
            i += 1
            continue
        line = document.body[i]
        rx = re.compile(r'\\begin_inset IPADeco (.*)$')
        m = rx.match(line)
        # NOTE(review): m is used without a None check; the find_token above
        # guarantees the prefix matches, so group(1) is safe here.
        decotype = m.group(1)
        if decotype != "toptiebar" and decotype != "bottomtiebar":
            document.warning("Invalid IPADeco type: " + decotype)
            i = end
            continue
        blay = find_token(document.body, "\\begin_layout Plain Layout", i, end)
        if blay == -1:
            document.warning("Can't find layout for inset at line " + str(i))
            i = end
            continue
        bend = find_end_of_layout(document.body, blay)
        if bend == -1:
            document.warning("Malformed LyX document: Could not find end of IPADeco inset's layout.")
            i = end
            continue
        # ERT opening "\decotype{" replaces the inset header + layout begin;
        # ERT "}" replaces the layout end + inset end.
        substi = ["\\begin_inset ERT", "status collapsed", "",
                  "\\begin_layout Plain Layout", "", "", "\\backslash",
                  decotype + "{", "\\end_layout", "", "\\end_inset"]
        substj = ["\\size default", "", "\\begin_inset ERT", "status collapsed", "",
                  "\\begin_layout Plain Layout", "", "}", "\\end_layout", "", "\\end_inset"]
        # do the later one first so as not to mess up the numbering
        document.body[bend:end + 1] = substj
        document.body[i:blay + 1] = substi
        # Advance past both substitutions, accounting for the size difference
        # between the replaced spans and their replacements.
        i = end + len(substi) + len(substj) - (end - bend) - (blay - i) - 2
        add_to_preamble(document, "\\usepackage{tipa}")
def revert_ipachar(document):
    ' Revert \\IPAChar to ERT '
    # One IPAChar per matched line is unwrapped into an explicit ERT inset.
    pattern = re.compile(r'(.*)\\IPAChar \\(\w+\{\w+\})(.*)')
    pos = 0
    converted = False
    while pos < len(document.body):
        match = pattern.match(document.body[pos])
        if not match:
            pos = pos + 1
            continue
        converted = True
        replacement = [match.group(1),
                       '\\begin_inset ERT',
                       'status collapsed', '',
                       '\\begin_layout Standard',
                       '', '', '\\backslash',
                       match.group(2),
                       '\\end_layout', '',
                       '\\end_inset', '',
                       match.group(3)]
        document.body[pos: pos+1] = replacement
        pos = pos + len(replacement)
    if converted:
        # The tone package provides the reverted commands.
        add_to_preamble(document, "\\usepackage{tone}")
def revert_minionpro(document):
    " Revert native MinionPro font definition to LaTeX "

    # Only relevant when classic TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    roman = find_token(document.header, "\\font_roman minionpro", 0)
    if roman == -1:
        return
    osf_line = find_token(document.header, "\\font_osf true", 0)
    if osf_line != -1:
        # Old-style figures are MinionPro's default; just reset the flag.
        document.header[osf_line] = "\\font_osf false"
        pkg = "\\usepackage{MinionPro}"
    else:
        # Lining figures must be requested explicitly.
        pkg = "\\usepackage[lf]{MinionPro}"
    add_to_preamble(document, [pkg])
    document.header[roman] = "\\font_roman default"
def revert_mathfonts(document):
    " Revert native math font definitions to LaTeX "

    i = find_token(document.header, "\\font_math", 0)
    if i == -1:
        return
    # The preamble replacement is only needed with classic TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        val = get_value(document.header, "\\font_math", i)
        if val == "eulervm":
            add_to_preamble(document, "\\usepackage{eulervm}")
        elif val == "default":
            # "default" math with one of these roman fonts means the text
            # font must be re-established via the preamble (the "-osf"
            # variants carry the old-style-figures flag).
            mathfont_dict = {
            "lmodern": "\\renewcommand{\\rmdefault}{lmr}",
            "minionpro": "\\usepackage[onlytext,lf]{MinionPro}",
            "minionpro-osf": "\\usepackage[onlytext]{MinionPro}",
            "palatino": "\\renewcommand{\\rmdefault}{ppl}",
            "palatino-osf": "\\renewcommand{\\rmdefault}{pplj}",
            "times": "\\renewcommand{\\rmdefault}{ptm}",
            "utopia": "\\renewcommand{\\rmdefault}{futs}",
            "utopia-osf": "\\renewcommand{\\rmdefault}{futj}",
            }
            j = find_token(document.header, "\\font_roman", 0)
            if j != -1:
                rm = get_value(document.header, "\\font_roman", j)
                k = find_token(document.header, "\\font_osf true", 0)
                if k != -1:
                    rm += "-osf"
                if rm in mathfont_dict.keys():
                    add_to_preamble(document, mathfont_dict[rm])
                    document.header[j] = "\\font_roman default"
                    if k != -1:
                        document.header[k] = "\\font_osf false"
    # The \font_math header line does not exist in the older format:
    # remove it unconditionally.
    del document.header[i]
def revert_mdnomath(document):
    " Revert mathdesign and fourier without math "

    # Only relevant when classic TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Map new LyX font name -> old mathdesign name.
        mathdesign_dict = {
        "md-charter": "mdbch",
        "md-utopia": "mdput",
        "md-garamond": "mdugm"
        }
        i = find_token(document.header, "\\font_roman", 0)
        if i == -1:
            return
        val = get_value(document.header, "\\font_roman", i)
        if val in mathdesign_dict.keys():
            j = find_token(document.header, "\\font_math", 0)
            if j == -1:
                document.header[i] = "\\font_roman %s" % mathdesign_dict[val]
                # FIX: return here. Previously the code fell through and
                # called get_value(..., j) with j == -1, which only happened
                # to work because find_token tolerates a negative start index.
                return
            mval = get_value(document.header, "\\font_math", j)
            if mval == "default":
                # No native math font: fall back to default roman and set
                # the text font family in the preamble instead.
                document.header[i] = "\\font_roman default"
                add_to_preamble(document, "\\renewcommand{\\rmdefault}{%s}" % mathdesign_dict[val])
            else:
                document.header[i] = "\\font_roman %s" % mathdesign_dict[val]
def convert_mdnomath(document):
    " Change mathdesign font name "

    # Only relevant when classic TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    # Map old mathdesign name -> new LyX font name.
    renames = {
    "mdbch": "md-charter",
    "mdput": "md-utopia",
    "mdugm": "md-garamond"
    }
    pos = find_token(document.header, "\\font_roman", 0)
    if pos == -1:
        return
    current = get_value(document.header, "\\font_roman", pos)
    if current in renames:
        document.header[pos] = "\\font_roman %s" % renames[current]
def revert_newtxmath(document):
    " Revert native newtxmath definitions to LaTeX "

    math_line = find_token(document.header, "\\font_math", 0)
    if math_line == -1:
        return
    # Only relevant when classic TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    # Map LyX math font name -> preamble replacement.
    packages = {
    "libertine-ntxm": "\\usepackage[libertine]{newtxmath}",
    "minion-ntxm": "\\usepackage[minion]{newtxmath}",
    "newtxmath": "\\usepackage{newtxmath}",
    }
    val = get_value(document.header, "\\font_math", math_line)
    if val in packages:
        add_to_preamble(document, packages[val])
        document.header[math_line] = "\\font_math auto"
def revert_biolinum(document):
    " Revert native biolinum font definition to LaTeX "

    # Only relevant when classic TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    sans = find_token(document.header, "\\font_sans biolinum", 0)
    if sans == -1:
        return
    # Old-style figures are the package default; lining figures need [lf].
    # (Unlike revert_libertine, the \font_osf flag itself is kept as is.)
    if find_token(document.header, "\\font_osf true", 0) != -1:
        pkg = "\\usepackage{biolinum-type1}"
    else:
        pkg = "\\usepackage[lf]{biolinum-type1}"
    add_to_preamble(document, [pkg])
    document.header[sans] = "\\font_sans default"
def revert_uop(document):
    " Revert native URW Classico (Optima) font definition to LaTeX "

    # Only relevant when classic TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    sans = find_token(document.header, "\\font_sans uop", 0)
    if sans == -1:
        return
    add_to_preamble(document, ["\\renewcommand{\\sfdefault}{uop}"])
    document.header[sans] = "\\font_sans default"
def convert_latexargs(document):
    " Convert InsetArgument to new syntax "

    if find_token(document.body, "\\begin_inset Argument", 0) == -1:
        # nothing to do.
        return

    # A list of layouts (document classes) with only optional or no arguments.
    # These can be safely converted to the new syntax
    # (I took the liberty to add some of my personal layouts/modules here; JSP)
    safe_layouts = ["aa", "aapaper", "aastex", "achemso", "acmsiggraph", "AEA",
                    "agu-dtd", "agums", "agutex", "amsart", "amsbook", "apa",
                    "arab-article", "armenian-article", "article-beamer", "article",
                    "beamer", "book", "broadway", "chess", "cl2emult", "ctex-article",
                    "ctex-book", "ctex-report", "dinbrief", "docbook-book", "docbook-chapter",
                    "docbook", "docbook-section", "doublecol-new", "dtk", "ectaart", "egs",
                    "elsarticle", "elsart", "entcs", "europecv", "extarticle", "extbook",
                    "extletter", "extreport", "foils", "frletter", "g-brief2", "g-brief",
                    "heb-article", "heb-letter", "hollywood", "IEEEtran", "ijmpc", "ijmpd",
                    "iopart", "isprs", "jarticle", "jasatex", "jbook", "jgrga", "jreport",
                    "jsarticle", "jsbeamer", "jsbook", "jss", "kluwer", "latex8", "letter", "lettre",
                    "literate-article", "literate-book", "literate-report", "llncs", "ltugboat",
                    "memoir", "moderncv", "mwart", "mwbk", "mwrep", "paper", "powerdot",
                    "recipebook", "report", "revtex4", "revtex", "scrartcl", "scrarticle-beamer",
                    "scrbook", "scrlettr", "scrlttr2", "scrreprt", "seminar", "siamltex",
                    "sigplanconf", "simplecv", "singlecol", "singlecol-new", "slides", "spie",
                    "svglobal3", "svglobal", "svjog", "svmono", "svmult", "svprobth", "tarticle",
                    "tbook", "treport", "tufte-book", "tufte-handout"]
    # A list of "safe" modules, same as above
    safe_modules = ["biblatex", "beameraddons", "beamersession", "braille", "customHeadersFooters",
                    "endnotes", "enumitem", "eqs-within-sections", "figs-within-sections", "fix-cm",
                    "fixltx2e", "foottoend", "hanging", "jscharstyles", "knitr", "lilypond",
                    "linguistics", "linguisticx", "logicalmkup", "minimalistic", "nomindex", "noweb",
                    "pdfcomment", "sweave", "tabs-within-sections", "theorems-ams-bytype",
                    "theorems-ams-extended-bytype", "theorems-ams-extended", "theorems-ams", "theorems-bytype",
                    "theorems-chap-bytype", "theorems-chap", "theorems-named", "theorems-sec-bytype",
                    "theorems-sec", "theorems-starred", "theorems-std", "todonotes"]
    # Modules we need to take care of
    caveat_modules = ["initials"]
    # information about the relevant styles in caveat_modules (number of opt and req args)
    # use this if we get more caveat_modules. For now, use hard coding (see below).
    # initials = [{'Layout' : 'Initial', 'opt' : 1, 'req' : 1}]

    # Is this a known safe layout?
    safe_layout = document.textclass in safe_layouts
    if not safe_layout:
        document.warning("Lyx2lyx knows nothing about textclass '%s'. "
                         "Please check if short title insets have been converted correctly."
                         % document.textclass)
    # Do we use unsafe or unknown modules
    mods = document.get_module_list()
    unknown_modules = False
    used_caveat_modules = list()
    for mod in mods:
        if mod in safe_modules:
            continue
        if mod in caveat_modules:
            used_caveat_modules.append(mod)
            continue
        unknown_modules = True
        document.warning("Lyx2lyx knows nothing about module '%s'. "
                         "Please check if short title insets have been converted correctly."
                         % mod)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Argument", i)
        if i == -1:
            return

        if not safe_layout or unknown_modules:
            # We cannot do more here since we have no access to this layout.
            # InsetArgument itself will do the real work
            # (see InsetArgument::updateBuffer())
            document.body[i] = "\\begin_inset Argument 999"
            i = i + 1
            continue

        # Find beginning and end of the containing paragraph
        parbeg = find_token_backwards(document.body, "\\begin_layout", i)
        while get_value(document.body, "\\begin_layout", parbeg) == "Plain Layout":
            # Probably a preceding inset. Continue searching ...
            parbeg = find_token_backwards(document.body, "\\begin_layout", parbeg - 1)
        if parbeg == -1:
            document.warning("Malformed lyx document: Can't find parent paragraph layout")
            continue
        parend = find_end_of_layout(document.body, parbeg)
        if parend == -1:
            document.warning("Malformed lyx document: Can't find end of parent paragraph layout")
            continue
        # allowed_opts/first_req stay -1 unless a caveat module applies.
        allowed_opts = -1
        first_req = -1
        if len(used_caveat_modules) > 0:
            # We know for now that this must be the initials module with the Initial layout
            # If we get more such modules, we need some automating.
            layoutname = get_value(document.body, "\\begin_layout", parbeg)
            if layoutname == "Initial":
                # Layout has 1 opt and 1 req arg.
                # Count the actual arguments
                actualargs = 0
                for p in range(parbeg, parend):
                    if document.body[p] == "\\begin_inset Argument":
                        actualargs += 1
                if actualargs == 1:
                    allowed_opts = 0
                    first_req = 2
        # Collect all arguments in this paragraph and number them in order.
        argnr = 0
        for p in range(parbeg, parend):
            if document.body[p] == "\\begin_inset Argument":
                argnr += 1
                if allowed_opts != -1:
                    # We have less arguments than opt + required.
                    # required must take precedence.
                    if argnr > allowed_opts and argnr < first_req:
                        argnr = first_req
                document.body[p] = "\\begin_inset Argument %d" % argnr
        i = i + 1
def revert_latexargs(document):
    " Revert InsetArgument to old syntax "

    i = 0
    rx = re.compile(r'^\\begin_inset Argument (\d+)$')
    # Maps argument number -> extracted inset lines for re-insertion in order.
    args = dict()
    while True:
        # Search for Argument insets
        i = find_token(document.body, "\\begin_inset Argument", i)
        if i == -1:
            break
        m = rx.match(document.body[i])
        if not m:
            # No ID: inset already reverted
            i = i + 1
            continue
        # Find beginning and end of the containing paragraph
        parbeg = find_token_backwards(document.body, "\\begin_layout", i)
        if parbeg == -1:
            document.warning("Malformed lyx document: Can't find parent paragraph layout")
            continue
        parend = find_end_of_layout(document.body, parbeg)
        if parend == -1:
            document.warning("Malformed lyx document: Can't find end of parent paragraph layout")
            continue
        # Collect all arguments in this paragraph, deleting them in place;
        # realparend tracks the shrinking paragraph end.
        realparend = parend
        for p in range(parbeg, parend):
            m = rx.match(document.body[p])
            if m:
                val = int(m.group(1))
                j = find_end_of_inset(document.body, p)
                # Revert to old syntax
                document.body[p] = "\\begin_inset Argument"
                if j == -1:
                    document.warning("Malformed lyx document: Can't find end of Argument inset")
                    continue
                if val > 0:
                    args[val] = document.body[p : j + 1]
                # Adjust range end
                realparend = realparend - len(document.body[p : j + 1])
                # Remove arg inset at this position
                del document.body[p : j + 1]
                if p >= realparend:
                    break
        # Now sort the arg insets
        subst = [""]
        for f in sorted(args):
            subst += args[f]
            del args[f]
        # Insert the sorted arg insets at paragraph begin
        document.body[parbeg + 1:parbeg + 1] = subst

        i = parbeg + 1 + len(subst)
def revert_Argument_to_TeX_brace(document, line, n, nmax, environment):
    '''
    Reverts an InsetArgument to TeX-code
    usage:
    revert_Argument_to_TeX_brace(document, LineOfBeginLayout, StartArgument, EndArgument, isEnvironment)
    LineOfBeginLayout is the line of the \\begin_layout statement
    StartArgument is the number of the first argument that needs to be converted
    EndArgument is the number of the last argument that needs to be converted or the last defined one
    isEnvironment must be true, if the layout is for a LaTeX environment
    '''
    lineArg = 0
    while lineArg != -1 and n < nmax + 1:
        lineArg = find_token(document.body, "\\begin_inset Argument " + str(n), line)
        if lineArg != -1:
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", lineArg)
            # we have to assure that no other inset is in the Argument
            beginInset = find_token(document.body, "\\begin_inset", beginPlain)
            endInset = find_token(document.body, "\\end_inset", beginPlain)
            k = beginPlain + 1
            l = k
            # Skip over nested insets until endInset is the Argument's own
            # \end_inset (i.e. no further \begin_inset precedes it).
            while beginInset < endInset and beginInset != -1:
                beginInset = find_token(document.body, "\\begin_inset", k)
                endInset = find_token(document.body, "\\end_inset", l)
                k = beginInset + 1
                l = endInset + 1
            if environment == False:
                # Command argument: close with "}{" so consecutive arguments
                # chain together, and drop the inset header entirely.
                document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}{")
                del(document.body[lineArg : beginPlain + 1])
            else:
                # Environment argument: wrap the content in ERT "{" ... "}".
                document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}")
                document.body[lineArg : beginPlain + 1] = put_cmd_in_ert("{")
            n = n + 1
def revert_IEEEtran(document):
    '''
    Reverts InsetArgument of
    Page headings
    Biography
    Biography without photo
    to TeX-code
    '''
    if document.textclass == "IEEEtran":
        # Three independent cursors scan the body in lock-step; each is set
        # to -1 once its layout is exhausted, and the loop ends when all are.
        i = 0
        j = 0
        k = 0
        while True:
            if i != -1:
                i = find_token(document.body, "\\begin_layout Page headings", i)
                if i != -1:
                    revert_Argument_to_TeX_brace(document, i, 1, 1, False)
                    i = i + 1
            if j != -1:
                j = find_token(document.body, "\\begin_layout Biography without photo", j)
                if j != -1:
                    revert_Argument_to_TeX_brace(document, j, 1, 1, True)
                    j = j + 1
            if k != -1:
                # "Biography" is a prefix of "Biography without photo":
                # skip hits that actually belong to the longer layout name.
                k = find_token(document.body, "\\begin_layout Biography", k)
                kA = find_token(document.body, "\\begin_layout Biography without photo", k)
                if k == kA and k != -1:
                    k = k + 1
                    continue
                if k != -1:
                    # start with the second argument, therefore 2
                    revert_Argument_to_TeX_brace(document, k, 2, 2, True)
                    k = k + 1
            if i == -1 and j == -1 and k == -1:
                return
def convert_TeX_brace_to_Argument(document, line, n, nmax, inset, environment):
    '''
    Converts TeX code to an InsetArgument
    !!! Be careful if the braces are different in your case as expected here:
    - "}{" separates mandatory arguments of commands
    - "}" + "{" separates mandatory arguments of commands
    - "}" + " " + "{" separates mandatory arguments of commands
    - { and } surround a mandatory argument of an environment
    usage:
    convert_TeX_brace_to_Argument(document, LineOfBeginLayout/Inset, StartArgument, EndArgument, isInset, isEnvironment)
    LineOfBeginLayout/Inset is the line of the \\begin_layout or \\begin_inset statement
    StartArgument is the number of the first ERT that needs to be converted
    EndArgument is the number of the last ERT that needs to be converted
    isInset must be true, if braces inside an InsetLayout needs to be converted
    isEnvironment must be true, if the layout is for a LaTeX environment

    Note: this routine can currently handle only one mandatory argument of environments
    '''
    lineArg = line
    while lineArg != -1 and n < nmax + 1:
        lineArg = find_token(document.body, "\\begin_inset ERT", lineArg)
        if environment == False and lineArg != -1:
            bracePair = find_token(document.body, "}{", lineArg)
            # assure that the "}{" is in this ERT
            # (the content line sits a fixed 5 lines below the ERT header)
            if bracePair == lineArg + 5:
                end = find_token(document.body, "\\end_inset", bracePair)
                document.body[lineArg : end + 1] = ["\\end_layout", "", "\\end_inset"]
                if n == 1:
                    # First argument: insert right after the layout (offset 1)
                    # or after the inset header (offset 4).
                    if inset == False:
                        document.body[line + 1 : line + 1] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                    else:
                        document.body[line + 4 : line + 4] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                else:
                    # Later arguments are inserted at the position recorded
                    # for the previous one.
                    # NOTE(review): if the routine is ever entered with n > 1
                    # in this branch, endn is still unbound here; current
                    # callers with environment == False always start at n = 1.
                    document.body[endn : endn] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                n = n + 1
                endn = end
            # now check the case that we have "}" + "{" in two ERTs
            else:
                endBrace = find_token(document.body, "}", lineArg)
                if endBrace == lineArg + 5:
                    beginBrace = find_token(document.body, "{", endBrace)
                    # assure that the ERTs are consecutive (11 or 12 depending if there is a space between the ERTs or not)
                    if beginBrace == endBrace + 11 or beginBrace == endBrace + 12:
                        end = find_token(document.body, "\\end_inset", beginBrace)
                        document.body[lineArg : end + 1] = ["\\end_layout", "", "\\end_inset"]
                        if n == 1:
                            if inset == False:
                                document.body[line + 1 : line + 1] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                            else:
                                document.body[line + 4 : line + 4] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                        else:
                            document.body[endn : endn] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                        n = n + 1
                        # set the line where the next argument will be inserted
                        if beginBrace == endBrace + 11:
                            endn = end - 11
                        else:
                            endn = end - 12
                    else:
                        lineArg = lineArg + 1
                # NOTE(review): when endBrace != lineArg + 5 nothing advances
                # lineArg, so the same ERT is re-found — potential endless
                # loop on unexpected input; left unchanged here.
        if environment == True and lineArg != -1:
            opening = find_token(document.body, "{", lineArg)
            if opening == lineArg + 5 or opening == lineArg + 4: # assure that the "{" is in this ERT
                end = find_token(document.body, "\\end_inset", opening)
                document.body[lineArg : end + 1] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                n = n + 1
                lineArg2 = find_token(document.body, "\\begin_inset ERT", lineArg)
                closing = find_token(document.body, "}", lineArg2)
                if closing == lineArg2 + 5 or closing == lineArg2 + 4: # assure that the "}" is in this ERT
                    end2 = find_token(document.body, "\\end_inset", closing)
                    document.body[lineArg2 : end2 + 1] = ["\\end_layout", "", "\\end_inset"]
            else:
                lineArg = lineArg + 1
def convert_IEEEtran(document):
    '''
    Converts ERT of
    Page headings
    Biography
    Biography without photo
    to InsetArgument
    '''
    if document.textclass == "IEEEtran":
        # Three independent cursors scan the body in lock-step; each is set
        # to -1 once its layout is exhausted, and the loop ends when all are.
        i = 0
        j = 0
        k = 0
        while True:
            if i != -1:
                i = find_token(document.body, "\\begin_layout Page headings", i)
                if i != -1:
                    convert_TeX_brace_to_Argument(document, i, 1, 1, False, False)
                    i = i + 1
            if j != -1:
                j = find_token(document.body, "\\begin_layout Biography without photo", j)
                if j != -1:
                    convert_TeX_brace_to_Argument(document, j, 1, 1, False, True)
                    j = j + 1
            if k != -1:
                # assure that we don't handle Biography Biography without photo
                k = find_token(document.body, "\\begin_layout Biography", k)
                kA = find_token(document.body, "\\begin_layout Biography without photo", k - 1)
                if k == kA and k != -1:
                    k = k + 1
                    continue
                if k != -1:
                    # the argument we want to convert is the second one
                    convert_TeX_brace_to_Argument(document, k, 2, 2, False, True)
                    k = k + 1
            if i == -1 and j == -1 and k == -1:
                return
def revert_AASTeX(document):
  " Reverts InsetArgument of Altaffilation to TeX-code "
  # Only the aastex class defines the Altaffilation layout.
  if document.textclass != "aastex":
    return
  pos = 0
  while True:
    pos = find_token(document.body, "\\begin_layout Altaffilation", pos)
    if pos == -1:
      return
    # Turn the single argument inset back into braced ERT.
    revert_Argument_to_TeX_brace(document, pos, 1, 1, False)
    pos += 1
|
|
|
|
|
|
|
|
|
|
|
|
def convert_AASTeX(document):
  " Converts ERT of Altaffilation to InsetArgument "
  # Only the aastex class defines the Altaffilation layout.
  if document.textclass != "aastex":
    return
  pos = 0
  while True:
    pos = find_token(document.body, "\\begin_layout Altaffilation", pos)
    if pos == -1:
      return
    # Replace the braced ERT argument with a proper InsetArgument.
    convert_TeX_brace_to_Argument(document, pos, 1, 1, False, False)
    pos += 1
|
|
|
|
|
|
|
|
|
2012-11-26 03:21:23 +00:00
|
|
|
def revert_AGUTeX(document):
  " Reverts InsetArgument of Author affiliation to TeX-code "
  # Only the agutex class defines the Author affiliation layout.
  if document.textclass != "agutex":
    return
  pos = 0
  while True:
    pos = find_token(document.body, "\\begin_layout Author affiliation", pos)
    if pos == -1:
      return
    # Turn the single argument inset back into braced ERT.
    revert_Argument_to_TeX_brace(document, pos, 1, 1, False)
    pos += 1
|
|
|
|
|
|
|
|
|
|
|
|
def convert_AGUTeX(document):
  " Converts ERT of Author affiliation to InsetArgument "
  # Only the agutex class defines the Author affiliation layout.
  if document.textclass != "agutex":
    return
  pos = 0
  while True:
    pos = find_token(document.body, "\\begin_layout Author affiliation", pos)
    if pos == -1:
      return
    # Replace the braced ERT argument with a proper InsetArgument.
    convert_TeX_brace_to_Argument(document, pos, 1, 1, False, False)
    pos += 1
|
|
|
|
|
|
|
|
|
2012-11-26 04:19:47 +00:00
|
|
|
def revert_IJMP(document):
  " Reverts InsetArgument of MarkBoth to TeX-code "
  # The MarkBoth layout exists in both ijmpc and ijmpd.
  if document.textclass not in ("ijmpc", "ijmpd"):
    return
  pos = 0
  while True:
    pos = find_token(document.body, "\\begin_layout MarkBoth", pos)
    if pos == -1:
      return
    # Turn the single argument inset back into braced ERT.
    revert_Argument_to_TeX_brace(document, pos, 1, 1, False)
    pos += 1
|
|
|
|
|
|
|
|
|
|
|
|
def convert_IJMP(document):
  " Converts ERT of MarkBoth to InsetArgument "
  # The MarkBoth layout exists in both ijmpc and ijmpd.
  if document.textclass not in ("ijmpc", "ijmpd"):
    return
  pos = 0
  while True:
    pos = find_token(document.body, "\\begin_layout MarkBoth", pos)
    if pos == -1:
      return
    # Replace the braced ERT argument with a proper InsetArgument.
    convert_TeX_brace_to_Argument(document, pos, 1, 1, False, False)
    pos += 1
|
|
|
|
|
2012-11-30 00:54:57 +00:00
|
|
|
|
|
|
|
def revert_SIGPLAN(document):
  " Reverts InsetArgument of Conference and Author to TeX-code "
  # (docstring corrected: this acts on the sigplanconf class, not on MarkBoth,
  # which belongs to the ijmpc/ijmpd converters above)
  if document.textclass == "sigplanconf":
    # Two search cursors scan the body in parallel; each becomes -1 once its
    # layout no longer occurs, and the loop ends when both are exhausted.
    i = 0
    j = 0
    while True:
      if i != -1:
        i = find_token(document.body, "\\begin_layout Conference", i)
      if i != -1:
        # Conference takes a single argument.
        revert_Argument_to_TeX_brace(document, i, 1, 1, False)
        i = i + 1
      if j != -1:
        j = find_token(document.body, "\\begin_layout Author", j)
      if j != -1:
        # Author takes up to two arguments.
        revert_Argument_to_TeX_brace(document, j, 1, 2, False)
        j = j + 1
      if i == -1 and j == -1:
        return
|
|
|
|
|
|
|
|
|
|
|
|
def convert_SIGPLAN(document):
  " Converts ERT of Conference and Author to InsetArgument "
  # (docstring corrected: this acts on the sigplanconf class, not on MarkBoth,
  # which belongs to the ijmpc/ijmpd converters above)
  if document.textclass == "sigplanconf":
    # Two search cursors scan the body in parallel; each becomes -1 once its
    # layout no longer occurs, and the loop ends when both are exhausted.
    i = 0
    j = 0
    while True:
      if i != -1:
        i = find_token(document.body, "\\begin_layout Conference", i)
      if i != -1:
        # Conference takes a single argument.
        convert_TeX_brace_to_Argument(document, i, 1, 1, False, False)
        i = i + 1
      if j != -1:
        j = find_token(document.body, "\\begin_layout Author", j)
      if j != -1:
        # Author takes up to two arguments.
        convert_TeX_brace_to_Argument(document, j, 1, 2, False, False)
        j = j + 1
      if i == -1 and j == -1:
        return
|
|
|
|
|
|
|
|
|
2012-12-02 14:58:14 +00:00
|
|
|
def revert_SIGGRAPH(document):
  " Reverts InsetArgument of Flex CRcat to TeX-code "
  # Only the acmsiggraph class defines the CRcat flex inset.
  if document.textclass != "acmsiggraph":
    return
  pos = 0
  while True:
    pos = find_token(document.body, "\\begin_inset Flex CRcat", pos)
    if pos == -1:
      return
    # CRcat carries three arguments; revert all of them to braced ERT.
    revert_Argument_to_TeX_brace(document, pos, 1, 3, False)
    pos += 1
|
|
|
|
|
|
|
|
|
|
|
|
def convert_SIGGRAPH(document):
  " Converts ERT of Flex CRcat to InsetArgument "
  # Only the acmsiggraph class defines the CRcat flex inset.
  if document.textclass != "acmsiggraph":
    return
  pos = 0
  while True:
    pos = find_token(document.body, "\\begin_inset Flex CRcat", pos)
    if pos == -1:
      return
    # CRcat carries three arguments inside the inset (hence the True flag).
    convert_TeX_brace_to_Argument(document, pos, 1, 3, True, False)
    pos += 1
|
|
|
|
|
|
|
|
|
2012-11-28 11:54:34 +00:00
|
|
|
def revert_literate(document):
  " Revert Literate document to old format "
  # Older LyX modelled literate programming as a dedicated "literate-*"
  # textclass instead of the noweb module; only act if the module is present.
  if not del_token(document.header, "noweb", 0):
    return
  document.textclass = "literate-" + document.textclass
  pos = 0
  while True:
    pos = find_token(document.body, "\\begin_layout Chunk", pos)
    if pos == -1:
      break
    # The layout was called "Scrap" in the old literate classes.
    document.body[pos] = "\\begin_layout Scrap"
    pos += 1
|
|
|
|
|
2012-11-30 00:54:57 +00:00
|
|
|
|
2012-11-28 11:54:34 +00:00
|
|
|
def convert_literate(document):
  " Convert Literate document to new format"
  tcline = find_token(document.header, "\\textclass", 0)
  if tcline == -1 or "literate-" not in document.header[tcline]:
    return
  # Strip the "literate-" prefix; the noweb module now provides the feature.
  document.textclass = document.header[tcline].replace("\\textclass literate-", "")
  modline = find_token(document.header, "\\begin_modules", 0)
  if modline != -1:
    # A modules block already exists: just register noweb in it.
    document.header.insert(modline + 1, "noweb")
  else:
    # No modules block yet: create one right after \textclass.
    # The three inserts use the same index, so they land in reverse order.
    document.header.insert(tcline + 1, "\\end_modules")
    document.header.insert(tcline + 1, "noweb")
    document.header.insert(tcline + 1, "\\begin_modules")
  pos = 0
  while True:
    pos = find_token(document.body, "\\begin_layout Scrap", pos)
    if pos == -1:
      break
    # The old "Scrap" layout is called "Chunk" in the noweb module.
    document.body[pos] = "\\begin_layout Chunk"
    pos += 1
|
2012-11-26 04:19:47 +00:00
|
|
|
|
2012-11-30 00:54:57 +00:00
|
|
|
|
2012-11-29 14:34:20 +00:00
|
|
|
def revert_itemargs(document):
  " Reverts \\item arguments to TeX-code "
  # Fix: dropped a dead `endInset = find_token(..., "\\end_inset", ...)`
  # lookup whose result was never used.
  i = 0
  while True:
    i = find_token(document.body, "\\begin_inset Argument item:", i)
    if i == -1:
      return
    # End line of the whole Argument inset.
    j = find_end_of_inset(document.body, i)
    # Layout line of the \item paragraph that contains the inset.
    lastlay = find_token_backwards(document.body, "\\begin_layout", i)
    # The argument contents live in a Plain Layout inside the inset.
    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
    endLayout = find_token(document.body, "\\end_layout", beginPlain)
    content = document.body[beginPlain + 1 : endLayout]
    # Remove the inset first; lastlay precedes i, so the insertion index
    # below is unaffected by the deletion.
    del document.body[i:j + 1]
    # Re-emit the contents as ERT "[...]" right after the layout line.
    subst = put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
    document.body[lastlay + 1:lastlay + 1] = subst
    i = i + 1
|
|
|
|
|
2012-11-30 00:54:57 +00:00
|
|
|
|
2011-05-03 13:12:55 +00:00
|
|
|
##
# Conversion hub
#

# File-format versions this module can read and write; LyX 2.1 spans
# formats 414-449.
supported_versions = ["2.1.0","2.1"]

# Forward conversion table: each entry is [target_format, [functions]].
# An empty function list means the format bump needs no document changes.
convert = [
           [414, []],
           [415, [convert_undertilde]],
           [416, []],
           [417, [convert_japanese_encodings]],
           [418, []],
           [419, []],
           [420, [convert_biblio_style]],
           [421, [convert_longtable_captions]],
           [422, [convert_use_packages]],
           [423, [convert_use_mathtools]],
           [424, [convert_cite_engine_type]],
           [425, []],
           [426, []],
           [427, []],
           [428, [convert_cell_rotation]],
           [429, [convert_table_rotation]],
           [430, [convert_listoflistings]],
           [431, [convert_use_amssymb]],
           [432, []],
           [433, [convert_armenian]],
           [434, []],
           [435, []],
           [436, []],
           [437, []],
           [438, []],
           [439, []],
           [440, []],
           [441, [convert_mdnomath]],
           [442, []],
           [443, []],
           [444, []],
           [445, []],
           [446, [convert_latexargs]],
           [447, [convert_IEEEtran, convert_AASTeX, convert_AGUTeX, convert_IJMP, convert_SIGPLAN, convert_SIGGRAPH]],
           [448, [convert_literate]],
           [449, []]
          ]

# Backward conversion table, listed from newest to oldest: each entry is
# [source_format, [functions]] taking a document of format source_format + 1
# down to source_format.
revert = [
          [448, [revert_itemargs]],
          [447, [revert_literate]],
          [446, [revert_IEEEtran, revert_AASTeX, revert_AGUTeX, revert_IJMP, revert_SIGPLAN, revert_SIGGRAPH]],
          [445, [revert_latexargs]],
          [444, [revert_uop]],
          [443, [revert_biolinum]],
          [442, []],
          [441, [revert_newtxmath]],
          [440, [revert_mdnomath]],
          [439, [revert_mathfonts]],
          [438, [revert_minionpro]],
          [437, [revert_ipadeco, revert_ipachar]],
          [436, [revert_texgyre]],
          [435, [revert_mathdesign]],
          [434, [revert_txtt]],
          [433, [revert_libertine]],
          [432, [revert_armenian]],
          [431, [revert_languages, revert_ancientgreek]],
          [430, [revert_use_amssymb]],
          [429, [revert_listoflistings]],
          [428, [revert_table_rotation]],
          [427, [revert_cell_rotation]],
          [426, [revert_tipa]],
          [425, [revert_verbatim]],
          [424, [revert_cancel]],
          [423, [revert_cite_engine_type]],
          [422, [revert_use_mathtools]],
          [421, [revert_use_packages]],
          [420, [revert_longtable_captions]],
          [419, [revert_biblio_style]],
          [418, [revert_australian]],
          [417, [revert_justification]],
          [416, [revert_japanese_encodings]],
          [415, [revert_negative_space, revert_math_spaces]],
          [414, [revert_undertilde]],
          [413, [revert_visible_space]]
         ]


# This module is driven by lyx2lyx; it does nothing when run directly.
if __name__ == "__main__":
    pass
|