# -*- coding: utf-8 -*-
# This file is part of lyx2lyx
# Copyright (C) 2011 The LyX team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
""" Convert files to the file format generated by lyx 2.1"""
import re, string
import unicodedata
import sys, os
# Uncomment only what you need to import, please.
from parser_tools import del_token, find_token, find_end_of, find_end_of_inset, \
find_end_of_layout, find_re, get_option_value, get_value, get_quoted_value, \
set_option_value
#from parser_tools import find_token, find_end_of, find_tokens, \
#find_token_exact, find_end_of_inset, find_end_of_layout, \
#find_token_backwards, is_in_inset, del_token, check_token
from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert, get_ert
#from lyx2lyx_tools import insert_to_preamble, \
# lyx2latex, latex_length, revert_flex_inset, \
# revert_font_attrs, hex2ratio, str2bool
####################################################################
# Private helper functions
#def remove_option(lines, m, option):
#''' removes option from line m. returns whether we did anything '''
#l = lines[m].find(option)
#if l == -1:
#return False
#val = lines[m][l:].split('"')[1]
#lines[m] = lines[m][:l - 1] + lines[m][l+len(option + '="' + val + '"'):]
#return True
###############################################################################
###
### Conversion and reversion routines
###
###############################################################################
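# Naming convention used below: each convert_* routine moves a document one
# file format step forward, each revert_* routine moves it one step back; the
# routines are registered in the convert/revert tables at the end of this file.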
def revert_visible_space(document):
"Revert InsetSpace visible into its ERT counterpart"
i = 0
while True:
i = find_token(document.body, "\\begin_inset space \\textvisiblespace{}", i)
if i == -1:
return
end = find_end_of_inset(document.body, i)
subst = put_cmd_in_ert("\\textvisiblespace{}")
document.body[i:end + 1] = subst
def convert_undertilde(document):
" Load undertilde automatically "
i = find_token(document.header, "\\use_mathdots" , 0)
if i == -1:
i = find_token(document.header, "\\use_mhchem" , 0)
if i == -1:
i = find_token(document.header, "\\use_esint" , 0)
if i == -1:
document.warning("Malformed LyX document: Can't find \\use_mathdots (or \\use_mhchem, \\use_esint).")
return
j = find_token(document.preamble, "\\usepackage{undertilde}", 0)
if j == -1:
document.header.insert(i + 1, "\\use_undertilde 0")
else:
document.header.insert(i + 1, "\\use_undertilde 2")
del document.preamble[j]
def revert_undertilde(document):
" Load undertilde if used in the document "
undertilde = find_token(document.header, "\\use_undertilde" , 0)
if undertilde == -1:
document.warning("No \\use_undertilde line. Assuming auto.")
else:
val = get_value(document.header, "\\use_undertilde", undertilde)
del document.header[undertilde]
try:
usetilde = int(val)
except:
document.warning("Invalid \\use_undertilde value: " + val + ". Assuming auto.")
# the value has probably never been changed from the default, but be safe.
usetilde = 1
if usetilde == 0:
# do not load case
return
if usetilde == 2:
# force load case
add_to_preamble(document, ["\\usepackage{undertilde}"])
return
# so we are in the auto case. we want to load undertilde if \utilde is used.
i = 0
while True:
i = find_token(document.body, '\\begin_inset Formula', i)
if i == -1:
return
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
i += 1
continue
code = "\n".join(document.body[i:j])
if code.find("\\utilde") != -1:
add_to_preamble(document, ["\\@ifundefined{utilde}{\\usepackage{undertilde}}"])
return
i = j
def revert_negative_space(document):
"Revert the negmedspace and negthickspace space insets to their TeX-code counterparts"
i = 0
j = 0
reverted = False
while True:
i = find_token(document.body, "\\begin_inset space \\negmedspace{}", i)
if i == -1:
j = find_token(document.body, "\\begin_inset space \\negthickspace{}", j)
if j == -1:
# we have finished checking; load amsmath in the preamble if a space was reverted and amsmath is not already loaded
if reverted == True:
i = find_token(document.header, "\\use_amsmath 2", 0)
if i == -1:
add_to_preamble(document, ["\\@ifundefined{negthickspace}{\\usepackage{amsmath}}"])
return
if i == -1:
return
end = find_end_of_inset(document.body, i)
subst = put_cmd_in_ert("\\negmedspace{}")
document.body[i:end + 1] = subst
j = find_token(document.body, "\\begin_inset space \\negthickspace{}", j)
if j == -1:
return
end = find_end_of_inset(document.body, j)
subst = put_cmd_in_ert("\\negthickspace{}")
document.body[j:end + 1] = subst
reverted = True
def revert_math_spaces(document):
"Revert formulas with protected custom space and protected hfills to TeX-code"
i = 0
while True:
i = find_token(document.body, "\\begin_inset Formula", i)
if i == -1:
return
j = document.body[i].find("\\hspace*")
if j != -1:
end = find_end_of_inset(document.body, i)
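# document.body[i][21:] strips the leading "\begin_inset Formula " (21 characters),
# so only the math code itself is wrapped in ERT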
subst = put_cmd_in_ert(document.body[i][21:])
document.body[i:end + 1] = subst
i = i + 1
def convert_japanese_encodings(document):
" Rename the japanese encodings to names understood by platex "
jap_enc_dict = {
"EUC-JP-pLaTeX": "euc",
"JIS-pLaTeX": "jis",
"SJIS-pLaTeX": "sjis"
}
i = find_token(document.header, "\\inputencoding" , 0)
if i == -1:
return
val = get_value(document.header, "\\inputencoding", i)
if val in jap_enc_dict.keys():
document.header[i] = "\\inputencoding %s" % jap_enc_dict[val]
def revert_japanese_encodings(document):
" Revert the japanese encodings name changes "
jap_enc_dict = {
"euc": "EUC-JP-pLaTeX",
"jis": "JIS-pLaTeX",
"sjis": "SJIS-pLaTeX"
}
i = find_token(document.header, "\\inputencoding" , 0)
if i == -1:
return
val = get_value(document.header, "\\inputencoding", i)
if val in jap_enc_dict.keys():
document.header[i] = "\\inputencoding %s" % jap_enc_dict[val]
def revert_justification(document):
" Revert the \\justification buffer param"
if not del_token(document.header, '\\justification', 0):
document.warning("Malformed LyX document: Missing \\justification.")
def revert_australian(document):
"Set the English language variants Australian and New Zealand to English"
if document.language == "australian" or document.language == "newzealand":
document.language = "english"
i = find_token(document.header, "\\language", 0)
if i != -1:
document.header[i] = "\\language english"
j = 0
while True:
j = find_token(document.body, "\\lang australian", j)
if j == -1:
j = find_token(document.body, "\\lang newzealand", 0)
if j == -1:
return
else:
document.body[j] = document.body[j].replace("\\lang newzealand", "\\lang english")
else:
document.body[j] = document.body[j].replace("\\lang australian", "\\lang english")
j += 1
def convert_biblio_style(document):
"Add a sensible default for \\biblio_style based on the citation engine."
i = find_token(document.header, "\\cite_engine", 0)
if i != -1:
engine = get_value(document.header, "\\cite_engine", i).split("_")[0]
style = {"basic": "plain", "natbib": "plainnat", "jurabib": "jurabib"}
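# e.g. "basic" maps to "plain", a natbib engine (possibly with a _numerical or
# _authoryear suffix) maps to "plainnat", and "jurabib" keeps "jurabib"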
document.header.insert(i + 1, "\\biblio_style " + style[engine])
def revert_biblio_style(document):
"BibTeX insets with default option use the style defined by \\biblio_style."
i = find_token(document.header, "\\biblio_style" , 0)
if i == -1:
document.warning("No \\biblio_style line. Nothing to do.")
return
default_style = get_value(document.header, "\\biblio_style", i)
del document.header[i]
# We are looking for bibtex insets having the default option
i = 0
while True:
i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
if i == -1:
return
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed LyX document: Can't find end of bibtex inset at line " + str(i))
i += 1
continue
k = find_token(document.body, "options", i, j)
if k != -1:
options = get_quoted_value(document.body, "options", k)
if "default" in options.split(","):
document.body[k] = 'options "%s"' \
% options.replace("default", default_style)
i = j
def handle_longtable_captions(document, forward):
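# Shared helper: with forward=True (convert) a caption row that carries no other
# head/foot flag is marked as endfirsthead; with forward=False (revert) the
# endfirsthead/endhead/endfoot/endlastfoot flags are cleared from caption rows.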
begin_table = 0
while True:
begin_table = find_token(document.body, '<lyxtabular version=', begin_table)
if begin_table == -1:
break
end_table = find_end_of(document.body, begin_table, '<lyxtabular', '</lyxtabular>')
if end_table == -1:
document.warning("Malformed LyX document: Could not find end of table.")
begin_table += 1
continue
fline = find_token(document.body, "<features", begin_table, end_table)
if fline == -1:
document.warning("Can't find features for inset at line " + str(begin_table))
begin_table += 1
continue
p = document.body[fline].find("islongtable")
if p == -1:
# no longtable
begin_table += 1
continue
numrows = get_option_value(document.body[begin_table], "rows")
try:
numrows = int(numrows)
except:
document.warning(document.body[begin_table])
document.warning("Unable to determine rows!")
begin_table = end_table
continue
begin_row = begin_table
for row in range(numrows):
begin_row = find_token(document.body, '<row', begin_row, end_table)
if begin_row == -1:
document.warning("Can't find row " + str(row + 1))
break
end_row = find_end_of(document.body, begin_row, '<row', '</row>')
if end_row == -1:
document.warning("Can't find end of row " + str(row + 1))
break
if forward:
if (get_option_value(document.body[begin_row], 'caption') == 'true' and
get_option_value(document.body[begin_row], 'endfirsthead') != 'true' and
get_option_value(document.body[begin_row], 'endhead') != 'true' and
get_option_value(document.body[begin_row], 'endfoot') != 'true' and
get_option_value(document.body[begin_row], 'endlastfoot') != 'true'):
document.body[begin_row] = set_option_value(document.body[begin_row], 'caption', 'true", endfirsthead="true')
elif get_option_value(document.body[begin_row], 'caption') == 'true':
if get_option_value(document.body[begin_row], 'endfirsthead') == 'true':
document.body[begin_row] = set_option_value(document.body[begin_row], 'endfirsthead', 'false')
if get_option_value(document.body[begin_row], 'endhead') == 'true':
document.body[begin_row] = set_option_value(document.body[begin_row], 'endhead', 'false')
if get_option_value(document.body[begin_row], 'endfoot') == 'true':
document.body[begin_row] = set_option_value(document.body[begin_row], 'endfoot', 'false')
if get_option_value(document.body[begin_row], 'endlastfoot') == 'true':
document.body[begin_row] = set_option_value(document.body[begin_row], 'endlastfoot', 'false')
begin_row = end_row
# since there could be a tabular inside this one, we
# cannot jump to end.
begin_table += 1
def convert_longtable_captions(document):
"Add an endfirsthead flag to caption rows"
handle_longtable_captions(document, True)
def revert_longtable_captions(document):
"remove head/foot flag from caption rows"
handle_longtable_captions(document, False)
def convert_use_packages(document):
"use_xxx yyy => use_package xxx yyy"
packages = ["amsmath", "esint", "mathdots", "mhchem", "undertilde"]
for p in packages:
i = find_token(document.header, "\\use_%s" % p, 0)
if i != -1:
value = get_value(document.header, "\\use_%s" % p, i)
document.header[i] = "\\use_package %s %s" % (p, value)
def revert_use_packages(document):
"use_package xxx yyy => use_xxx yyy"
packages = ["amsmath", "esint", "mathdots", "mhchem", "undertilde"]
# the order is arbitrary for the use_package version, and not all packages need to be given.
# Ensure a complete list and correct order (important for older LyX versions and especially lyx2lyx)
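# e.g. "\use_package amsmath 1" becomes "\use_amsmath 1", re-inserted in the
# fixed package order listed above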
j = 0
for p in packages:
regexp = re.compile(r'(\\use_package\s+%s)' % p)
i = find_re(document.header, regexp, j)
if i != -1:
value = get_value(document.header, "\\use_package %s" % p, i).split()[1]
del document.header[i]
j = i
document.header.insert(j, "\\use_%s %s" % (p, value))
j = j + 1
def convert_use_mathtools(document):
"insert use_package mathtools"
i = find_token(document.header, "\\use_package", 0)
if i == -1:
document.warning("Malformed LyX document: Can't find \\use_package.")
return
j = find_token(document.preamble, "\\usepackage{mathtools}", 0)
if j == -1:
document.header.insert(i + 1, "\\use_package mathtools 0")
else:
document.header.insert(i + 1, "\\use_package mathtools 2")
del document.preamble[j]
def revert_use_mathtools(document):
"remove use_package mathtools"
regexp = re.compile(r'(\\use_package\s+mathtools)')
i = find_re(document.header, regexp, 0)
value = "1" # default is auto
if i != -1:
value = get_value(document.header, "\\use_package" , i).split()[1]
del document.header[i]
if value == "2": # on
add_to_preamble(document, ["\\usepackage{mathtools}"])
elif value == "1": # auto
commands = ["mathclap", "mathllap", "mathrlap", \
"lgathered", "rgathered", "vcentcolon", "dblcolon", \
"coloneqq", "Coloneqq", "coloneq", "Coloneq", "eqqcolon", \
"Eqqcolon", "eqcolon", "Eqcolon", "colonapprox", \
"Colonapprox", "colonsim", "Colonsim"]
i = 0
while True:
i = find_token(document.body, '\\begin_inset Formula', i)
if i == -1:
return
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
i += 1
continue
code = "\n".join(document.body[i:j])
for c in commands:
if code.find("\\%s" % c) != -1:
add_to_preamble(document, ["\\usepackage{mathtools}"])
return
i = j
def convert_cite_engine_type(document):
"Determine the \\cite_engine_type from the citation engine."
i = find_token(document.header, "\\cite_engine", 0)
if i == -1:
return
engine = get_value(document.header, "\\cite_engine", i)
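# e.g. "natbib_authoryear" yields engine "natbib" and type "authoryear";
# engines without a suffix fall back to a default type below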
if "_" in engine:
engine, type = engine.split("_")
else:
type = {"basic": "numerical", "jurabib": "authoryear"}[engine]
document.header[i] = "\\cite_engine " + engine
document.header.insert(i + 1, "\\cite_engine_type " + type)
def revert_cite_engine_type(document):
"Natbib had the type appended with an underscore."
engine_type = "numerical"
i = find_token(document.header, "\\cite_engine_type" , 0)
if i == -1:
document.warning("No \\cite_engine_type line. Assuming numerical.")
else:
engine_type = get_value(document.header, "\\cite_engine_type", i)
del document.header[i]
# We are looking for the natbib citation engine
i = find_token(document.header, "\\cite_engine natbib", 0)
if i == -1:
return
document.header[i] = "\\cite_engine natbib_" + engine_type
def revert_cancel(document):
"add cancel to the preamble if necessary"
commands = ["cancelto", "cancel", "bcancel", "xcancel"]
i = 0
while True:
i = find_token(document.body, '\\begin_inset Formula', i)
if i == -1:
return
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
i += 1
continue
code = "\n".join(document.body[i:j])
for c in commands:
if code.find("\\%s" % c) != -1:
add_to_preamble(document, ["\\usepackage{cancel}"])
return
i = j
def revert_verbatim(document):
" Revert verbatim environments completely to TeX-code. "
i = 0
consecutive = False
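# ERT templates: subst_begin opens a TeX "\begin{verbatim}" inside an ERT inset,
# subst_end emits the matching "\end{verbatim}" and closes the inset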
subst_end = ['\\end_layout', '', '\\begin_layout Plain Layout',
'\\end_layout', '',
'\\begin_layout Plain Layout', '', '',
'\\backslash', '',
'end{verbatim}',
'\\end_layout', '', '\\end_inset',
'', '', '\\end_layout']
subst_begin = ['\\begin_layout Standard', '\\noindent',
'\\begin_inset ERT', 'status collapsed', '',
'\\begin_layout Plain Layout', '', '', '\\backslash',
'begin{verbatim}',
'\\end_layout', '', '\\begin_layout Plain Layout', '']
while 1:
i = find_token(document.body, "\\begin_layout Verbatim", i)
if i == -1:
return
j = find_end_of_layout(document.body, i)
if j == -1:
document.warning("Malformed lyx document: Can't find end of Verbatim layout")
i += 1
continue
# delete all line breaks insets (there are no other insets)
l = i
while 1:
n = find_token(document.body, "\\begin_inset Newline newline", l)
if n == -1:
n = find_token(document.body, "\\begin_inset Newline linebreak", l)
if n == -1:
break
m = find_end_of_inset(document.body, n)
del(document.body[m:m+1])
document.body[n:n+1] = ['\\end_layout', '', '\\begin_layout Plain Layout']
l += 1
j += 1
# consecutive verbatim environments need to be connected
k = find_token(document.body, "\\begin_layout Verbatim", j)
if k == j + 2 and consecutive == False:
consecutive = True
document.body[j:j+1] = ['\\end_layout', '', '\\begin_layout Plain Layout']
document.body[i:i+1] = subst_begin
continue
if k == j + 2 and consecutive == True:
document.body[j:j+1] = ['\\end_layout', '', '\\begin_layout Plain Layout']
del(document.body[i:i+1])
continue
if k != j + 2 and consecutive == True:
document.body[j:j+1] = subst_end
# the next paragraph must not be indented
document.body[j+19:j+19] = ['\\noindent']
del(document.body[i:i+1])
consecutive = False
continue
else:
document.body[j:j+1] = subst_end
# the next paragraph must not be indented
document.body[j+19:j+19] = ['\\noindent']
document.body[i:i+1] = subst_begin
def revert_tipa(document):
" Revert native TIPA insets to mathed or ERT. "
i = 0
while 1:
i = find_token(document.body, "\\begin_inset IPA", i)
if i == -1:
return
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed lyx document: Can't find end of IPA inset")
i += 1
continue
Multipar = False
n = find_token(document.body, "\\begin_layout", i, j)
if n == -1:
document.warning("Malformed lyx document: IPA inset has no embedded layout")
i += 1
continue
m = find_end_of_layout(document.body, n)
if m == -1:
document.warning("Malformed lyx document: Can't find end of embedded layout")
i += 1
continue
content = document.body[n+1:m]
p = find_token(document.body, "\\begin_layout", m, j)
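# a second embedded layout (or multi-line content) cannot be represented as a
# single \textipa math inset, so such insets are wrapped in \begin{IPA}...\end{IPA}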
if p != -1 or len(content) > 1:
Multipar = True
content = document.body[i+1:j]
if Multipar:
# IPA insets with multiple pars need to be wrapped by \begin{IPA}...\end{IPA}
document.body[i:j+1] = ['\\end_layout', '', '\\begin_layout Standard'] + put_cmd_in_ert("\\begin{IPA}") + ['\\end_layout'] + content + ['\\begin_layout Standard'] + put_cmd_in_ert("\\end{IPA}")
add_to_preamble(document, ["\\usepackage{tipa,tipx}"])
else:
# single-par IPA insets can be reverted to mathed
document.body[i:j+1] = ["\\begin_inset Formula $\\text{\\textipa{" + content[0] + "}}$", "\\end_inset"]
i = j
def revert_cell_rotation(document):
"Revert cell rotations to TeX-code"
load_rotating = False
i = 0
try:
while True:
# first, let's find out if we need to do anything
i = find_token(document.body, '<cell ', i)
if i == -1:
return
j = document.body[i].find('rotate="')
if j != -1:
k = document.body[i].find('"', j + 8)
value = document.body[i][j + 8 : k]
if value == "0":
rgx = re.compile(r' rotate="[^"]+?"')
# remove rotate option
document.body[i] = rgx.sub('', document.body[i])
elif value == "90":
rgx = re.compile(r' rotate="[^"]+?"')
document.body[i] = rgx.sub('rotate="true"', document.body[i])
else:
rgx = re.compile(r' rotate="[^"]+?"')
load_rotating = True
# remove rotate option
document.body[i] = rgx.sub('', document.body[i])
# write ERT
document.body[i + 5 : i + 5] = \
put_cmd_in_ert("\\end{turn}")
document.body[i + 4 : i + 4] = \
put_cmd_in_ert("\\begin{turn}{" + value + "}")
i += 1
finally:
if load_rotating:
add_to_preamble(document, ["\\@ifundefined{turnbox}{\\usepackage{rotating}}{}"])
def convert_cell_rotation(document):
'Convert cell rotation statements from "true" to "90"'
i = 0
while True:
# first, let's find out if we need to do anything
i = find_token(document.body, '<cell ', i)
if i == -1:
return
j = document.body[i].find('rotate="true"')
if j != -1:
rgx = re.compile(r'rotate="[^"]+?"')
# convert "true" to "90"
document.body[i] = rgx.sub('rotate="90"', document.body[i])
i += 1
def revert_table_rotation(document):
"Revert table rotations to TeX-code"
load_rotating = False
i = 0
try:
while True:
# first, let's find out if we need to do anything
i = find_token(document.body, '<features ', i)
if i == -1:
return
j = document.body[i].find('rotate="')
if j != -1:
end_table = find_token(document.body, '</lyxtabular>', j)
k = document.body[i].find('"', j + 8)
value = document.body[i][j + 8 : k]
if value == "0":
rgx = re.compile(r' rotate="[^"]+?"')
# remove rotate option
document.body[i] = rgx.sub('', document.body[i])
elif value == "90":
rgx = re.compile(r'rotate="[^"]+?"')
document.body[i] = rgx.sub('rotate="true"', document.body[i])
else:
rgx = re.compile(r' rotate="[^"]+?"')
load_rotating = True
# remove rotate option
document.body[i] = rgx.sub('', document.body[i])
# write ERT
document.body[end_table + 3 : end_table + 3] = \
put_cmd_in_ert("\\end{turn}")
document.body[i - 2 : i - 2] = \
put_cmd_in_ert("\\begin{turn}{" + value + "}")
i += 1
finally:
if load_rotating:
add_to_preamble(document, ["\\@ifundefined{turnbox}{\\usepackage{rotating}}{}"])
def convert_table_rotation(document):
'Convert table rotation statements from "true" to "90"'
i = 0
while True:
# first, let's find out if we need to do anything
i = find_token(document.body, '<features ', i)
if i == -1:
return
j = document.body[i].find('rotate="true"')
if j != -1:
rgx = re.compile(r'rotate="[^"]+?"')
# convert "true" to "90"
document.body[i] = rgx.sub('rotate="90"', document.body[i])
i += 1
def convert_listoflistings(document):
'Convert ERT \lstlistoflistings to TOC lstlistoflistings inset'
# We can support roundtrip because the command is so simple
i = 0
while True:
i = find_token(document.body, "\\begin_inset ERT", i)
if i == -1:
return
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed lyx document: Can't find end of ERT inset")
i += 1
continue
ert = get_ert(document.body, i)
if ert == "\\lstlistoflistings{}":
document.body[i:j] = ["\\begin_inset CommandInset toc", "LatexCommand lstlistoflistings", ""]
i = i + 4
else:
i = j + 1
def revert_listoflistings(document):
'Convert TOC lstlistoflistings inset to ERT lstlistoflistings'
i = 0
while True:
i = find_token(document.body, "\\begin_inset CommandInset toc", i)
if i == -1:
return
if document.body[i+1] == "LatexCommand lstlistoflistings":
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed lyx document: Can't find end of TOC inset")
i += 1
continue
subst = put_cmd_in_ert("\\lstlistoflistings{}")
document.body[i:j+1] = subst
add_to_preamble(document, ["\\usepackage{listings}"])
i = i + 1
def convert_use_amssymb(document):
"insert use_package amssymb"
regexp = re.compile(r'(\\use_package\s+amsmath)')
i = find_re(document.header, regexp, 0)
if i == -1:
document.warning("Malformed LyX document: Can't find \\use_package amsmath.")
return
value = get_value(document.header, "\\use_package" , i).split()[1]
useamsmath = 0
try:
useamsmath = int(value)
except:
document.warning("Invalid \\use_package amsmath: " + value + ". Assuming auto.")
useamsmath = 1
j = find_token(document.preamble, "\\usepackage{amssymb}", 0)
if j == -1:
document.header.insert(i + 1, "\\use_package amssymb %d" % useamsmath)
else:
document.header.insert(i + 1, "\\use_package amssymb 2")
del document.preamble[j]
def revert_use_amssymb(document):
"remove use_package amssymb"
regexp1 = re.compile(r'(\\use_package\s+amsmath)')
regexp2 = re.compile(r'(\\use_package\s+amssymb)')
i = find_re(document.header, regexp1, 0)
j = find_re(document.header, regexp2, 0)
value1 = "1" # default is auto
value2 = "1" # default is auto
if i != -1:
value1 = get_value(document.header, "\\use_package" , i).split()[1]
if j != -1:
value2 = get_value(document.header, "\\use_package" , j).split()[1]
del document.header[j]
if value1 != value2 and value2 == "2": # on
add_to_preamble(document, ["\\usepackage{amssymb}"])
def revert_ancientgreek(document):
"Set the document language for ancientgreek to greek"
if document.language == "ancientgreek":
document.language = "greek"
i = find_token(document.header, "\\language", 0)
if i != -1:
document.header[i] = "\\language greek"
j = 0
while True:
j = find_token(document.body, "\\lang ancientgreek", j)
if j == -1:
return
else:
document.body[j] = document.body[j].replace("\\lang ancientgreek", "\\lang greek")
j += 1
def revert_languages(document):
"Set the document language for new supported languages to English"
languages = [
"coptic", "divehi", "hindi", "kurmanji", "lao", "marathi", "occitan", "sanskrit",
"syriac", "tamil", "telugu", "urdu"
]
for n in range(len(languages)):
if document.language == languages[n]:
document.language = "english"
i = find_token(document.header, "\\language", 0)
if i != -1:
document.header[i] = "\\language english"
j = 0
while j < len(document.body):
j = find_token(document.body, "\\lang " + languages[n], j)
if j != -1:
document.body[j] = document.body[j].replace("\\lang " + languages[n], "\\lang english")
j += 1
else:
j = len(document.body)
def convert_armenian(document):
"Use polyglossia and thus non-TeX fonts for Armenian"
if document.language == "armenian":
i = find_token(document.header, "\\use_non_tex_fonts", 0)
if i != -1:
document.header[i] = "\\use_non_tex_fonts true"
def revert_armenian(document):
"Use ArmTeX and thus TeX fonts for Armenian"
if document.language == "armenian":
i = find_token(document.header, "\\use_non_tex_fonts", 0)
if i != -1:
document.header[i] = "\\use_non_tex_fonts false"
def revert_libertine(document):
" Revert native libertine font definition to LaTeX "
if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
i = find_token(document.header, "\\font_roman libertine", 0)
if i != -1:
osf = False
j = find_token(document.header, "\\font_osf true", 0)
if j != -1:
osf = True
preamble = "\\usepackage"
if osf:
document.header[j] = "\\font_osf false"
else:
preamble += "[lining]"
preamble += "{libertine-type1}"
add_to_preamble(document, [preamble])
document.header[i] = "\\font_roman default"
def revert_txtt(document):
" Revert native txtt font definition to LaTeX "
if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
i = find_token(document.header, "\\font_typewriter txtt", 0)
if i != -1:
preamble = "\\renewcommand{\\ttdefault}{txtt}"
add_to_preamble(document, [preamble])
document.header[i] = "\\font_typewriter default"
def revert_mathdesign(document):
" Revert native mathdesign font definition to LaTeX "
if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
mathdesign_dict = {
"mdbch": "charter",
"mdput": "utopia",
"mdugm": "garamond"
}
i = find_token(document.header, "\\font_roman", 0)
if i == -1:
return
val = get_value(document.header, "\\font_roman", i)
if val in mathdesign_dict.keys():
preamble = "\\usepackage[%s" % mathdesign_dict[val]
expert = False
j = find_token(document.header, "\\font_osf true", 0)
if j != -1:
expert = True
document.header[j] = "\\font_osf false"
l = find_token(document.header, "\\font_sc true", 0)
if l != -1:
expert = True
document.header[l] = "\\font_sc false"
if expert:
preamble += ",expert"
preamble += "]{mathdesign}"
add_to_preamble(document, [preamble])
document.header[i] = "\\font_roman default"
def revert_texgyre(document):
" Revert native TeXGyre font definition to LaTeX "
if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
texgyre_fonts = ["tgadventor", "tgbonum", "tgchorus", "tgcursor", \
"tgheros", "tgpagella", "tgschola", "tgtermes"]
i = find_token(document.header, "\\font_roman", 0)
if i != -1:
val = get_value(document.header, "\\font_roman", i)
if val in texgyre_fonts:
preamble = "\\usepackage{%s}" % val
add_to_preamble(document, [preamble])
document.header[i] = "\\font_roman default"
i = find_token(document.header, "\\font_sans", 0)
if i != -1:
val = get_value(document.header, "\\font_sans", i)
if val in texgyre_fonts:
preamble = "\\usepackage{%s}" % val
add_to_preamble(document, [preamble])
document.header[i] = "\\font_sans default"
i = find_token(document.header, "\\font_typewriter", 0)
if i != -1:
val = get_value(document.header, "\\font_typewriter", i)
if val in texgyre_fonts:
preamble = "\\usepackage{%s}" % val
add_to_preamble(document, [preamble])
document.header[i] = "\\font_typewriter default"
def revert_ipadeco(document):
" Revert IPA decorations to ERT "
i = 0
while True:
i = find_token(document.body, "\\begin_inset IPADeco", i)
if i == -1:
return
end = find_end_of_inset(document.body, i)
if end == -1:
document.warning("Can't find end of inset at line " + str(i))
i += 1
continue
line = document.body[i]
rx = re.compile(r'\\begin_inset IPADeco (.*)$')
m = rx.match(line)
decotype = m.group(1)
if decotype != "toptiebar" and decotype != "bottomtiebar":
document.warning("Invalid IPADeco type: " + decotype)
i = end
continue
blay = find_token(document.body, "\\begin_layout Plain Layout", i, end)
if blay == -1:
document.warning("Can't find layout for inset at line " + str(i))
i = end
continue
bend = find_end_of_layout(document.body, blay)
if bend == -1:
document.warning("Malformed LyX document: Could not find end of IPADeco inset's layout.")
i = end
continue
substi = ["\\begin_inset ERT", "status collapsed", "",
"\\begin_layout Plain Layout", "", "", "\\backslash",
decotype + "{", "\\end_layout", "", "\\end_inset"]
substj = ["\\size default", "", "\\begin_inset ERT", "status collapsed", "",
"\\begin_layout Plain Layout", "", "}", "\\end_layout", "", "\\end_inset"]
# do the later one first so as not to mess up the numbering
document.body[bend:end + 1] = substj
document.body[i:blay + 1] = substi
i = end + len(substi) + len(substj) - (end - bend) - (blay - i) - 2
add_to_preamble(document, "\\usepackage{tipa}")
def revert_ipachar(document):
' Revert \\IPAChar to ERT '
i = 0
found = False
while i < len(document.body):
m = re.match(r'(.*)\\IPAChar \\(\w+\{\w+\})(.*)', document.body[i])
if m:
found = True
before = m.group(1)
ipachar = m.group(2)
after = m.group(3)
subst = [before,
'\\begin_inset ERT',
'status collapsed', '',
'\\begin_layout Standard',
'', '', '\\backslash',
ipachar,
'\\end_layout', '',
'\\end_inset', '',
after]
document.body[i: i+1] = subst
i = i + len(subst)
else:
i = i + 1
if found:
add_to_preamble(document, "\\usepackage{tone}")
def revert_minionpro(document):
" Revert native MinionPro font definition to LaTeX "
if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
i = find_token(document.header, "\\font_roman minionpro", 0)
if i != -1:
osf = False
j = find_token(document.header, "\\font_osf true", 0)
if j != -1:
osf = True
preamble = "\\usepackage"
if osf:
document.header[j] = "\\font_osf false"
else:
preamble += "[lf]"
preamble += "{MinionPro}"
add_to_preamble(document, [preamble])
document.header[i] = "\\font_roman default"
def revert_mathfonts(document):
" Revert native math font definitions to LaTeX "
i = find_token(document.header, "\\font_math", 0)
if i == -1:
return
if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
val = get_value(document.header, "\\font_math", i)
if val == "eulervm":
add_to_preamble(document, "\\usepackage{eulervm}")
elif val == "default":
mathfont_dict = {
"lmodern": "\\renewcommand{\\rmdefault}{lmr}",
"minionpro": "\\usepackage[onlytext,lf]{MinionPro}",
"minionpro-osf": "\\usepackage[onlytext]{MinionPro}",
"palatino": "\\renewcommand{\\rmdefault}{ppl}",
"palatino-osf": "\\renewcommand{\\rmdefault}{pplj}",
"times": "\\renewcommand{\\rmdefault}{ptm}",
"utopia": "\\renewcommand{\\rmdefault}{futs}",
"utopia-osf": "\\renewcommand{\\rmdefault}{futj}",
}
j = find_token(document.header, "\\font_roman", 0)
if j != -1:
rm = get_value(document.header, "\\font_roman", j)
k = find_token(document.header, "\\font_osf true", 0)
if k != -1:
rm += "-osf"
if rm in mathfont_dict.keys():
add_to_preamble(document, mathfont_dict[rm])
document.header[j] = "\\font_roman default"
if k != -1:
document.header[k] = "\\font_osf false"
del document.header[i]
def revert_mdnomath(document):
" Revert mathdesign and fourier without math "
if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
mathdesign_dict = {
"md-charter": "mdbch",
"md-utopia": "mdput",
"md-garamond": "mdugm"
}
i = find_token(document.header, "\\font_roman", 0)
if i == -1:
return
val = get_value(document.header, "\\font_roman", i)
if val in mathdesign_dict.keys():
j = find_token(document.header, "\\font_math", 0)
if j == -1:
document.header[i] = "\\font_roman %s" % mathdesign_dict[val]
mval = get_value(document.header, "\\font_math", j)
if mval == "default":
document.header[i] = "\\font_roman default"
add_to_preamble(document, "\\renewcommand{\\rmdefault}{%s}" % mathdesign_dict[val])
else:
document.header[i] = "\\font_roman %s" % mathdesign_dict[val]
def convert_mdnomath(document):
" Change mathdesign font name "
if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
mathdesign_dict = {
"mdbch": "md-charter",
"mdput": "md-utopia",
"mdugm": "md-garamond"
}
i = find_token(document.header, "\\font_roman", 0)
if i == -1:
return
val = get_value(document.header, "\\font_roman", i)
if val in mathdesign_dict.keys():
document.header[i] = "\\font_roman %s" % mathdesign_dict[val]
def revert_newtxmath(document):
" Revert native newtxmath definitions to LaTeX "
i = find_token(document.header, "\\font_math", 0)
if i == -1:
return
if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
val = get_value(document.header, "\\font_math", i)
mathfont_dict = {
"libertine-ntxm": "\\usepackage[libertine]{newtxmath}",
"minion-ntxm": "\\usepackage[minion]{newtxmath}",
"newtxmath": "\\usepackage{newtxmath}",
}
if val in mathfont_dict.keys():
add_to_preamble(document, mathfont_dict[val])
document.header[i] = "\\font_math auto"
def revert_biolinum(document):
" Revert native biolinum font definition to LaTeX "
if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
i = find_token(document.header, "\\font_sans biolinum", 0)
if i != -1:
osf = False
j = find_token(document.header, "\\font_osf true", 0)
if j != -1:
osf = True
preamble = "\\usepackage"
if not osf:
preamble += "[lf]"
preamble += "{biolinum-type1}"
add_to_preamble(document, [preamble])
document.header[i] = "\\font_sans default"
def revert_uop(document):
" Revert native URW Classico (Optima) font definition to LaTeX "
if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
i = find_token(document.header, "\\font_sans uop", 0)
if i != -1:
preamble = "\\renewcommand{\\sfdefault}{uop}"
add_to_preamble(document, [preamble])
document.header[i] = "\\font_sans default"
def convert_latexargs(document):
" Convert InsetArgument to new syntax "
i = 0
while True:
i = find_token(document.body, "\\begin_inset Argument", i)
if i == -1:
return
# We cannot do more here since we have no access to the layout.
# InsetArgument itself will do the real work
# (see InsetArgument::updateBuffer())
document.body[i] = "\\begin_inset Argument 999"
i = i + 1
def revert_latexargs(document):
" Revert InsetArgument to old syntax "
# FIXME: This method does not revert correctly (it does
# not reorder the arguments)
# What needs to be done is this:
# * find all arguments in a paragraph and reorder them
# according to their ID (which is deleted)
# So: \\begin_inset Argument 2 ... \\begin_inset Argument 1
# => \\begin_inset Argument ... \\begin_inset Argument
# with correct order.
i = 0
while True:
i = find_token(document.body, "\\begin_inset Argument", i)
if i == -1:
return
# Convert the syntax so that LyX 2.0 can at least open this
document.body[i] = "\\begin_inset Argument"
i = i + 1
def revert_Argument_to_TeX_brace(document, line, n, nmax, environment):
'''
Reverts an InsetArgument to TeX-code
usage:
revert_Argument_to_TeX_brace(document, LineOfBeginLayout, StartArgument, EndArgument, isEnvironment)
LineOfBeginLayout is the line of the \\begin_layout statement
StartArgument is the number of the first argument that needs to be converted
EndArgument is the number of the last argument that needs to be converted or the last defined one
isEnvironment must be True if the layout is for a LaTeX environment
'''
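# Example (see revert_IEEEtran below): revert_Argument_to_TeX_brace(document, k, 2, 2, True)
# reverts only the second argument of the Biography environment.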
lineArg = 0
while lineArg != -1 and n < nmax + 1:
lineArg = find_token(document.body, "\\begin_inset Argument " + str(n), line)
if lineArg != -1:
beginPlain = find_token(document.body, "\\begin_layout Plain Layout", lineArg)
endLayout = find_token(document.body, "\\end_layout", beginPlain)
endInset = find_token(document.body, "\\end_inset", endLayout)
if environment == False:
document.body[endLayout : endInset + 1] = put_cmd_in_ert("}{")
del(document.body[lineArg : beginPlain + 1])
else:
document.body[endLayout : endInset + 1] = put_cmd_in_ert("}")
document.body[lineArg : beginPlain + 1] = put_cmd_in_ert("{")
n = n + 1
def revert_IEEEtran(document):
'''
Reverts InsetArgument of
Page headings
Biography
Biography without photo
to TeX-code
'''
if document.textclass == "IEEEtran":
i = 0
j = 0
k = 0
while True:
if i != -1:
i = find_token(document.body, "\\begin_layout Page headings", i)
if i != -1:
revert_Argument_to_TeX_brace(document, i, 1, 1, False)
i = i + 1
if j != -1:
j = find_token(document.body, "\\begin_layout Biography without photo", j)
if j != -1:
revert_Argument_to_TeX_brace(document, j, 1, 1, True)
j = j + 1
if k != -1:
k = find_token(document.body, "\\begin_layout Biography", k)
kA = find_token(document.body, "\\begin_layout Biography without photo", k)
if k == kA and k != -1:
k = k + 1
continue
if k != -1:
# start with the second argument, therefore 2
revert_Argument_to_TeX_brace(document, k, 2, 2, True)
k = k + 1
if i == -1 and j == -1 and k == -1:
return
def convert_Argument_to_TeX_brace(document, line, n, nmax, environment):
'''
Converts TeX code to an InsetArgument
!!! Be careful if the braces in your case differ from what is expected here:
- }{ separates mandatory arguments of commands
- { and } surround a mandatory argument of an environment
usage:
convert_Argument_to_TeX_brace(document, LineOfBeginLayout, StartArgument, EndArgument, isEnvironment)
LineOfBeginLayout is the line of the \\begin_layout statement
StartArgument is the number of the first ERT that needs to be converted
EndArgument is the number of the last ERT that needs to be converted
isEnvironment must be True if the layout is for a LaTeX environment
Notes:
- this routine will fail if the user has additional TeX-braces (there is nothing we can do)
- this routine can currently handle only one mandatory argument of environments
Todo:
- support the case that }{ is in the file in 2 separate ERTs
'''
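# Example (see convert_IEEEtran below): convert_Argument_to_TeX_brace(document, i, 1, 1, False)
# converts the single "}{"-delimited ERT argument of the Page headings command.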
lineArg = line
while lineArg != -1 and n < nmax + 1:
lineArg = find_token(document.body, "\\begin_inset ERT", lineArg)
if environment == False and lineArg != -1:
bracePair = find_token(document.body, "}{", lineArg)
# assure that the "}{" is in this ERT (offset 5 for files saved with LyX 2.0, 4 for files exported by LyX 2.1)
if bracePair == lineArg + 5 or bracePair == lineArg + 4:
end = find_token(document.body, "\\end_inset", bracePair)
document.body[lineArg : end + 1] = ["\\end_layout", "", "\\end_inset"]
document.body[line + 1 : line + 1] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
n = n + 1
else:
lineArg = lineArg + 1
if environment == True and lineArg != -1:
opening = find_token(document.body, "{", lineArg)
if opening == lineArg + 5 or opening == lineArg + 4: # assure that the "{" is in this ERT
end = find_token(document.body, "\\end_inset", opening)
document.body[lineArg : end + 1] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
n = n + 1
lineArg2 = find_token(document.body, "\\begin_inset ERT", lineArg)
closing = find_token(document.body, "}", lineArg2)
if closing == lineArg2 + 5 or closing == lineArg2 + 4: # assure that the "}" is in this ERT
end2 = find_token(document.body, "\\end_inset", closing)
document.body[lineArg2 : end2 + 1] = ["\\end_layout", "", "\\end_inset"]
else:
lineArg = lineArg + 1
def convert_IEEEtran(document):
'''
Converts ERT of
Page headings
Biography
Biography without photo
to InsetArgument
'''
if document.textclass == "IEEEtran":
i = 0
j = 0
k = 0
while True:
if i != -1:
i = find_token(document.body, "\\begin_layout Page headings", i)
if i != -1:
convert_Argument_to_TeX_brace(document, i, 1, 1, False)
i = i + 1
if j != -1:
j = find_token(document.body, "\\begin_layout Biography without photo", j)
if j != -1:
convert_Argument_to_TeX_brace(document, j, 1, 1, True)
j = j + 1
if k != -1:
# assure that we do not mistake "Biography without photo" for "Biography"
k = find_token(document.body, "\\begin_layout Biography", k)
kA = find_token(document.body, "\\begin_layout Biography without photo", k - 1)
if k == kA and k != -1:
k = k + 1
continue
if k != -1:
# the argument we want to convert is the second one
convert_Argument_to_TeX_brace(document, k, 2, 2, True)
k = k + 1
if i == -1 and j == -1 and k == -1:
return
def revert_AASTeX(document):
" Reverts InsetArgument of Altaffilation to TeX-code "
if document.textclass == "aastex":
i = 0
while True:
if i != -1:
i = find_token(document.body, "\\begin_layout Altaffilation", i)
if i != -1:
revert_Argument_to_TeX_brace(document, i, 1, 1, False)
i = i + 1
if i == -1:
return
def convert_AASTeX(document):
" Converts ERT of Altaffilation to InsetArgument "
if document.textclass == "aastex":
i = 0
while True:
if i != -1:
i = find_token(document.body, "\\begin_layout Altaffilation", i)
if i != -1:
convert_Argument_to_TeX_brace(document, i, 1, 1, False)
i = i + 1
if i == -1:
return
def revert_AGUTeX(document):
" Reverts InsetArgument of Author affiliation to TeX-code "
if document.textclass == "agutex":
i = 0
while True:
if i != -1:
i = find_token(document.body, "\\begin_layout Author affiliation", i)
if i != -1:
revert_Argument_to_TeX_brace(document, i, 1, 1, False)
i = i + 1
if i == -1:
return
def convert_AGUTeX(document):
" Converts ERT of Author affiliation to InsetArgument "
if document.textclass == "agutex":
i = 0
while True:
if i != -1:
i = find_token(document.body, "\\begin_layout Author affiliation", i)
if i != -1:
convert_Argument_to_TeX_brace(document, i, 1, 1, False)
i = i + 1
if i == -1:
return
def revert_IJMP(document):
" Reverts InsetArgument of MarkBoth to TeX-code "
if document.textclass == "ijmpc" or document.textclass == "ijmpd":
i = 0
while True:
if i != -1:
i = find_token(document.body, "\\begin_layout MarkBoth", i)
if i != -1:
revert_Argument_to_TeX_brace(document, i, 1, 1, False)
i = i + 1
if i == -1:
return
def convert_IJMP(document):
" Converts ERT of MarkBoth to InsetArgument "
if document.textclass == "ijmpc" or document.textclass == "ijmpd":
i = 0
while True:
if i != -1:
i = find_token(document.body, "\\begin_layout MarkBoth", i)
if i != -1:
convert_Argument_to_TeX_brace(document, i, 1, 1, False)
i = i + 1
if i == -1:
return
def revert_literate(document):
" Revert Literate document to old format "
if del_token(document.header, "noweb", 0):
document.textclass = "literate-" + document.textclass
i = 0
while True:
i = find_token(document.body, "\\begin_layout Chunk", i)
if i == -1:
break
document.body[i] = "\\begin_layout Scrap"
i = i + 1
def convert_literate(document):
" Convert Literate document to new format"
i = find_token(document.header, "\\textclass", 0)
if (i != -1) and "literate-" in document.header[i]:
document.textclass = document.header[i].replace("\\textclass literate-", "")
j = find_token(document.header, "\\begin_modules", 0)
if (j != -1):
document.header.insert(j + 1, "noweb")
else:
document.header.insert(i + 1, "\\end_modules")
document.header.insert(i + 1, "noweb")
document.header.insert(i + 1, "\\begin_modules")
i = 0
while True:
i = find_token(document.body, "\\begin_layout Scrap", i)
if i == -1:
break
document.body[i] = "\\begin_layout Chunk"
i = i + 1
##
# Conversion hub
#
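# Each entry pairs a file format number with the list of routines needed to
# reach that format (convert) or to leave it again (revert); an empty list
# means only the format number changes.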
supported_versions = ["2.1.0","2.1"]
convert = [
[414, []],
[415, [convert_undertilde]],
[416, []],
[417, [convert_japanese_encodings]],
[418, []],
[419, []],
[420, [convert_biblio_style]],
[421, [convert_longtable_captions]],
[422, [convert_use_packages]],
[423, [convert_use_mathtools]],
[424, [convert_cite_engine_type]],
[425, []],
[426, []],
[427, []],
[428, [convert_cell_rotation]],
[429, [convert_table_rotation]],
[430, [convert_listoflistings]],
[431, [convert_use_amssymb]],
[432, []],
[433, [convert_armenian]],
[434, []],
[435, []],
[436, []],
[437, []],
[438, []],
[439, []],
[440, []],
[441, [convert_mdnomath]],
[442, []],
[443, []],
[444, []],
[445, []],
[446, [convert_latexargs]],
[447, [convert_IEEEtran, convert_AASTeX, convert_AGUTeX, convert_IJMP]],
[448, [convert_literate]]
]
revert = [
[447, [revert_literate]],
[446, [revert_IEEEtran, revert_AASTeX, revert_AGUTeX, revert_IJMP]],
[445, [revert_latexargs]],
[444, [revert_uop]],
[443, [revert_biolinum]],
[442, []],
[441, [revert_newtxmath]],
[440, [revert_mdnomath]],
[439, [revert_mathfonts]],
[438, [revert_minionpro]],
[437, [revert_ipadeco, revert_ipachar]],
[436, [revert_texgyre]],
[435, [revert_mathdesign]],
[434, [revert_txtt]],
[433, [revert_libertine]],
[432, [revert_armenian]],
[431, [revert_languages, revert_ancientgreek]],
[430, [revert_use_amssymb]],
[429, [revert_listoflistings]],
[428, [revert_table_rotation]],
[427, [revert_cell_rotation]],
[426, [revert_tipa]],
[425, [revert_verbatim]],
[424, [revert_cancel]],
[423, [revert_cite_engine_type]],
[422, [revert_use_mathtools]],
[421, [revert_use_packages]],
[420, [revert_longtable_captions]],
[419, [revert_biblio_style]],
[418, [revert_australian]],
[417, [revert_justification]],
[416, [revert_japanese_encodings]],
[415, [revert_negative_space, revert_math_spaces]],
[414, [revert_undertilde]],
[413, [revert_visible_space]]
]
if __name__ == "__main__":
pass