Update lyx2lyx from 2.2.0. This is in preparation for the 2.1.5 release.

This commit is contained in:
Richard Heck 2016-06-01 12:31:48 -04:00
parent dd2aef2c33
commit 4eb3ed96e5
15 changed files with 354 additions and 186 deletions

View File

@ -14,4 +14,10 @@ if (UNIX)
# include(../PyCompile) # include(../PyCompile)
endif() endif()
set(_testname "lyx2lyx/parser_tools")
add_test(NAME ${_testname}
COMMAND ${LYX_PYTHON_EXECUTABLE} "${TOP_SRC_DIR}/lib/lyx2lyx/test_parser_tools.py"
)
settestlabel(${_testname} "lyx2lyx")
install(PROGRAMS ${TOP_SRC_DIR}/lib/lyx2lyx/lyx2lyx DESTINATION ${LYX_DATA_SUBDIR}lyx2lyx) install(PROGRAMS ${TOP_SRC_DIR}/lib/lyx2lyx/lyx2lyx DESTINATION ${LYX_DATA_SUBDIR}lyx2lyx)

View File

@ -1,6 +1,6 @@
# This file is part of lyx2lyx # This file is part of lyx2lyx
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
# Copyright (C) 2002-2011 The LyX Team # Copyright (C) 2002-2015 The LyX Team
# Copyright (C) 2002-2004 Dekel Tsur <dekel@lyx.org> # Copyright (C) 2002-2004 Dekel Tsur <dekel@lyx.org>
# Copyright (C) 2002-2006 José Matos <jamatos@lyx.org> # Copyright (C) 2002-2006 José Matos <jamatos@lyx.org>
# #
@ -33,7 +33,7 @@ try:
import lyx2lyx_version import lyx2lyx_version
version__ = lyx2lyx_version.version version__ = lyx2lyx_version.version
except: # we are running from build directory so assume the last version except: # we are running from build directory so assume the last version
version__ = '2.1' version__ = '2.2'
default_debug__ = 2 default_debug__ = 2
@ -64,6 +64,7 @@ def minor_versions(major, last_minor_version):
format_re = re.compile(r"(\d)[\.,]?(\d\d)") format_re = re.compile(r"(\d)[\.,]?(\d\d)")
fileformat = re.compile(r"\\lyxformat\s*(\S*)") fileformat = re.compile(r"\\lyxformat\s*(\S*)")
original_version = re.compile(r".*?LyX ([\d.]*)") original_version = re.compile(r".*?LyX ([\d.]*)")
original_tex2lyx_version = re.compile(r".*?tex2lyx ([\d.]*)")
## ##
# file format information: # file format information:
@ -79,11 +80,13 @@ format_relation = [("0_06", [200], minor_versions("0.6" , 4)),
("1_1_6_3", [218], ["1.1", "1.1.6.3","1.1.6.4"]), ("1_1_6_3", [218], ["1.1", "1.1.6.3","1.1.6.4"]),
("1_2", [220], minor_versions("1.2" , 4)), ("1_2", [220], minor_versions("1.2" , 4)),
("1_3", [221], minor_versions("1.3" , 7)), ("1_3", [221], minor_versions("1.3" , 7)),
("1_4", range(222,246), minor_versions("1.4" , 5)), # Note that range(i,j) is up to j *excluded*.
("1_5", range(246,277), minor_versions("1.5" , 7)), ("1_4", list(range(222,246)), minor_versions("1.4" , 5)),
("1_6", range(277,346), minor_versions("1.6" , 10)), ("1_5", list(range(246,277)), minor_versions("1.5" , 7)),
("2_0", range(346,414), minor_versions("2.0", 8)), ("1_6", list(range(277,346)), minor_versions("1.6" , 10)),
("2_1", range(414,475), minor_versions("2.1", 0)) ("2_0", list(range(346,414)), minor_versions("2.0" , 8)),
("2_1", list(range(414,475)), minor_versions("2.1" , 5)),
("2_2", list(range(475,509)), minor_versions("2.2" , 0))
] ]
#################################################################### ####################################################################
@ -186,7 +189,8 @@ class LyX_base:
def __init__(self, end_format = 0, input = "", output = "", error = "", def __init__(self, end_format = 0, input = "", output = "", error = "",
debug = default_debug__, try_hard = 0, cjk_encoding = '', debug = default_debug__, try_hard = 0, cjk_encoding = '',
final_version = "", language = "english", encoding = "auto"): final_version = "", systemlyxdir = '', language = "english",
encoding = "auto"):
"""Arguments: """Arguments:
end_format: final format that the file should be converted. (integer) end_format: final format that the file should be converted. (integer)
@ -251,6 +255,7 @@ class LyX_base:
self.status = 0 self.status = 0
self.encoding = encoding self.encoding = encoding
self.language = language self.language = language
self.systemlyxdir = systemlyxdir
def warning(self, message, debug_level= default_debug__): def warning(self, message, debug_level= default_debug__):
@ -415,12 +420,16 @@ class LyX_base:
return None return None
line = line.replace("fix",".") line = line.replace("fix",".")
# need to test original_tex2lyx_version first because tex2lyx
# writes "#LyX file created by tex2lyx 2.2"
result = original_tex2lyx_version.match(line)
if not result:
result = original_version.match(line) result = original_version.match(line)
if result: if result:
# Special known cases: reLyX and KLyX # Special known cases: reLyX and KLyX
if line.find("reLyX") != -1 or line.find("KLyX") != -1: if line.find("reLyX") != -1 or line.find("KLyX") != -1:
return "0.12" return "0.12"
if result:
res = result.group(1) res = result.group(1)
if not res: if not res:
self.warning(line) self.warning(line)
@ -547,6 +556,11 @@ class LyX_base:
def convert(self): def convert(self):
"Convert from current (self.format) to self.end_format." "Convert from current (self.format) to self.end_format."
if self.format == self.end_format:
self.warning("No conversion needed: Target format %s "
"same as current format!" % self.format, default_debug__)
return
mode, conversion_chain = self.chain() mode, conversion_chain = self.chain()
self.warning("conversion chain: " + str(conversion_chain), 3) self.warning("conversion chain: " + str(conversion_chain), 3)
@ -722,9 +736,10 @@ class File(LyX_base):
def __init__(self, end_format = 0, input = "", output = "", error = "", def __init__(self, end_format = 0, input = "", output = "", error = "",
debug = default_debug__, try_hard = 0, cjk_encoding = '', debug = default_debug__, try_hard = 0, cjk_encoding = '',
final_version = ''): final_version = '', systemlyxdir = ''):
LyX_base.__init__(self, end_format, input, output, error, LyX_base.__init__(self, end_format, input, output, error,
debug, try_hard, cjk_encoding, final_version) debug, try_hard, cjk_encoding, final_version,
systemlyxdir)
self.read() self.read()

View File

@ -1,8 +1,8 @@
include $(top_srcdir)/config/common.am include $(top_srcdir)/config/common.am
CLEANFILES += *.pyc *.pyo CLEANFILES = *.pyc *.pyo
EXTRA_DIST = lyx2lyx_version.py.in EXTRA_DIST = lyx2lyx_version.py.in test_parser_tools.py CMakeLists.txt
CHMOD = chmod CHMOD = chmod
@ -32,8 +32,20 @@ dist_lyx2lyx_PYTHON = \
lyx_1_6.py \ lyx_1_6.py \
lyx_2_0.py \ lyx_2_0.py \
lyx_2_1.py \ lyx_2_1.py \
lyx_2_2.py \
profiling.py \ profiling.py \
test_parser_tools.py test_parser_tools.py
install-data-hook: install-data-hook:
$(CHMOD) 755 $(DESTDIR)$(lyx2lyxdir)/lyx2lyx $(CHMOD) 755 $(DESTDIR)$(lyx2lyxdir)/lyx2lyx
alltests: check alltests-recursive
alltests-recursive:
@$(PYTHON) "$(srcdir)/test_parser_tools.py"; \
if test $$? -eq 0; then \
echo -e "=====================\nlyx2lyx tests passed.\n====================="; \
else \
echo -e "=====================\nlyx2lyx tests failed.\n====================="; \
fi

View File

@ -19,6 +19,7 @@
""" This module parses lib/languages and prints it as a python """ This module parses lib/languages and prints it as a python
dictionary, ready to use by other python modules""" dictionary, ready to use by other python modules"""
from __future__ import print_function
import pprint import pprint
def parse_line(line): def parse_line(line):
@ -55,8 +56,8 @@ if __name__ == '__main__':
lang[tmp[0]] = tmp[1:] lang[tmp[0]] = tmp[1:]
print "# This file is generated by generate_incoding_info.py from lib/languages file." print ("# This file is generated by generate_incoding_info.py from lib/languages file.")
print "# Do not change this file directly." print ("# Do not change this file directly.")
print print ()
print "lang = ", print ("lang = ", end = " ")
pprint.pprint(lang) pprint.pprint(lang)

View File

@ -63,6 +63,8 @@ Copyright (C) 2011 The LyX Team, José Matos and Dekel Tsur""" % LyX.version__
help = "list all available formats and supported versions") help = "list all available formats and supported versions")
parser.add_option("-n", "--try-hard", action="store_true", parser.add_option("-n", "--try-hard", action="store_true",
help = "try hard (ignore any conversion errors)") help = "try hard (ignore any conversion errors)")
parser.add_option("-s", "--systemlyxdir", dest= "systemlyxdir",
help= "LyX system directory for conversion from version 489 or older")
(options, args) = parser.parse_args() (options, args) = parser.parse_args()
if args: if args:

View File

@ -17,7 +17,7 @@
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
''' '''
This modules offer several free functions to help with lyx2lyx'ing. This module offers several free functions to help with lyx2lyx'ing.
More documentation is below, but here is a quick guide to what More documentation is below, but here is a quick guide to what
they do. Optional arguments are marked by brackets. they do. Optional arguments are marked by brackets.
@ -47,18 +47,36 @@ put_cmd_in_ert(arg):
ert = put_cmd_in_ert(content) ert = put_cmd_in_ert(content)
document.body[i:j+1] = ert document.body[i:j+1] = ert
get_ert(lines, i[, verbatim]):
Here, lines is a list of lines of LyX material containing an ERT inset,
whose content we want to convert to LaTeX. The ERT starts at index i.
If the optional (by default: False) bool verbatim is True, the content
of the ERT is returned verbatim, that is in LyX syntax (not LaTeX syntax)
for the use in verbatim insets.
lyx2latex(document, lines): lyx2latex(document, lines):
Here, lines is a list of lines of LyX material we want to convert Here, lines is a list of lines of LyX material we want to convert
to LaTeX. We do the best we can and return a string containing to LaTeX. We do the best we can and return a string containing
the translated material. the translated material.
lyx2verbatim(document, lines):
Here, lines is a list of lines of LyX material we want to convert
to verbatim material (used in ERT and the like). We do the best we
can and return a string containing the translated material.
latex_length(slen): latex_length(slen):
Convert lengths (in LyX form) to their LaTeX representation. Returns Convert lengths (in LyX form) to their LaTeX representation. Returns
(bool, length), where the bool tells us if it was a percentage, and (bool, length), where the bool tells us if it was a percentage, and
the length is the LaTeX representation. the length is the LaTeX representation.
convert_info_insets(document, type, func):
Applies func to the argument of all info insets matching certain types
type : the type to match. This can be a regular expression.
func : function from string to string to apply to the "arg" field of
the info insets.
''' '''
import re
import string import string
from parser_tools import find_token, find_end_of_inset from parser_tools import find_token, find_end_of_inset
from unicode_symbols import unicode_reps from unicode_symbols import unicode_reps
@ -125,14 +143,14 @@ def put_cmd_in_ert(arg):
else: else:
s = arg s = arg
for rep in unicode_reps: for rep in unicode_reps:
s = s.replace(rep[1], rep[0].replace('\\\\', '\\')) s = s.replace(rep[1], rep[0])
s = s.replace('\\', "\\backslash\n") s = s.replace('\\', "\\backslash\n")
ret += s.splitlines() ret += s.splitlines()
ret += ["\\end_layout", "", "\\end_inset"] ret += ["\\end_layout", "", "\\end_inset"]
return ret return ret
def get_ert(lines, i): def get_ert(lines, i, verbatim = False):
'Convert an ERT inset into LaTeX.' 'Convert an ERT inset into LaTeX.'
if not lines[i].startswith("\\begin_inset ERT"): if not lines[i].startswith("\\begin_inset ERT"):
return "" return ""
@ -156,6 +174,9 @@ def get_ert(lines, i):
while i + 1 < j and lines[i+1] == "": while i + 1 < j and lines[i+1] == "":
i = i + 1 i = i + 1
elif lines[i] == "\\backslash": elif lines[i] == "\\backslash":
if verbatim:
ret = ret + "\n" + lines[i] + "\n"
else:
ret = ret + "\\" ret = ret + "\\"
else: else:
ret = ret + lines[i] ret = ret + lines[i]
@ -204,6 +225,10 @@ def lyx2latex(document, lines):
line = "''" line = "''"
else: else:
line = "'" line = "'"
elif line.startswith("\\begin_inset Newline newline"):
line = "\\\\ "
elif line.startswith("\\noindent"):
line = "\\noindent " # we need the space behind the command
elif line.startswith("\\begin_inset space"): elif line.startswith("\\begin_inset space"):
line = line[18:].strip() line = line[18:].strip()
if line.startswith("\\hspace"): if line.startswith("\\hspace"):
@ -250,7 +275,7 @@ def lyx2latex(document, lines):
# Do the LyX text --> LaTeX conversion # Do the LyX text --> LaTeX conversion
for rep in unicode_reps: for rep in unicode_reps:
line = line.replace(rep[1], rep[0] + "{}") line = line.replace(rep[1], rep[0])
line = line.replace(r'\backslash', r'\textbackslash{}') line = line.replace(r'\backslash', r'\textbackslash{}')
line = line.replace(r'\series bold', r'\bfseries{}').replace(r'\series default', r'\mdseries{}') line = line.replace(r'\series bold', r'\bfseries{}').replace(r'\series default', r'\mdseries{}')
line = line.replace(r'\shape italic', r'\itshape{}').replace(r'\shape smallcaps', r'\scshape{}') line = line.replace(r'\shape italic', r'\itshape{}').replace(r'\shape smallcaps', r'\scshape{}')
@ -265,6 +290,15 @@ def lyx2latex(document, lines):
return content return content
def lyx2verbatim(document, lines):
'Convert some LyX stuff into corresponding verbatim stuff, as best we can.'
content = lyx2latex(document, lines)
content = re.sub(r'\\(?!backslash)', r'\n\\backslash\n', content)
return content
def latex_length(slen): def latex_length(slen):
''' '''
Convert lengths to their LaTeX representation. Returns (bool, length), Convert lengths to their LaTeX representation. Returns (bool, length),
@ -283,7 +317,7 @@ def latex_length(slen):
units = {"text%":"\\textwidth", "col%":"\\columnwidth", units = {"text%":"\\textwidth", "col%":"\\columnwidth",
"page%":"\\paperwidth", "line%":"\\linewidth", "page%":"\\paperwidth", "line%":"\\linewidth",
"theight%":"\\textheight", "pheight%":"\\paperheight"} "theight%":"\\textheight", "pheight%":"\\paperheight"}
for unit in units.keys(): for unit in list(units.keys()):
i = slen.find(unit) i = slen.find(unit)
if i == -1: if i == -1:
continue continue
@ -319,6 +353,44 @@ def latex_length(slen):
return (percent, slen) return (percent, slen)
def length_in_bp(length):
" Convert a length in LyX format to its value in bp units "
em_width = 10.0 / 72.27 # assume 10pt font size
text_width = 8.27 / 1.7 # assume A4 with default margins
# scale factors are taken from Length::inInch()
scales = {"bp" : 1.0,
"cc" : (72.0 / (72.27 / (12.0 * 0.376 * 2.845))),
"cm" : (72.0 / 2.54),
"dd" : (72.0 / (72.27 / (0.376 * 2.845))),
"em" : (72.0 * em_width),
"ex" : (72.0 * em_width * 0.4305),
"in" : 72.0,
"mm" : (72.0 / 25.4),
"mu" : (72.0 * em_width / 18.0),
"pc" : (72.0 / (72.27 / 12.0)),
"pt" : (72.0 / (72.27)),
"sp" : (72.0 / (72.27 * 65536.0)),
"text%" : (72.0 * text_width / 100.0),
"col%" : (72.0 * text_width / 100.0), # assume 1 column
"page%" : (72.0 * text_width * 1.7 / 100.0),
"line%" : (72.0 * text_width / 100.0),
"theight%" : (72.0 * text_width * 1.787 / 100.0),
"pheight%" : (72.0 * text_width * 2.2 / 100.0)}
rx = re.compile(r'^\s*([^a-zA-Z%]+)([a-zA-Z%]+)\s*$')
m = rx.match(length)
if not m:
document.warning("Invalid length value: " + length + ".")
return 0
value = m.group(1)
unit = m.group(2)
if not unit in scales.keys():
document.warning("Unknown length unit: " + unit + ".")
return value
return "%g" % (float(value) * scales[unit])
def revert_flex_inset(lines, name, LaTeXname): def revert_flex_inset(lines, name, LaTeXname):
" Convert flex insets to TeX code " " Convert flex insets to TeX code "
i = 0 i = 0
@ -436,3 +508,21 @@ def str2bool(s):
"'true' goes to True, case-insensitively, and we strip whitespace." "'true' goes to True, case-insensitively, and we strip whitespace."
s = s.strip().lower() s = s.strip().lower()
return s == "true" return s == "true"
def convert_info_insets(document, type, func):
"Convert info insets matching type using func."
i = 0
type_re = re.compile(r'^type\s+"(%s)"$' % type)
arg_re = re.compile(r'^arg\s+"(.*)"$')
while True:
i = find_token(document.body, "\\begin_inset Info", i)
if i == -1:
return
t = type_re.match(document.body[i + 1])
if t:
arg = arg_re.match(document.body[i + 2])
if arg:
new_arg = func(arg.group(1))
document.body[i + 2] = 'arg "%s"' % new_arg
i += 3

View File

@ -154,7 +154,7 @@ def remove_oldfloat(document):
j = find_token(lines, "\\end_float", i+1) j = find_token(lines, "\\end_float", i+1)
floattype = lines[i].split()[1] floattype = lines[i].split()[1]
if not floats.has_key(floattype): if floattype not in floats:
document.warning("Error! Unknown float type " + floattype) document.warning("Error! Unknown float type " + floattype)
floattype = "fig" floattype = "fig"
@ -284,7 +284,7 @@ def remove_pextra(document):
def is_empty(lines): def is_empty(lines):
" Are all the lines empty?" " Are all the lines empty?"
return filter(is_nonempty_line, lines) == [] return list(filter(is_nonempty_line, lines)) == []
move_rexp = re.compile(r"\\(family|series|shape|size|emph|numeric|bar|noun|end_deeper)") move_rexp = re.compile(r"\\(family|series|shape|size|emph|numeric|bar|noun|end_deeper)")
@ -358,7 +358,7 @@ def remove_oldert(document):
tmp.append(line) tmp.append(line)
if is_empty(tmp): if is_empty(tmp):
if filter(lambda x:x != "", tmp) != []: if [x for x in tmp if x != ""] != []:
if new == []: if new == []:
# This is not necessary, but we want the output to be # This is not necessary, but we want the output to be
# as similar as posible to the lyx format # as similar as posible to the lyx format

View File

@ -1495,7 +1495,7 @@ def convert_len(len, special):
len = '%f\\' % len2value(len) + special len = '%f\\' % len2value(len) + special
# Convert LyX units to LaTeX units # Convert LyX units to LaTeX units
for unit in units.keys(): for unit in list(units.keys()):
if len.find(unit) != -1: if len.find(unit) != -1:
len = '%f' % (len2value(len) / 100) + units[unit] len = '%f' % (len2value(len) / 100) + units[unit]
break break
@ -1571,7 +1571,7 @@ def convert_frameless_box(document):
'inner_pos':1, 'use_parbox':'0', 'width':'100col%', 'inner_pos':1, 'use_parbox':'0', 'width':'100col%',
'special':'none', 'height':'1in', 'special':'none', 'height':'1in',
'height_special':'totalheight', 'collapsed':'false'} 'height_special':'totalheight', 'collapsed':'false'}
for key in params.keys(): for key in list(params.keys()):
value = get_value(document.body, key, i, j).replace('"', '') value = get_value(document.body, key, i, j).replace('"', '')
if value != "": if value != "":
if key == 'position': if key == 'position':

View File

@ -26,6 +26,15 @@ import sys, os
from parser_tools import find_re, find_token, find_token_backwards, find_token_exact, find_tokens, find_end_of, get_value, find_beginning_of, find_nonempty_line from parser_tools import find_re, find_token, find_token_backwards, find_token_exact, find_tokens, find_end_of, get_value, find_beginning_of, find_nonempty_line
from LyX import get_encoding from LyX import get_encoding
# Provide support for both python 2 and 3
PY2 = sys.version_info[0] == 2
if not PY2:
text_type = str
unichr = chr
else:
text_type = unicode
unichr = unichr
# End of code to support for both python 2 and 3
#################################################################### ####################################################################
# Private helper functions # Private helper functions
@ -93,7 +102,7 @@ def convert_font_settings(document):
if font_scheme == '': if font_scheme == '':
document.warning("Malformed LyX document: Empty `\\fontscheme'.") document.warning("Malformed LyX document: Empty `\\fontscheme'.")
font_scheme = 'default' font_scheme = 'default'
if not font_scheme in roman_fonts.keys(): if not font_scheme in list(roman_fonts.keys()):
document.warning("Malformed LyX document: Unknown `\\fontscheme' `%s'." % font_scheme) document.warning("Malformed LyX document: Unknown `\\fontscheme' `%s'." % font_scheme)
font_scheme = 'default' font_scheme = 'default'
document.header[i:i+1] = ['\\font_roman %s' % roman_fonts[font_scheme], document.header[i:i+1] = ['\\font_roman %s' % roman_fonts[font_scheme],
@ -163,7 +172,7 @@ def revert_font_settings(document):
del document.header[i] del document.header[i]
if font_tt_scale != '100': if font_tt_scale != '100':
document.warning("Conversion of '\\font_tt_scale' not yet implemented.") document.warning("Conversion of '\\font_tt_scale' not yet implemented.")
for font_scheme in roman_fonts.keys(): for font_scheme in list(roman_fonts.keys()):
if (roman_fonts[font_scheme] == fonts['roman'] and if (roman_fonts[font_scheme] == fonts['roman'] and
sans_fonts[font_scheme] == fonts['sans'] and sans_fonts[font_scheme] == fonts['sans'] and
typewriter_fonts[font_scheme] == fonts['typewriter']): typewriter_fonts[font_scheme] == fonts['typewriter']):
@ -334,6 +343,7 @@ def revert_utf8(document):
convert_multiencoding(document, False) convert_multiencoding(document, False)
# FIXME: Use the version in unicode_symbols.py which has some bug fixes
def read_unicodesymbols(): def read_unicodesymbols():
" Read the unicodesymbols list of unicode characters and corresponding commands." " Read the unicodesymbols list of unicode characters and corresponding commands."
pathname = os.path.abspath(os.path.dirname(sys.argv[0])) pathname = os.path.abspath(os.path.dirname(sys.argv[0]))
@ -376,7 +386,7 @@ def revert_unicode_line(document, i, insets, spec_chars, replacement_character =
last_char = character last_char = character
except: except:
# Try to replace with ERT/math inset # Try to replace with ERT/math inset
if spec_chars.has_key(character): if character in spec_chars:
command = spec_chars[character][0] # the command to replace unicode command = spec_chars[character][0] # the command to replace unicode
flag1 = spec_chars[character][1] flag1 = spec_chars[character][1]
flag2 = spec_chars[character][2] flag2 = spec_chars[character][2]
@ -1211,7 +1221,7 @@ def revert_accent(document):
try: try:
document.body[i] = normalize("NFD", document.body[i]) document.body[i] = normalize("NFD", document.body[i])
except TypeError: except TypeError:
document.body[i] = normalize("NFD", unicode(document.body[i], 'utf-8')) document.body[i] = normalize("NFD", text_type(document.body[i], 'utf-8'))
# Replace accented characters with InsetLaTeXAccent # Replace accented characters with InsetLaTeXAccent
# Do not convert characters that can be represented in the chosen # Do not convert characters that can be represented in the chosen
@ -1346,15 +1356,15 @@ def normalize_font_whitespace(document, char_properties):
# a new paragraph resets all font changes # a new paragraph resets all font changes
changes.clear() changes.clear()
# also reset the default language to be the paragraph's language # also reset the default language to be the paragraph's language
if "\\lang" in char_properties.keys(): if "\\lang" in list(char_properties.keys()):
char_properties["\\lang"] = \ char_properties["\\lang"] = \
get_paragraph_language(document, i + 1) get_paragraph_language(document, i + 1)
elif len(words) > 1 and words[0] in char_properties.keys(): elif len(words) > 1 and words[0] in list(char_properties.keys()):
# we have a font change # we have a font change
if char_properties[words[0]] == words[1]: if char_properties[words[0]] == words[1]:
# property gets reset # property gets reset
if words[0] in changes.keys(): if words[0] in list(changes.keys()):
del changes[words[0]] del changes[words[0]]
defaultproperty = True defaultproperty = True
else: else:
@ -1372,11 +1382,11 @@ def normalize_font_whitespace(document, char_properties):
lines[i-1] = lines[i-1][:-1] lines[i-1] = lines[i-1][:-1]
# a space before the font change # a space before the font change
added_lines = [" "] added_lines = [" "]
for k in changes.keys(): for k in list(changes.keys()):
# exclude property k because that is already in lines[i] # exclude property k because that is already in lines[i]
if k != words[0]: if k != words[0]:
added_lines[1:1] = ["%s %s" % (k, changes[k])] added_lines[1:1] = ["%s %s" % (k, changes[k])]
for k in changes.keys(): for k in list(changes.keys()):
# exclude property k because that must be added below anyway # exclude property k because that must be added below anyway
if k != words[0]: if k != words[0]:
added_lines[0:0] = ["%s %s" % (k, char_properties[k])] added_lines[0:0] = ["%s %s" % (k, char_properties[k])]
@ -1400,11 +1410,11 @@ def normalize_font_whitespace(document, char_properties):
continue continue
lines[i+1] = lines[i+1][1:] lines[i+1] = lines[i+1][1:]
added_lines = [" "] added_lines = [" "]
for k in changes.keys(): for k in list(changes.keys()):
# exclude property k because that is already in lines[i] # exclude property k because that is already in lines[i]
if k != words[0]: if k != words[0]:
added_lines[1:1] = ["%s %s" % (k, changes[k])] added_lines[1:1] = ["%s %s" % (k, changes[k])]
for k in changes.keys(): for k in list(changes.keys()):
# exclude property k because that must be added below anyway # exclude property k because that must be added below anyway
if k != words[0]: if k != words[0]:
added_lines[0:0] = ["%s %s" % (k, char_properties[k])] added_lines[0:0] = ["%s %s" % (k, char_properties[k])]

View File

@ -94,7 +94,7 @@ def convert_len(len):
"theight%":"\\backslash\ntextheight", "pheight%":"\\backslash\npageheight"} "theight%":"\\backslash\ntextheight", "pheight%":"\\backslash\npageheight"}
# Convert LyX units to LaTeX units # Convert LyX units to LaTeX units
for unit in units.keys(): for unit in list(units.keys()):
if len.find(unit) != -1: if len.find(unit) != -1:
len = '%f' % (len2value(len) / 100) len = '%f' % (len2value(len) / 100)
len = len.strip('0') + units[unit] len = len.strip('0') + units[unit]
@ -145,6 +145,7 @@ def set_option(document, m, option, value):
return l return l
# FIXME: Use the version in unicode_symbols.py which has some bug fixes
def read_unicodesymbols(): def read_unicodesymbols():
" Read the unicodesymbols list of unicode characters and corresponding commands." " Read the unicodesymbols list of unicode characters and corresponding commands."
pathname = os.path.abspath(os.path.dirname(sys.argv[0])) pathname = os.path.abspath(os.path.dirname(sys.argv[0]))
@ -1765,7 +1766,7 @@ def convert_module_names(document):
return return
newmodlist = [] newmodlist = []
for mod in modlist: for mod in modlist:
if modulemap.has_key(mod): if mod in modulemap:
newmodlist.append(modulemap[mod]) newmodlist.append(modulemap[mod])
else: else:
document.warning("Can't find module %s in the module map!" % mod) document.warning("Can't find module %s in the module map!" % mod)

View File

@ -89,10 +89,7 @@ def revert_tabularvalign(document):
if p != -1: if p != -1:
q = document.body[fline].find("tabularvalignment") q = document.body[fline].find("tabularvalignment")
if q != -1: if q != -1:
# FIXME document.body[fline] = re.sub(r' tabularvalignment=\"[a-z]+\"', "", document.body[fline])
# This seems wrong: It removes everything after
# tabularvalignment, too.
document.body[fline] = document.body[fline][:q - 1] + '>'
i += 1 i += 1
continue continue
@ -108,16 +105,13 @@ def revert_tabularvalign(document):
# delete tabularvalignment # delete tabularvalignment
q = document.body[fline].find("tabularvalignment") q = document.body[fline].find("tabularvalignment")
if q != -1: if q != -1:
# FIXME document.body[fline] = re.sub(r' tabularvalignment=\"[a-z]+\"', "", document.body[fline])
# This seems wrong: It removes everything after
# tabularvalignment, too.
document.body[fline] = document.body[fline][:q - 1] + '>'
# don't add a box when centered # don't add a box when centered
if tabularvalignment == 'c': if tabularvalignment == 'c':
i = end i = end
continue continue
subst = ['\\end_layout', '\\end_inset'] subst = ['\\end_inset', '\\end_layout']
document.body[end:end] = subst # just inserts those lines document.body[end:end] = subst # just inserts those lines
subst = ['\\begin_inset Box Frameless', subst = ['\\begin_inset Box Frameless',
'position "' + tabularvalignment +'"', 'position "' + tabularvalignment +'"',
@ -563,7 +557,6 @@ def revert_nomencl_cwidth(document):
j = find_end_of_inset(document.body, i) j = find_end_of_inset(document.body, i)
l = find_token(document.body, "width", i, j) l = find_token(document.body, "width", i, j)
if l == -1: if l == -1:
document.warning("Can't find width option for nomencl_print!")
i = j i = j
continue continue
width = get_quoted_value(document.body, "width", i, j) width = get_quoted_value(document.body, "width", i, j)
@ -1585,10 +1578,13 @@ def convert_use_makebox(document):
def revert_IEEEtran(document): def revert_IEEEtran(document):
" Convert IEEEtran layouts and styles to TeX code " " Convert IEEEtran layouts and styles to TeX code "
if document.textclass != "IEEEtran": if document.textclass != "IEEEtran":
return return
revert_flex_inset(document.body, "IEEE membership", "\\IEEEmembership") revert_flex_inset(document.body, "IEEE membership", "\\IEEEmembership")
revert_flex_inset(document.body, "Lowercase", "\\MakeLowercase") revert_flex_inset(document.body, "Lowercase", "\\MakeLowercase")
layouts = ("Special Paper Notice", "After Title Text", "Publication ID", layouts = ("Special Paper Notice", "After Title Text", "Publication ID",
"Page headings", "Biography without photo") "Page headings", "Biography without photo")
latexcmd = {"Special Paper Notice": "\\IEEEspecialpapernotice", latexcmd = {"Special Paper Notice": "\\IEEEspecialpapernotice",
@ -1596,6 +1592,7 @@ def revert_IEEEtran(document):
"Publication ID": "\\IEEEpubid"} "Publication ID": "\\IEEEpubid"}
obsoletedby = {"Page headings": "MarkBoth", obsoletedby = {"Page headings": "MarkBoth",
"Biography without photo": "BiographyNoPhoto"} "Biography without photo": "BiographyNoPhoto"}
for layout in layouts: for layout in layouts:
i = 0 i = 0
while True: while True:
@ -1607,7 +1604,7 @@ def revert_IEEEtran(document):
document.warning("Malformed LyX document: Can't find end of " + layout + " layout.") document.warning("Malformed LyX document: Can't find end of " + layout + " layout.")
i += 1 i += 1
continue continue
if layout in obsoletedby: if layout in list(obsoletedby.keys()):
document.body[i] = "\\begin_layout " + obsoletedby[layout] document.body[i] = "\\begin_layout " + obsoletedby[layout]
i = j i = j
continue continue
@ -2394,11 +2391,10 @@ def revert_script(document):
def convert_use_xetex(document): def convert_use_xetex(document):
" convert \\use_xetex to \\use_non_tex_fonts " " convert \\use_xetex to \\use_non_tex_fonts "
i = 0
i = find_token(document.header, "\\use_xetex", 0) i = find_token(document.header, "\\use_xetex", 0)
if i == -1: if i == -1:
return document.header.insert(-1, "\\use_non_tex_fonts 0")
else:
val = get_value(document.header, "\\use_xetex", 0) val = get_value(document.header, "\\use_xetex", 0)
document.header[i] = "\\use_non_tex_fonts " + val document.header[i] = "\\use_non_tex_fonts " + val

View File

@ -58,7 +58,7 @@ def revert_Argument_to_TeX_brace(document, line, endline, n, nmax, environment,
Reverts an InsetArgument to TeX-code Reverts an InsetArgument to TeX-code
usage: usage:
revert_Argument_to_TeX_brace(document, LineOfBegin, LineOfEnd, StartArgument, EndArgument, isEnvironment, isOpt) revert_Argument_to_TeX_brace(document, LineOfBegin, LineOfEnd, StartArgument, EndArgument, isEnvironment, isOpt)
LineOfBegin is the line of the \begin_layout or \begin_inset statement LineOfBegin is the line of the \\begin_layout or \\begin_inset statement
LineOfEnd is the line of the \end_layout or \end_inset statement, if "0" is given, the end of the file is used instead LineOfEnd is the line of the \end_layout or \end_inset statement, if "0" is given, the end of the file is used instead
StartArgument is the number of the first argument that needs to be converted StartArgument is the number of the first argument that needs to be converted
EndArgument is the number of the last argument that needs to be converted or the last defined one EndArgument is the number of the last argument that needs to be converted or the last defined one
@ -111,7 +111,7 @@ def convert_TeX_brace_to_Argument(document, line, n, nmax, inset, environment, o
- { and } surround a mandatory argument of an environment - { and } surround a mandatory argument of an environment
usage: usage:
convert_TeX_brace_to_Argument(document, LineOfBeginLayout/Inset, StartArgument, EndArgument, isInset, isEnvironment, isOpt) convert_TeX_brace_to_Argument(document, LineOfBeginLayout/Inset, StartArgument, EndArgument, isInset, isEnvironment, isOpt)
LineOfBeginLayout/Inset is the line of the \begin_layout or \begin_inset statement LineOfBeginLayout/Inset is the line of the \\begin_layout or \\begin_inset statement
StartArgument is the number of the first ERT that needs to be converted StartArgument is the number of the first ERT that needs to be converted
EndArgument is the number of the last ERT that needs to be converted EndArgument is the number of the last ERT that needs to be converted
isInset must be true, if braces inside an InsetLayout needs to be converted isInset must be true, if braces inside an InsetLayout needs to be converted
@ -181,7 +181,7 @@ def convert_TeX_brace_to_Argument(document, line, n, nmax, inset, environment, o
else: else:
beginBrace = find_token(document.body, "{", endBrace, end_layout) beginBrace = find_token(document.body, "{", endBrace, end_layout)
# assure that the ERTs are consecutive (11 or 12 depending if there is a space between the ERTs or not) # assure that the ERTs are consecutive (11 or 12 depending if there is a space between the ERTs or not)
        if beginBrace == endBrace + 11 or beginBrace == endBrace + 12: if beginBrace != -1 and (beginBrace == endBrace + 11 or beginBrace == endBrace + 12):
end = find_token(document.body, "\\end_inset", beginBrace) end = find_token(document.body, "\\end_inset", beginBrace)
document.body[lineERT : end + 1] = ["\\end_layout", "", "\\end_inset"] document.body[lineERT : end + 1] = ["\\end_layout", "", "\\end_inset"]
if loop == 1: if loop == 1:
@ -398,7 +398,7 @@ def convert_japanese_encodings(document):
if i == -1: if i == -1:
return return
val = get_value(document.header, "\\inputencoding", i) val = get_value(document.header, "\\inputencoding", i)
if val in jap_enc_dict.keys(): if val in list(jap_enc_dict.keys()):
document.header[i] = "\\inputencoding %s" % jap_enc_dict[val] document.header[i] = "\\inputencoding %s" % jap_enc_dict[val]
@ -413,7 +413,7 @@ def revert_japanese_encodings(document):
if i == -1: if i == -1:
return return
val = get_value(document.header, "\\inputencoding", i) val = get_value(document.header, "\\inputencoding", i)
if val in jap_enc_dict.keys(): if val in list(jap_enc_dict.keys()):
document.header[i] = "\\inputencoding %s" % jap_enc_dict[val] document.header[i] = "\\inputencoding %s" % jap_enc_dict[val]
@ -814,30 +814,38 @@ def revert_cancel(document):
revert_use_package(document, "cancel", cancel_commands, False) revert_use_package(document, "cancel", cancel_commands, False)
def revert_verbatim(document): def revert_verbatim(document, starred = False):
" Revert verbatim einvironments completely to TeX-code. " " Revert verbatim environments completely to TeX-code. "
i = 0 i = 0
consecutive = False consecutive = False
subst_end = ['\end_layout', '', '\\begin_layout Plain Layout',
'\end_layout', '', layout_name = "Verbatim"
latex_name = "verbatim"
if starred:
layout_name = "Verbatim*"
latex_name = "verbatim*"
subst_end = ['\\end_layout', '', '\\begin_layout Plain Layout',
'\\end_layout', '',
'\\begin_layout Plain Layout', '', '', '\\begin_layout Plain Layout', '', '',
'\\backslash', '', '\\backslash', '',
'end{verbatim}', 'end{%s}' % (latex_name),
'\\end_layout', '', '\\end_inset', '\\end_layout', '', '\\end_inset',
'', '', '\\end_layout'] '', '', '\\end_layout']
subst_begin = ['\\begin_layout Standard', '\\noindent', subst_begin = ['\\begin_layout Standard', '\\noindent',
'\\begin_inset ERT', 'status open', '', '\\begin_inset ERT', 'status open', '',
'\\begin_layout Plain Layout', '', '', '\\backslash', '\\begin_layout Plain Layout', '', '', '\\backslash',
'begin{verbatim}', 'begin{%s}' % (latex_name),
'\\end_layout', '', '\\begin_layout Plain Layout', ''] '\\end_layout', '', '\\begin_layout Plain Layout', '']
while 1: while 1:
i = find_token(document.body, "\\begin_layout Verbatim", i) i = find_token(document.body, "\\begin_layout %s" % (layout_name), i)
if i == -1: if i == -1:
return return
j = find_end_of_layout(document.body, i) j = find_end_of_layout(document.body, i)
if j == -1: if j == -1:
document.warning("Malformed LyX document: Can't find end of Verbatim layout") document.warning("Malformed LyX document: Can't find end of %s layout" \
% (layout_name))
i += 1 i += 1
continue continue
# delete all line breaks insets (there are no other insets) # delete all line breaks insets (there are no other insets)
@ -850,24 +858,29 @@ def revert_verbatim(document):
break break
m = find_end_of_inset(document.body, n) m = find_end_of_inset(document.body, n)
del(document.body[m:m+1]) del(document.body[m:m+1])
document.body[n:n+1] = ['\end_layout', '', '\\begin_layout Plain Layout'] document.body[n:n+1] = ['\\end_layout', '', '\\begin_layout Plain Layout']
l += 1 l += 1
# we deleted a line, so the end of the inset moved forward. # we deleted a line, so the end of the inset moved forward.
# FIXME But we also added some lines, didn't we? I think this
# should be j += 1.
j -= 1 j -= 1
# consecutive verbatim environments need to be connected # consecutive verbatim environments need to be connected
k = find_token(document.body, "\\begin_layout Verbatim", j) k = find_token(document.body, "\\begin_layout %s" % (layout_name), j)
if k == j + 2 and consecutive == False: if k == j + 2 and consecutive == False:
consecutive = True consecutive = True
document.body[j:j+1] = ['\end_layout', '', '\\begin_layout Plain Layout'] document.body[j:j+1] = ['\\end_layout', '', '\\begin_layout Plain Layout']
document.body[i:i+1] = subst_begin document.body[i:i+1] = subst_begin
continue continue
if k == j + 2 and consecutive == True: if k == j + 2 and consecutive == True:
document.body[j:j+1] = ['\end_layout', '', '\\begin_layout Plain Layout'] document.body[j:j+1] = ['\\end_layout', '', '\\begin_layout Plain Layout']
del(document.body[i:i+1]) del(document.body[i:i+1])
continue continue
if k != j + 2 and consecutive == True: if k != j + 2 and consecutive == True:
document.body[j:j+1] = subst_end document.body[j:j+1] = subst_end
# the next paragraph must not be indented # the next paragraph must not be indented
# FIXME This seems to be causing problems, because of the
# hardcoded use of 19. We should figure out exactly where
# this needs to go by searching for the right tag.
document.body[j+19:j+19] = ['\\noindent'] document.body[j+19:j+19] = ['\\noindent']
del(document.body[i:i+1]) del(document.body[i:i+1])
consecutive = False consecutive = False
@ -875,6 +888,9 @@ def revert_verbatim(document):
else: else:
document.body[j:j+1] = subst_end document.body[j:j+1] = subst_end
# the next paragraph must not be indented # the next paragraph must not be indented
# FIXME This seems to be causing problems, because of the
# hardcoded use of 19. We should figure out exactly where
# this needs to go by searching for the right tag.
document.body[j+19:j+19] = ['\\noindent'] document.body[j+19:j+19] = ['\\noindent']
document.body[i:i+1] = subst_begin document.body[i:i+1] = subst_begin
@ -1227,7 +1243,7 @@ def revert_mathdesign(document):
if i == -1: if i == -1:
return return
val = get_value(document.header, "\\font_roman", i) val = get_value(document.header, "\\font_roman", i)
if val in mathdesign_dict.keys(): if val in list(mathdesign_dict.keys()):
preamble = "\\usepackage[%s" % mathdesign_dict[val] preamble = "\\usepackage[%s" % mathdesign_dict[val]
expert = False expert = False
j = find_token(document.header, "\\font_osf true", 0) j = find_token(document.header, "\\font_osf true", 0)
@ -1391,7 +1407,7 @@ def revert_mathfonts(document):
k = find_token(document.header, "\\font_osf true", 0) k = find_token(document.header, "\\font_osf true", 0)
if k != -1: if k != -1:
rm += "-osf" rm += "-osf"
if rm in mathfont_dict.keys(): if rm in list(mathfont_dict.keys()):
add_to_preamble(document, mathfont_dict[rm]) add_to_preamble(document, mathfont_dict[rm])
document.header[j] = "\\font_roman default" document.header[j] = "\\font_roman default"
if k != -1: if k != -1:
@ -1412,7 +1428,7 @@ def revert_mdnomath(document):
if i == -1: if i == -1:
return return
val = get_value(document.header, "\\font_roman", i) val = get_value(document.header, "\\font_roman", i)
if val in mathdesign_dict.keys(): if val in list(mathdesign_dict.keys()):
j = find_token(document.header, "\\font_math", 0) j = find_token(document.header, "\\font_math", 0)
if j == -1: if j == -1:
document.header[i] = "\\font_roman %s" % mathdesign_dict[val] document.header[i] = "\\font_roman %s" % mathdesign_dict[val]
@ -1424,6 +1440,10 @@ def revert_mdnomath(document):
document.header[i] = "\\font_roman %s" % mathdesign_dict[val] document.header[i] = "\\font_roman %s" % mathdesign_dict[val]
def convert_mathfonts(document):
document.header.insert(-1, "\\font_math auto")
def convert_mdnomath(document): def convert_mdnomath(document):
" Change mathdesign font name " " Change mathdesign font name "
@ -1437,7 +1457,7 @@ def convert_mdnomath(document):
if i == -1: if i == -1:
return return
val = get_value(document.header, "\\font_roman", i) val = get_value(document.header, "\\font_roman", i)
if val in mathdesign_dict.keys(): if val in list(mathdesign_dict.keys()):
document.header[i] = "\\font_roman %s" % mathdesign_dict[val] document.header[i] = "\\font_roman %s" % mathdesign_dict[val]
@ -1454,7 +1474,7 @@ def revert_newtxmath(document):
"minion-ntxm": "\\usepackage[minion]{newtxmath}", "minion-ntxm": "\\usepackage[minion]{newtxmath}",
"newtxmath": "\\usepackage{newtxmath}", "newtxmath": "\\usepackage{newtxmath}",
} }
if val in mathfont_dict.keys(): if val in list(mathfont_dict.keys()):
add_to_preamble(document, mathfont_dict[val]) add_to_preamble(document, mathfont_dict[val])
document.header[i] = "\\font_math auto" document.header[i] = "\\font_math auto"
@ -1676,39 +1696,42 @@ def revert_IEEEtran(document):
Biography without photo Biography without photo
to TeX-code to TeX-code
''' '''
if document.textclass == "IEEEtran": if document.textclass != "IEEEtran":
return
layouts = {"Page headings": False,
"Biography without photo": True}
for layout in list(layouts.keys()):
i = 0 i = 0
i2 = 0
j = 0
k = 0
while True: while True:
if i != -1: i = find_token(document.body, '\\begin_layout ' + layout, i)
i = find_token(document.body, "\\begin_layout Page headings", i) if i == -1:
if i != -1: break
revert_Argument_to_TeX_brace(document, i, 0, 1, 1, layouts[layout], False)
i += 1
i = 0
while True:
i = find_token(document.body, '\\begin_inset Flex Paragraph Start', i)
if i == -1:
break
revert_Argument_to_TeX_brace(document, i, 0, 1, 1, False, False) revert_Argument_to_TeX_brace(document, i, 0, 1, 1, False, False)
i += 1 i += 1
if i2 != -1:
i2 = find_token(document.body, "\\begin_inset Flex Paragraph Start", i2) i = 0
if i2 != -1: while True:
revert_Argument_to_TeX_brace(document, i2, 0, 1, 1, False, False) i = find_token_exact(document.body, "\\begin_layout Biography", i)
i2 = i2 + 1 if i == -1:
if j != -1: break
j = find_token(document.body, "\\begin_layout Biography without photo", j)
if j != -1: if document.body[i] == "\\begin_layout Biography without photo":
revert_Argument_to_TeX_brace(document, j, 0, 1, 1, True, False) i += 1
j += 1
if k != -1:
k = find_token(document.body, "\\begin_layout Biography", k)
kA = find_token(document.body, "\\begin_layout Biography without photo", k)
if k == kA and k != -1:
k += 1
continue continue
if k != -1:
# start with the second argument, therefore 2 # start with the second argument, therefore 2
revert_Argument_to_TeX_brace(document, k, 0, 2, 2, True, False) revert_Argument_to_TeX_brace(document, i, 0, 2, 2, True, False)
k += 1 i += 1
if i == -1 and i2 == -1 and j == -1 and k == -1:
return
def revert_IEEEtran_2(document): def revert_IEEEtran_2(document):
@ -1735,35 +1758,35 @@ def convert_IEEEtran(document):
Biography without photo Biography without photo
to InsetArgument to InsetArgument
''' '''
if document.textclass == "IEEEtran": if document.textclass != "IEEEtran":
i = 0
j = 0
k = 0
while True:
if i != -1:
i = find_token(document.body, "\\begin_layout Page headings", i)
if i != -1:
convert_TeX_brace_to_Argument(document, i, 1, 1, False, False, False)
i += 1
if j != -1:
j = find_token(document.body, "\\begin_layout Biography without photo", j)
if j != -1:
convert_TeX_brace_to_Argument(document, j, 1, 1, False, True, False)
j += 1
if k != -1:
# assure that we don't handle Biography Biography without photo
k = find_token(document.body, "\\begin_layout Biography", k)
kA = find_token(document.body, "\\begin_layout Biography without photo", k - 1)
if k == kA and k != -1:
k += 1
continue
if k != -1:
# the argument we want to convert is the second one
convert_TeX_brace_to_Argument(document, k, 2, 2, False, True, False)
k += 1
if i == -1 and j == -1 and k == -1:
return return
layouts = {"Page headings": False,
"Biography without photo": True}
for layout in list(layouts.keys()):
i = 0
while True:
i = find_token(document.body, '\\begin_layout ' + layout, i)
if i == -1:
break
convert_TeX_brace_to_Argument(document, i, 1, 1, False, layouts[layout], False)
i += 1
i = 0
while True:
i = find_token_exact(document.body, "\\begin_layout Biography", i)
if i == -1:
break
if document.body[i] == "\\begin_layout Biography without photo":
i += 1
continue
# the argument we want to convert is the second one
convert_TeX_brace_to_Argument(document, i, 2, 2, False, True, False)
i += 1
def revert_AASTeX(document): def revert_AASTeX(document):
" Reverts InsetArgument of Altaffilation to TeX-code " " Reverts InsetArgument of Altaffilation to TeX-code "
@ -3630,7 +3653,7 @@ def convert_captionlayouts(document):
if i == -1: if i == -1:
return return
val = get_value(document.body, "\\begin_layout", i) val = get_value(document.body, "\\begin_layout", i)
if val in caption_dict.keys(): if val in list(caption_dict.keys()):
j = find_end_of_layout(document.body, i) j = find_end_of_layout(document.body, i)
if j == -1: if j == -1:
document.warning("Malformed LyX document: Missing `\\end_layout'.") document.warning("Malformed LyX document: Missing `\\end_layout'.")
@ -3666,7 +3689,7 @@ def revert_captionlayouts(document):
val = "" val = ""
if m: if m:
val = m.group(1) val = m.group(1)
if val not in caption_dict.keys(): if val not in list(caption_dict.keys()):
i += 1 i += 1
continue continue
@ -3847,7 +3870,7 @@ def revert_newframes(document):
val = "" val = ""
if m: if m:
val = m.group(1) val = m.group(1)
if val not in frame_dict.keys(): if val not in list(frame_dict.keys()):
i += 1 i += 1
continue continue
# Find end of sequence # Find end of sequence
@ -3963,7 +3986,7 @@ def convert_encodings(document):
if i == -1: if i == -1:
return return
val = get_value(document.header, "\\inputencoding", i) val = get_value(document.header, "\\inputencoding", i)
if val in LaTeX2LyX_enc_dict.keys(): if val in list(LaTeX2LyX_enc_dict.keys()):
document.header[i] = "\\inputencoding %s" % LaTeX2LyX_enc_dict[val] document.header[i] = "\\inputencoding %s" % LaTeX2LyX_enc_dict[val]
elif val not in known_enc_tuple: elif val not in known_enc_tuple:
document.warning("Ignoring unknown input encoding: `%s'" % val) document.warning("Ignoring unknown input encoding: `%s'" % val)
@ -4004,7 +4027,7 @@ def revert_encodings(document):
if i == -1: if i == -1:
return return
val = get_value(document.header, "\\inputencoding", i) val = get_value(document.header, "\\inputencoding", i)
if val in LyX2LaTeX_enc_dict.keys(): if val in list(LyX2LaTeX_enc_dict.keys()):
document.header[i] = "\\inputencoding %s" % LyX2LaTeX_enc_dict[val] document.header[i] = "\\inputencoding %s" % LyX2LaTeX_enc_dict[val]
elif val not in known_enc_tuple: elif val not in known_enc_tuple:
document.warning("Ignoring unknown input encoding: `%s'" % val) document.warning("Ignoring unknown input encoding: `%s'" % val)
@ -4810,7 +4833,7 @@ convert = [
[437, []], [437, []],
[438, []], [438, []],
[439, []], [439, []],
[440, []], [440, [convert_mathfonts]],
[441, [convert_mdnomath]], [441, [convert_mdnomath]],
[442, []], [442, []],
[443, []], [443, []],

View File

@ -56,9 +56,9 @@ find_re(lines, rexp, start[, end]):
get_value(lines, token, start[, end[, default]): get_value(lines, token, start[, end[, default]):
Similar to find_token, but it returns what follows the Similar to find_token, but it returns what follows the
token on the found line. Example: token on the found line. Example:
get_value(document.header, "\use_xetex", 0) get_value(document.header, "\\use_xetex", 0)
will find a line like: will find a line like:
\use_xetex true \\use_xetex true
and, in that case, return "true". (Note that whitespace and, in that case, return "true". (Note that whitespace
is stripped.) The final argument, default, defaults to "", is stripped.) The final argument, default, defaults to "",
and is what is returned if we do not find anything. So you and is what is returned if we do not find anything. So you
@ -80,10 +80,10 @@ del_token(lines, token, start[, end]):
find_beginning_of(lines, i, start_token, end_token): find_beginning_of(lines, i, start_token, end_token):
Here, start_token and end_token are meant to be a matching Here, start_token and end_token are meant to be a matching
pair, like "\begin_layout" and "\end_layout". We look for pair, like "\\begin_layout" and "\\end_layout". We look for
the start_token that pairs with the end_token that occurs the start_token that pairs with the end_token that occurs
on or after line i. Returns -1 if not found. on or after line i. Returns -1 if not found.
So, in the layout case, this would find the \begin_layout So, in the layout case, this would find the \\begin_layout
for the layout line i is in. for the layout line i is in.
Example: Example:
ec = find_token(document.body, "</cell", i) ec = find_token(document.body, "</cell", i)
@ -187,7 +187,7 @@ def find_token(lines, token, start, end = 0, ignorews = False):
if end == 0 or end > len(lines): if end == 0 or end > len(lines):
end = len(lines) end = len(lines)
m = len(token) m = len(token)
for i in xrange(start, end): for i in range(start, end):
if ignorews: if ignorews:
x = lines[i].split() x = lines[i].split()
y = token.split() y = token.split()
@ -215,7 +215,7 @@ def find_tokens(lines, tokens, start, end = 0, ignorews = False):
if end == 0 or end > len(lines): if end == 0 or end > len(lines):
end = len(lines) end = len(lines)
for i in xrange(start, end): for i in range(start, end):
for token in tokens: for token in tokens:
if ignorews: if ignorews:
x = lines[i].split() x = lines[i].split()
@ -244,7 +244,7 @@ def find_re(lines, rexp, start, end = 0):
if end == 0 or end > len(lines): if end == 0 or end > len(lines):
end = len(lines) end = len(lines)
for i in xrange(start, end): for i in range(start, end):
if rexp.match(lines[i]): if rexp.match(lines[i]):
return i return i
return -1 return -1
@ -258,7 +258,7 @@ def find_token_backwards(lines, token, start):
Return -1 on failure.""" Return -1 on failure."""
m = len(token) m = len(token)
for i in xrange(start, -1, -1): for i in range(start, -1, -1):
line = lines[i] line = lines[i]
if line[:m] == token: if line[:m] == token:
return i return i
@ -272,7 +272,7 @@ def find_tokens_backwards(lines, tokens, start):
element, in lines[end, start]. element, in lines[end, start].
Return -1 on failure.""" Return -1 on failure."""
for i in xrange(start, -1, -1): for i in range(start, -1, -1):
line = lines[i] line = lines[i]
for token in tokens: for token in tokens:
if line[:len(token)] == token: if line[:len(token)] == token:
@ -381,7 +381,7 @@ def find_end_of(lines, i, start_token, end_token):
def find_nonempty_line(lines, start, end = 0): def find_nonempty_line(lines, start, end = 0):
if end == 0: if end == 0:
end = len(lines) end = len(lines)
for i in xrange(start, end): for i in range(start, end):
if is_nonempty_line(lines[i]): if is_nonempty_line(lines[i]):
return i return i
return -1 return -1

View File

@ -78,8 +78,8 @@ class TestParserTools(unittest.TestCase):
def test_find_token(self): def test_find_token(self):
self.assertEquals(find_token(lines, '\\emph', 0), 7) self.assertEquals(find_token(lines, '\\emph', 0), 7)
self.assertEquals(find_token(lines, '\\emph', 0, 5), -1) self.assertEquals(find_token(lines, '\\emph', 0, 5), -1)
self.assertEquals(find_token(lines, '\\emp', 0, exact = True), -1) self.assertEquals(find_token(lines, '\\emp', 0, 0, True), -1)
self.assertEquals(find_token(lines, '\\emp', 0, exact = False), 7) self.assertEquals(find_token(lines, '\\emp', 0, 0, False), 7)
self.assertEquals(find_token(lines, 'emph', 0), -1) self.assertEquals(find_token(lines, 'emph', 0), -1)

View File

@ -20,35 +20,47 @@
import sys, os, re import sys, os, re
# Provide support for both python 2 and 3
PY2 = sys.version_info[0] == 2
if not PY2:
unichr = chr
# End of code to support for both python 2 and 3
def read_unicodesymbols(): def read_unicodesymbols():
" Read the unicodesymbols list of unicode characters and corresponding commands." " Read the unicodesymbols list of unicode characters and corresponding commands."
pathname = os.path.abspath(os.path.dirname(sys.argv[0])) pathname = os.path.abspath(os.path.dirname(sys.argv[0]))
fp = open(os.path.join(pathname.strip('lyx2lyx'), 'unicodesymbols')) fp = open(os.path.join(pathname.strip('lyx2lyx'), 'unicodesymbols'))
spec_chars = [] spec_chars = []
# Two backslashes, followed by some non-word character, and then a character # A backslash, followed by some non-word character, and then a character
# in brackets. The idea is to check for constructs like: \"{u}, which is how # in brackets. The idea is to check for constructs like: \"{u}, which is how
# they are written in the unicodesymbols file; but they can also be written # they are written in the unicodesymbols file; but they can also be written
# as: \"u or even \" u. # as: \"u or even \" u.
r = re.compile(r'\\\\(\W)\{(\w)\}') # The two backslashes in the string literal are needed to specify a literal
# backslash in the regex. Without r prefix, these would be four backslashes.
r = re.compile(r'\\(\W)\{(\w)\}')
for line in fp.readlines(): for line in fp.readlines():
if line[0] != '#' and line.strip() != "": if line[0] != '#' and line.strip() != "":
# Note: backslashes in the string literals with r prefix are not escaped,
# so one backslash in the source file equals one backslash in memory.
            # Without r prefix backslashes are escaped, so two backslashes in the
# source file equal one backslash in memory.
line=line.replace(' "',' ') # remove all quotation marks with spaces before line=line.replace(' "',' ') # remove all quotation marks with spaces before
line=line.replace('" ',' ') # remove all quotation marks with spaces after line=line.replace('" ',' ') # remove all quotation marks with spaces after
line=line.replace(r'\"','"') # replace \" by " (for characters with diaeresis) line=line.replace(r'\"','"') # unescape "
line=line.replace(r'\\','\\') # unescape \
try: try:
[ucs4,command,dead] = line.split(None,2) [ucs4,command,dead] = line.split(None,2)
if command[0:1] != "\\": if command[0:1] != "\\":
continue continue
if (line.find("notermination=text") < 0 and
line.find("notermination=both") < 0 and command[-1] != "}"):
command = command + "{}"
spec_chars.append([command, unichr(eval(ucs4))]) spec_chars.append([command, unichr(eval(ucs4))])
except: except:
continue continue
m = r.match(command) m = r.match(command)
if m != None: if m != None:
command = "\\\\" command = "\\"
# If the character is a double-quote, then we need to escape it, too,
# since it is done that way in the LyX file.
if m.group(1) == "\"":
command += "\\"
commandbl = command commandbl = command
command += m.group(1) + m.group(2) command += m.group(1) + m.group(2)
commandbl += m.group(1) + ' ' + m.group(2) commandbl += m.group(1) + ' ' + m.group(2)