Mirror of https://git.lyx.org/repos/lyx.git (synced 2024-11-21 17:51:03 +00:00)
First step to make lyx2lyx code support both python 2 and python 3 (3.3+)
Most of the changes are related to dictionaries returning views instead of lists in Python 3, and to replacing xrange with range (xrange is gone in Python 3). Code that exists only to support both Python 2 and 3 is enclosed between the comments "# Provide support for both python 2 and 3" and "# End of code to support for both python 2 and 3", so that it can be removed safely once Python 2 is no longer supported.
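For orientation, here is a minimal, self-contained sketch of the patterns this commit applies throughout lyx2lyx. The version guard mirrors the block added in the diff below; the placeholder dictionary and the loop bodies are illustrative only, not lyx2lyx code:

import sys

# Provide support for both python 2 and 3
PY2 = sys.version_info[0] == 2
if not PY2:
    text_type = str
    unichr = chr
else:
    text_type = unicode
    unichr = unichr
# End of code to support for both python 2 and 3

# In Python 3, dict.keys() returns a view of the dictionary, so the keys
# must be copied into a list before entries are deleted during iteration.
params = {'use_parbox': '0', 'width': '100col%'}  # placeholder data
for key in list(params.keys()):
    if params[key] == '0':
        del params[key]

# xrange is gone in Python 3; range works in both versions
# (a list in Python 2, a lazy sequence in Python 3).
for i in range(0, 3):
    print(i)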
This commit is contained in:
parent 50b570e05a, commit 9a0d70a45f
@@ -80,12 +80,12 @@ format_relation = [("0_06", [200], minor_versions("0.6" , 4)),
 ("1_1_6_3", [218], ["1.1", "1.1.6.3","1.1.6.4"]),
 ("1_2", [220], minor_versions("1.2" , 4)),
 ("1_3", [221], minor_versions("1.3" , 7)),
-("1_4", range(222,246), minor_versions("1.4" , 5)),
-("1_5", range(246,277), minor_versions("1.5" , 7)),
-("1_6", range(277,346), minor_versions("1.6" , 10)),
-("2_0", range(346,414), minor_versions("2.0", 8)),
-("2_1", range(414,475), minor_versions("2.1", 0)),
-("2_2", range(475,483), minor_versions("2.2", 0))
+("1_4", list(range(222,246)), minor_versions("1.4" , 5)),
+("1_5", list(range(246,277)), minor_versions("1.5" , 7)),
+("1_6", list(range(277,346)), minor_versions("1.6" , 10)),
+("2_0", list(range(346,414)), minor_versions("2.0", 8)),
+("2_1", list(range(414,475)), minor_versions("2.1", 0)),
+("2_2", list(range(475,483)), minor_versions("2.2", 0))
 ]

 ####################################################################

@@ -19,6 +19,7 @@
 """ This module parses lib/languages and prints it as a python
 dictionary, ready to use by other python modules"""

+from __future__ import print_function
 import pprint

 def parse_line(line):

@@ -55,8 +56,8 @@ if __name__ == '__main__':
 lang[tmp[0]] = tmp[1:]


-print "# This file is generated by generate_incoding_info.py from lib/languages file."
-print "# Do not change this file directly."
-print
-print "lang = ",
+print ("# This file is generated by generate_incoding_info.py from lib/languages file.")
+print ("# Do not change this file directly.")
+print ()
+print ("lang = ", end = " ")
 pprint.pprint(lang)

@@ -283,7 +283,7 @@ def latex_length(slen):
 units = {"text%":"\\textwidth", "col%":"\\columnwidth",
 "page%":"\\paperwidth", "line%":"\\linewidth",
 "theight%":"\\textheight", "pheight%":"\\paperheight"}
-for unit in units.keys():
+for unit in list(units.keys()):
 i = slen.find(unit)
 if i == -1:
 continue

@@ -154,7 +154,7 @@ def remove_oldfloat(document):
 j = find_token(lines, "\\end_float", i+1)

 floattype = lines[i].split()[1]
-if not floats.has_key(floattype):
+if floattype not in floats:
 document.warning("Error! Unknown float type " + floattype)
 floattype = "fig"

@@ -284,7 +284,7 @@ def remove_pextra(document):

 def is_empty(lines):
 " Are all the lines empty?"
-return filter(is_nonempty_line, lines) == []
+return list(filter(is_nonempty_line, lines)) == []


 move_rexp = re.compile(r"\\(family|series|shape|size|emph|numeric|bar|noun|end_deeper)")

@@ -358,7 +358,7 @@ def remove_oldert(document):
 tmp.append(line)

 if is_empty(tmp):
-if filter(lambda x:x != "", tmp) != []:
+if [x for x in tmp if x != ""] != []:
 if new == []:
 # This is not necessary, but we want the output to be
 # as similar as posible to the lyx format

@@ -1495,7 +1495,7 @@ def convert_len(len, special):
 len = '%f\\' % len2value(len) + special

 # Convert LyX units to LaTeX units
-for unit in units.keys():
+for unit in list(units.keys()):
 if len.find(unit) != -1:
 len = '%f' % (len2value(len) / 100) + units[unit]
 break

@@ -1571,7 +1571,7 @@ def convert_frameless_box(document):
 'inner_pos':1, 'use_parbox':'0', 'width':'100col%',
 'special':'none', 'height':'1in',
 'height_special':'totalheight', 'collapsed':'false'}
-for key in params.keys():
+for key in list(params.keys()):
 value = get_value(document.body, key, i, j).replace('"', '')
 if value != "":
 if key == 'position':

@@ -26,6 +26,15 @@ import sys, os
 from parser_tools import find_re, find_token, find_token_backwards, find_token_exact, find_tokens, find_end_of, get_value, find_beginning_of, find_nonempty_line
 from LyX import get_encoding

+# Provide support for both python 2 and 3
+PY2 = sys.version_info[0] == 2
+if not PY2:
+    text_type = str
+    unichr = chr
+else:
+    text_type = unicode
+    unichr = unichr
+# End of code to support for both python 2 and 3

 ####################################################################
 # Private helper functions

@@ -93,7 +102,7 @@ def convert_font_settings(document):
 if font_scheme == '':
 document.warning("Malformed LyX document: Empty `\\fontscheme'.")
 font_scheme = 'default'
-if not font_scheme in roman_fonts.keys():
+if not font_scheme in list(roman_fonts.keys()):
 document.warning("Malformed LyX document: Unknown `\\fontscheme' `%s'." % font_scheme)
 font_scheme = 'default'
 document.header[i:i+1] = ['\\font_roman %s' % roman_fonts[font_scheme],

@@ -163,7 +172,7 @@ def revert_font_settings(document):
 del document.header[i]
 if font_tt_scale != '100':
 document.warning("Conversion of '\\font_tt_scale' not yet implemented.")
-for font_scheme in roman_fonts.keys():
+for font_scheme in list(roman_fonts.keys()):
 if (roman_fonts[font_scheme] == fonts['roman'] and
 sans_fonts[font_scheme] == fonts['sans'] and
 typewriter_fonts[font_scheme] == fonts['typewriter']):

@@ -376,7 +385,7 @@ def revert_unicode_line(document, i, insets, spec_chars, replacement_character =
 last_char = character
 except:
 # Try to replace with ERT/math inset
-if spec_chars.has_key(character):
+if character in spec_chars:
 command = spec_chars[character][0] # the command to replace unicode
 flag1 = spec_chars[character][1]
 flag2 = spec_chars[character][2]

@@ -1211,7 +1220,7 @@ def revert_accent(document):
 try:
 document.body[i] = normalize("NFD", document.body[i])
 except TypeError:
-document.body[i] = normalize("NFD", unicode(document.body[i], 'utf-8'))
+document.body[i] = normalize("NFD", text_type(document.body[i], 'utf-8'))

 # Replace accented characters with InsetLaTeXAccent
 # Do not convert characters that can be represented in the chosen

@@ -1346,15 +1355,15 @@ def normalize_font_whitespace(document, char_properties):
 # a new paragraph resets all font changes
 changes.clear()
 # also reset the default language to be the paragraph's language
-if "\\lang" in char_properties.keys():
+if "\\lang" in list(char_properties.keys()):
 char_properties["\\lang"] = \
 get_paragraph_language(document, i + 1)

-elif len(words) > 1 and words[0] in char_properties.keys():
+elif len(words) > 1 and words[0] in list(char_properties.keys()):
 # we have a font change
 if char_properties[words[0]] == words[1]:
 # property gets reset
-if words[0] in changes.keys():
+if words[0] in list(changes.keys()):
 del changes[words[0]]
 defaultproperty = True
 else:

@@ -1372,11 +1381,11 @@ def normalize_font_whitespace(document, char_properties):
 lines[i-1] = lines[i-1][:-1]
 # a space before the font change
 added_lines = [" "]
-for k in changes.keys():
+for k in list(changes.keys()):
 # exclude property k because that is already in lines[i]
 if k != words[0]:
 added_lines[1:1] = ["%s %s" % (k, changes[k])]
-for k in changes.keys():
+for k in list(changes.keys()):
 # exclude property k because that must be added below anyway
 if k != words[0]:
 added_lines[0:0] = ["%s %s" % (k, char_properties[k])]

@@ -1400,11 +1409,11 @@ def normalize_font_whitespace(document, char_properties):
 continue
 lines[i+1] = lines[i+1][1:]
 added_lines = [" "]
-for k in changes.keys():
+for k in list(changes.keys()):
 # exclude property k because that is already in lines[i]
 if k != words[0]:
 added_lines[1:1] = ["%s %s" % (k, changes[k])]
-for k in changes.keys():
+for k in list(changes.keys()):
 # exclude property k because that must be added below anyway
 if k != words[0]:
 added_lines[0:0] = ["%s %s" % (k, char_properties[k])]

@@ -94,7 +94,7 @@ def convert_len(len):
 "theight%":"\\backslash\ntextheight", "pheight%":"\\backslash\npageheight"}

 # Convert LyX units to LaTeX units
-for unit in units.keys():
+for unit in list(units.keys()):
 if len.find(unit) != -1:
 len = '%f' % (len2value(len) / 100)
 len = len.strip('0') + units[unit]

@@ -1765,7 +1765,7 @@ def convert_module_names(document):
 return
 newmodlist = []
 for mod in modlist:
-if modulemap.has_key(mod):
+if mod in modulemap:
 newmodlist.append(modulemap[mod])
 else:
 document.warning("Can't find module %s in the module map!" % mod)

@@ -398,7 +398,7 @@ def convert_japanese_encodings(document):
 if i == -1:
 return
 val = get_value(document.header, "\\inputencoding", i)
-if val in jap_enc_dict.keys():
+if val in list(jap_enc_dict.keys()):
 document.header[i] = "\\inputencoding %s" % jap_enc_dict[val]


@@ -413,7 +413,7 @@ def revert_japanese_encodings(document):
 if i == -1:
 return
 val = get_value(document.header, "\\inputencoding", i)
-if val in jap_enc_dict.keys():
+if val in list(jap_enc_dict.keys()):
 document.header[i] = "\\inputencoding %s" % jap_enc_dict[val]


@@ -1227,7 +1227,7 @@ def revert_mathdesign(document):
 if i == -1:
 return
 val = get_value(document.header, "\\font_roman", i)
-if val in mathdesign_dict.keys():
+if val in list(mathdesign_dict.keys()):
 preamble = "\\usepackage[%s" % mathdesign_dict[val]
 expert = False
 j = find_token(document.header, "\\font_osf true", 0)

@@ -1391,7 +1391,7 @@ def revert_mathfonts(document):
 k = find_token(document.header, "\\font_osf true", 0)
 if k != -1:
 rm += "-osf"
-if rm in mathfont_dict.keys():
+if rm in list(mathfont_dict.keys()):
 add_to_preamble(document, mathfont_dict[rm])
 document.header[j] = "\\font_roman default"
 if k != -1:

@@ -1412,7 +1412,7 @@ def revert_mdnomath(document):
 if i == -1:
 return
 val = get_value(document.header, "\\font_roman", i)
-if val in mathdesign_dict.keys():
+if val in list(mathdesign_dict.keys()):
 j = find_token(document.header, "\\font_math", 0)
 if j == -1:
 document.header[i] = "\\font_roman %s" % mathdesign_dict[val]

@@ -1437,7 +1437,7 @@ def convert_mdnomath(document):
 if i == -1:
 return
 val = get_value(document.header, "\\font_roman", i)
-if val in mathdesign_dict.keys():
+if val in list(mathdesign_dict.keys()):
 document.header[i] = "\\font_roman %s" % mathdesign_dict[val]


@@ -1454,7 +1454,7 @@ def revert_newtxmath(document):
 "minion-ntxm": "\\usepackage[minion]{newtxmath}",
 "newtxmath": "\\usepackage{newtxmath}",
 }
-if val in mathfont_dict.keys():
+if val in list(mathfont_dict.keys()):
 add_to_preamble(document, mathfont_dict[val])
 document.header[i] = "\\font_math auto"

@@ -3630,7 +3630,7 @@ def convert_captionlayouts(document):
 if i == -1:
 return
 val = get_value(document.body, "\\begin_layout", i)
-if val in caption_dict.keys():
+if val in list(caption_dict.keys()):
 j = find_end_of_layout(document.body, i)
 if j == -1:
 document.warning("Malformed LyX document: Missing `\\end_layout'.")

@@ -3666,7 +3666,7 @@ def revert_captionlayouts(document):
 val = ""
 if m:
 val = m.group(1)
-if val not in caption_dict.keys():
+if val not in list(caption_dict.keys()):
 i += 1
 continue

@@ -3847,7 +3847,7 @@ def revert_newframes(document):
 val = ""
 if m:
 val = m.group(1)
-if val not in frame_dict.keys():
+if val not in list(frame_dict.keys()):
 i += 1
 continue
 # Find end of sequence

@@ -3963,7 +3963,7 @@ def convert_encodings(document):
 if i == -1:
 return
 val = get_value(document.header, "\\inputencoding", i)
-if val in LaTeX2LyX_enc_dict.keys():
+if val in list(LaTeX2LyX_enc_dict.keys()):
 document.header[i] = "\\inputencoding %s" % LaTeX2LyX_enc_dict[val]
 elif val not in known_enc_tuple:
 document.warning("Ignoring unknown input encoding: `%s'" % val)

@@ -4004,7 +4004,7 @@ def revert_encodings(document):
 if i == -1:
 return
 val = get_value(document.header, "\\inputencoding", i)
-if val in LyX2LaTeX_enc_dict.keys():
+if val in list(LyX2LaTeX_enc_dict.keys()):
 document.header[i] = "\\inputencoding %s" % LyX2LaTeX_enc_dict[val]
 elif val not in known_enc_tuple:
 document.warning("Ignoring unknown input encoding: `%s'" % val)

@@ -74,7 +74,7 @@ def convert_separator(document):
 lay = get_containing_layout(document.body, j-1)
 if lay != False:
 content = "\n".join(document.body[lay[1]:lay[2]])
-for val in sty_dict.keys():
+for val in list(sty_dict.keys()):
 if content.find("\\%s" % val) != -1:
 document.body[j:j] = ["\\%s %s" % (val, sty_dict[val])]
 i = i + 1

@@ -103,7 +103,7 @@ def convert_separator(document):
 and find_token(document.body, "\\begin_inset VSpace", lay[1], lay[2]) == -1:
 # reset any text style before inserting the inset
 content = "\n".join(document.body[lay[1]:lay[2]])
-for val in sty_dict.keys():
+for val in list(sty_dict.keys()):
 if content.find("\\%s" % val) != -1:
 document.body[j:j] = ["\\%s %s" % (val, sty_dict[val])]
 i = i + 1

@@ -56,9 +56,9 @@ find_re(lines, rexp, start[, end]):
 get_value(lines, token, start[, end[, default]):
 Similar to find_token, but it returns what follows the
 token on the found line. Example:
-get_value(document.header, "\use_xetex", 0)
+get_value(document.header, "\\use_xetex", 0)
 will find a line like:
-\use_xetex true
+\\use_xetex true
 and, in that case, return "true". (Note that whitespace
 is stripped.) The final argument, default, defaults to "",
 and is what is returned if we do not find anything. So you

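As a quick usage sketch of the behaviour documented above (the header lines below are made up for illustration; get_value is the real parser_tools helper):

from parser_tools import get_value

header = ["\\use_xetex true", "\\inputencoding utf8"]
# Returns what follows the token on the matching line, whitespace stripped.
assert get_value(header, "\\use_xetex", 0) == "true"
# The default, "", is returned when the token is not found.
assert get_value(header, "\\use_amsmath", 0) == ""
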
@@ -80,10 +80,10 @@ del_token(lines, token, start[, end]):

 find_beginning_of(lines, i, start_token, end_token):
 Here, start_token and end_token are meant to be a matching
-pair, like "\begin_layout" and "\end_layout". We look for
+pair, like "\\begin_layout" and "\\end_layout". We look for
 the start_token that pairs with the end_token that occurs
 on or after line i. Returns -1 if not found.
-So, in the layout case, this would find the \begin_layout
+So, in the layout case, this would find the \\begin_layout
 for the layout line i is in.
 Example:
 ec = find_token(document.body, "</cell", i)

@@ -187,7 +187,7 @@ def find_token(lines, token, start, end = 0, ignorews = False):
 if end == 0 or end > len(lines):
 end = len(lines)
 m = len(token)
-for i in xrange(start, end):
+for i in range(start, end):
 if ignorews:
 x = lines[i].split()
 y = token.split()

@@ -215,7 +215,7 @@ def find_tokens(lines, tokens, start, end = 0, ignorews = False):
 if end == 0 or end > len(lines):
 end = len(lines)

-for i in xrange(start, end):
+for i in range(start, end):
 for token in tokens:
 if ignorews:
 x = lines[i].split()

@@ -244,7 +244,7 @@ def find_re(lines, rexp, start, end = 0):

 if end == 0 or end > len(lines):
 end = len(lines)
-for i in xrange(start, end):
+for i in range(start, end):
 if rexp.match(lines[i]):
 return i
 return -1

@@ -258,7 +258,7 @@ def find_token_backwards(lines, token, start):

 Return -1 on failure."""
 m = len(token)
-for i in xrange(start, -1, -1):
+for i in range(start, -1, -1):
 line = lines[i]
 if line[:m] == token:
 return i

@@ -272,7 +272,7 @@ def find_tokens_backwards(lines, tokens, start):
 element, in lines[end, start].

 Return -1 on failure."""
-for i in xrange(start, -1, -1):
+for i in range(start, -1, -1):
 line = lines[i]
 for token in tokens:
 if line[:len(token)] == token:

@@ -381,7 +381,7 @@ def find_end_of(lines, i, start_token, end_token):
 def find_nonempty_line(lines, start, end = 0):
 if end == 0:
 end = len(lines)
-for i in xrange(start, end):
+for i in range(start, end):
 if is_nonempty_line(lines[i]):
 return i
 return -1

@@ -20,6 +20,12 @@

 import sys, os, re

+# Provide support for both python 2 and 3
+PY2 = sys.version_info[0] == 2
+if not PY2:
+    unichr = chr
+# End of code to support for both python 2 and 3
+
 def read_unicodesymbols():
 " Read the unicodesymbols list of unicode characters and corresponding commands."
 pathname = os.path.abspath(os.path.dirname(sys.argv[0]))