Mirror of https://git.lyx.org/repos/lyx.git
Update lyx2lyx folder to Python 3+
Remove support for Python 2. Take advantage of new features (euphemism) not possible before due to Python 2 compatibility.
This commit is contained in:
parent df0e337684
commit f9ec4186d7
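The hunks below apply the same few mechanical changes throughout lyx2lyx: drop the PY2 guards and keep only the Python 3 branch, replace %-formatting with f-strings, drop u'' string prefixes, and use chr() instead of unichr(). A minimal sketch of these patterns, with hypothetical variable names rather than code lifted from any one hunk:

# The old code branched on PY2 = sys.version_info[0] == 2; only the
# Python 3 branch survives, so header lines are read as bytes and decoded.
line = b"\\lyxformat 544\n"
decoded = line.rstrip(b"\r\n").decode("latin1")

# %-interpolation becomes an f-string.
first, last = 221, 620
formats = f"{first} - {last}"

# u'' prefixes go away and unichr() becomes chr(): str is already
# unicode in Python 3.
origin = ""
ring_above = chr(0x030A)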
@ -1,5 +1,4 @@
# This file is part of lyx2lyx
# -*- coding: utf-8 -*-
# Copyright (C) 2002-2024 The LyX Team
# Copyright (C) 2002-2004 Dekel Tsur <dekel@lyx.org>
# Copyright (C) 2002-2006 José Matos <jamatos@lyx.org>
@ -41,9 +40,6 @@ except: # we are running from build directory so assume the last version

default_debug__ = 2

# Provide support for both python 2 and 3
PY2 = sys.version_info[0] == 2
# End of code to support for both python 2 and 3

####################################################################
# Private helper functions
@ -140,9 +136,9 @@ def format_info():
elif not stable_version and major == version__:
stable_format = "-- not yet --"
versions = "-- not yet --"
formats = "%s - %s" % (version[1][0], version[1][-1])
formats = f"{version[1][0]} - {version[1][-1]}"
else:
formats = "%s - %s" % (version[1][0], version[1][-2])
formats = f"{version[1][0]} - {version[1][-2]}"
stable_format = str(version[1][-1])

out += template % (major, stable_format, versions, formats)
@ -219,10 +215,10 @@ def get_encoding(language, inputencoding, format, cjk_encoding):
class LyX_base:
"""This class carries all the information of the LyX file."""

def __init__(self, end_format = 0, input = u'', output = u'', error = u'',
debug = default_debug__, try_hard = 0, cjk_encoding = u'',
final_version = u'', systemlyxdir = u'', language = u'english',
encoding = u'auto'):
def __init__(self, end_format = 0, input = '', output = '', error = '',
debug = default_debug__, try_hard = 0, cjk_encoding = '',
final_version = '', systemlyxdir = '', language = 'english',
encoding = 'auto'):

"""Arguments:
end_format: final format that the file should be converted. (integer)
@ -333,12 +329,8 @@ class LyX_base:

first_line = False

if PY2:
line = trim_eol(line)
decoded = line
else:
line = trim_eol_binary(line)
decoded = line.decode('latin1')
line = trim_eol_binary(line)
decoded = line.decode('latin1')
if check_token(decoded, '\\begin_preamble'):
while True:
line = self.input.readline()
@ -346,12 +338,8 @@ class LyX_base:
# eof found before end of header
self.error("Invalid LyX file: Missing body.")

if PY2:
line = trim_eol(line)
decoded = line
else:
line = trim_eol_binary(line)
decoded = line.decode('latin1')
line = trim_eol_binary(line)
decoded = line.decode('latin1')
if check_token(decoded, '\\end_preamble'):
break

@ -379,33 +367,18 @@ class LyX_base:

self.header.append(line)

if PY2:
i = find_token(self.header, '\\textclass', 0)
else:
i = find_token(self.header, b'\\textclass', 0)
i = find_token(self.header, b'\\textclass', 0)
if i == -1:
self.warning("Malformed LyX file: Missing '\\textclass'.")
if PY2:
i = find_token(self.header, '\\lyxformat', 0) + 1
self.header[i:i] = ['\\textclass article']
else:
i = find_token(self.header, b'\\lyxformat', 0) + 1
self.header[i:i] = [b'\\textclass article']
i = find_token(self.header, b'\\lyxformat', 0) + 1
self.header[i:i] = [b'\\textclass article']

if PY2:
self.textclass = get_value(self.header, "\\textclass", 0,
default = "")
self.language = get_value(self.header, "\\language", 0,
default = "english")
self.inputencoding = get_value(self.header, "\\inputencoding", 0,
default = "auto")
else:
self.textclass = get_value(self.header, b"\\textclass", 0,
default = b"")
self.language = get_value(self.header, b"\\language", 0,
default = b"english").decode('ascii')
self.inputencoding = get_value(self.header, b"\\inputencoding", 0,
default = b"auto").decode('ascii')
self.textclass = get_value(self.header, b"\\textclass", 0,
default = b"")
self.language = get_value(self.header, b"\\language", 0,
default = b"english").decode('ascii')
self.inputencoding = get_value(self.header, b"\\inputencoding", 0,
default = b"auto").decode('ascii')
self.format = self.read_format()
self.initial_format = self.format
self.encoding = get_encoding(self.language,
@ -448,8 +421,8 @@ class LyX_base:
else:
header = self.header

for line in header + [u''] + self.body:
self.output.write(line+u'\n')
for line in header + [''] + self.body:
self.output.write(line+'\n')


def choose_output(self, output):
@ -472,9 +445,9 @@ class LyX_base:
self.output = io.TextIOWrapper(zipbuffer, encoding=self.encoding, newline='\n')
else:
if output:
self.output = io.open(output, 'w', encoding=self.encoding)
self.output = open(output, 'w', encoding=self.encoding)
else:
self.output = io.open(sys.stdout.fileno(), 'w', encoding=self.encoding)
self.output = open(sys.stdout.fileno(), 'w', encoding=self.encoding)


def choose_input(self, input):
@ -483,7 +456,7 @@ class LyX_base:

# Since we do not know the encoding yet we need to read the input as
# bytes in binary mode, and convert later to unicode.
if input and input != u'-':
if input and input != '-':
self.dir = os.path.dirname(os.path.abspath(input))
try:
gzip.open(input).readline()
@ -493,7 +466,7 @@ class LyX_base:
self.input = open(input, 'rb')
self.compressed = False
else:
self.dir = u''
self.dir = ''
self.input = os.fdopen(sys.stdin.fileno(), 'rb')
self.compressed = False

@ -538,7 +511,7 @@ class LyX_base:
if not res:
self.warning(line)
#self.warning("Version %s" % result.group(1))
return res.decode('ascii') if not PY2 else res
return res.decode('ascii')
self.warning(str(self.header[:2]))
return None

@ -568,10 +541,7 @@ class LyX_base:
def read_format(self):
" Read from the header the fileformat of the present LyX file."
for line in self.header:
if PY2:
result = fileformat.match(line)
else:
result = fileformat.match(line.decode('ascii'))
result = fileformat.match(line.decode('ascii'))
if result:
return self.lyxformat(result.group(1))
else:
@ -660,7 +630,7 @@ class LyX_base:
if i == -1:
self.warning('Parameter not found in the header: %s' % param, 3)
return
self.header[i] = '\\%s %s' % (param, str(value))
self.header[i] = f'\\{param} {str(value)}'


def is_default_layout(self, layout):
@ -684,7 +654,7 @@ class LyX_base:
for step in conversion_chain:
steps = getattr(__import__("lyx_" + step), mode)

self.warning("Convertion step: %s - %s" % (step, mode),
self.warning(f"Convertion step: {step} - {mode}",
default_debug__ + 1)
if not steps:
self.error("The conversion to an older "
@ -897,9 +867,9 @@ class LyX_base:
class File(LyX_base):
" This class reads existing LyX files."

def __init__(self, end_format = 0, input = u'', output = u'', error = u'',
debug = default_debug__, try_hard = 0, cjk_encoding = u'',
final_version = u'', systemlyxdir = u''):
def __init__(self, end_format = 0, input = '', output = '', error = '',
debug = default_debug__, try_hard = 0, cjk_encoding = '',
final_version = '', systemlyxdir = ''):
LyX_base.__init__(self, end_format, input, output, error,
debug, try_hard, cjk_encoding, final_version,
systemlyxdir)

@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2006 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
@ -19,7 +18,6 @@
|
||||
""" This module parses lib/languages and prints it as a python
|
||||
dictionary, ready to use by other python modules"""
|
||||
|
||||
from __future__ import print_function
|
||||
import pprint
|
||||
|
||||
def parse_line(line):
|
||||
|
@ -1,5 +1,4 @@
|
||||
#! /usr/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2002-2011 The LyX Team
|
||||
# Copyright (C) 2002-2007 José Matos <jamatos@lyx.org>
|
||||
# Copyright (C) 2002-2004 Dekel Tsur <dekel@lyx.org>
|
||||
@ -23,17 +22,6 @@ import argparse
|
||||
import sys
|
||||
import LyX
|
||||
|
||||
# Provide support for both python 2 and 3
|
||||
PY2 = sys.version_info[0] == 2
|
||||
if PY2:
|
||||
# argparse returns strings in the commandline encoding, we need to convert.
|
||||
# sys.getdefaultencoding() would not always be correct, see
|
||||
# http://legacy.python.org/dev/peps/pep-0383/
|
||||
def cmd_arg(arg):
|
||||
return arg.decode(sys.getfilesystemencoding())
|
||||
else:
|
||||
cmd_arg = str
|
||||
# End of code to support for both python 2 and 3
|
||||
|
||||
def main():
|
||||
args = {}
|
||||
@ -55,29 +43,29 @@ def main():
|
||||
action="store_const", const=1, dest="debug")
|
||||
parser.add_argument("--noisy",
|
||||
action="store_const", const=10, dest="debug")
|
||||
parser.add_argument("-c", "--encoding", type=cmd_arg, dest="cjk_encoding",
|
||||
parser.add_argument("-c", "--encoding", type=str, dest="cjk_encoding",
|
||||
help="Files in format 413 and lower are read and"
|
||||
" written in the format of CJK-LyX."
|
||||
" If encoding is not given or 'auto' the encoding"
|
||||
" is determined from the locale.")
|
||||
parser.add_argument("-e", "--err", type=cmd_arg, dest="error",
|
||||
parser.add_argument("-e", "--err", type=str, dest="error",
|
||||
help= "File name of the error file else goes to stderr.")
|
||||
parser.add_argument("-o", "--output", type=cmd_arg, dest="output",
|
||||
parser.add_argument("-o", "--output", type=str, dest="output",
|
||||
help= "Name of the output file else goes to stdout.")
|
||||
parser.add_argument("-t", "--to", type=cmd_arg, dest= "end_format",
|
||||
parser.add_argument("-t", "--to", type=str, dest= "end_format",
|
||||
help= "Destination file format, default <latest>.")
|
||||
parser.add_argument("-V", "--final_version", type=cmd_arg, dest= "final_version",
|
||||
parser.add_argument("-V", "--final_version", type=str, dest= "final_version",
|
||||
help= "Destination version, default <latest>.")
|
||||
parser.add_argument("-l", "--list", action="store_true",
|
||||
help = "List all available formats and supported versions.")
|
||||
parser.add_argument("-n", "--try-hard", action="store_true",
|
||||
help = "Try hard (ignore any conversion errors).")
|
||||
parser.add_argument("-s", "--systemlyxdir", type=cmd_arg, dest= "systemlyxdir",
|
||||
parser.add_argument("-s", "--systemlyxdir", type=str, dest= "systemlyxdir",
|
||||
help= "LyX system directory for conversion from"
|
||||
" version 489 or older.")
|
||||
parser.add_argument('--version', action='version', version="""lyx2lyx, version %s
|
||||
Copyright (C) 2011 The LyX Team, José Matos and Dekel Tsur""" % LyX.version__)
|
||||
parser.add_argument("input", nargs='?', type=cmd_arg, default=None)
|
||||
parser.add_argument("input", nargs='?', type=str, default=None)
|
||||
|
||||
options = parser.parse_args()
|
||||
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2011 The LyX team
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
@ -90,7 +89,6 @@ revert_language(document, lyxname, babelname="", polyglossianame=""):
|
||||
this language package is not supported for the given language.
|
||||
'''
|
||||
|
||||
from __future__ import print_function
|
||||
import re
|
||||
import sys
|
||||
from parser_tools import (find_token, find_end_of_inset, get_containing_layout,
|
||||
@ -169,9 +167,7 @@ def put_cmd_in_ert(cmd, is_open=False, as_paragraph=False):
|
||||
"", "", "\\end_layout", ""]
|
||||
# ensure cmd is an unicode instance and make it "LyX safe".
|
||||
if isinstance(cmd, list):
|
||||
cmd = u"\n".join(cmd)
|
||||
elif sys.version_info[0] == 2 and isinstance(cmd, str):
|
||||
cmd = cmd.decode('utf8')
|
||||
cmd = "\n".join(cmd)
|
||||
cmd = cmd.translate(licr_table)
|
||||
cmd = cmd.replace("\\", "\n\\backslash\n")
|
||||
|
||||
@ -615,13 +611,13 @@ def is_document_option(document, option):
|
||||
|
||||
|
||||
singlepar_insets = [s.strip() for s in
|
||||
u"Argument, Caption Above, Caption Below, Caption Bicaption,"
|
||||
u"Caption Centered, Caption FigCaption, Caption Standard, Caption Table,"
|
||||
u"Flex Chemistry, Flex Fixme_Note, Flex Latin, Flex ListOfSlides,"
|
||||
u"Flex Missing_Figure, Flex PDF-Annotation, Flex PDF-Comment-Setup,"
|
||||
u"Flex Reflectbox, Flex S/R expression, Flex Sweave Input File,"
|
||||
u"Flex Sweave Options, Flex Thanks_Reference, Flex URL, Foot InTitle,"
|
||||
u"IPADeco, Index, Info, Phantom, Script".split(',')]
|
||||
"Argument, Caption Above, Caption Below, Caption Bicaption,"
|
||||
"Caption Centered, Caption FigCaption, Caption Standard, Caption Table,"
|
||||
"Flex Chemistry, Flex Fixme_Note, Flex Latin, Flex ListOfSlides,"
|
||||
"Flex Missing_Figure, Flex PDF-Annotation, Flex PDF-Comment-Setup,"
|
||||
"Flex Reflectbox, Flex S/R expression, Flex Sweave Input File,"
|
||||
"Flex Sweave Options, Flex Thanks_Reference, Flex URL, Foot InTitle,"
|
||||
"IPADeco, Index, Info, Phantom, Script".split(',')]
|
||||
# print(singlepar_insets)
|
||||
|
||||
def revert_language(document, lyxname, babelname="", polyglossianame=""):
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx -*- python -*-
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2006 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2006 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2006 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2006 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2003-2004 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This document is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
@ -220,7 +219,7 @@ def update_ref(document):
|
||||
if lines[i].split()[-1] == "\\ref{":
|
||||
i = i + 1
|
||||
ref, label = latexdel_getargs(document, i)
|
||||
lines[i - 1] = "%s[%s]{%s}" % (lines[i - 1][:-1], ref, label)
|
||||
lines[i - 1] = f"{lines[i - 1][:-1]}[{ref}]{{{label}}}"
|
||||
|
||||
i = i + 1
|
||||
|
||||
@ -246,7 +245,7 @@ def update_latexdel(document):
|
||||
i = i + 1
|
||||
|
||||
ref, label = latexdel_getargs(document, i)
|
||||
lines[i -1] = "%s[%s]{%s}" % (lines[i-1][:-1], label, ref)
|
||||
lines[i -1] = f"{lines[i-1][:-1]}[{label}]{{{ref}}}"
|
||||
|
||||
i = i + 1
|
||||
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
@ -48,7 +47,7 @@ def update_tabular(document):
|
||||
|
||||
tabular_line = i
|
||||
i = i +1
|
||||
lines.insert(i, '<Features rotate="%s" islongtable="%s" endhead="%s" endfirsthead="%s" endfoot="%s" endlastfoot="%s">' % (head[2],head[3],head[4],head[5],head[6],head[7]))
|
||||
lines.insert(i, f'<Features rotate="{head[2]}" islongtable="{head[3]}" endhead="{head[4]}" endfirsthead="{head[5]}" endfoot="{head[6]}" endlastfoot="{head[7]}">')
|
||||
|
||||
i = i +1
|
||||
|
||||
@ -79,7 +78,7 @@ def update_tabular(document):
|
||||
ncells = ncells + 1
|
||||
del lines[i]
|
||||
|
||||
lines[tabular_line] = '<LyXTabular version="1" rows="%s" columns="%s">' % (rows-len(cont_row),columns)
|
||||
lines[tabular_line] = f'<LyXTabular version="1" rows="{rows-len(cont_row)}" columns="{columns}">'
|
||||
del lines[i]
|
||||
if not lines[i]:
|
||||
del lines[i]
|
||||
@ -125,13 +124,13 @@ def update_tabular(document):
|
||||
for j in range(rows):
|
||||
if j in cont_row:
|
||||
continue
|
||||
tmp.append('<Row topline="%s" bottomline="%s" newpage="%s">' % (row_info[j][0],row_info[j][1],row_info[j][3]))
|
||||
tmp.append(f'<Row topline="{row_info[j][0]}" bottomline="{row_info[j][1]}" newpage="{row_info[j][3]}">')
|
||||
|
||||
for k in range(columns):
|
||||
if j:
|
||||
tmp.append('<Column>')
|
||||
else:
|
||||
tmp.append('<Column alignment="%s" valignment="0" leftline="%s" rightline="%s" width=%s special=%s>' % (column_info[k][0],column_info[k][1], column_info[k][2], column_info[k][3], column_info[k][4]))
|
||||
tmp.append(f'<Column alignment="{column_info[k][0]}" valignment="0" leftline="{column_info[k][1]}" rightline="{column_info[k][2]}" width={column_info[k][3]} special={column_info[k][4]}>')
|
||||
m = j*columns + k
|
||||
|
||||
leftline = int(column_info[k][1])
|
||||
@ -218,9 +217,9 @@ def set_paragraph_properties(lines, prop_dict):
|
||||
if prop_dict[prop] != 'default':
|
||||
insert = 1
|
||||
if prop == "color":
|
||||
aux.append("\\%s %s" % (prop, prop_dict[prop]))
|
||||
aux.append(f"\\{prop} {prop_dict[prop]}")
|
||||
elif prop != "family" or prop_dict[prop] != "roman":
|
||||
aux.append("\\%s %s " % (prop, prop_dict[prop]))
|
||||
aux.append(f"\\{prop} {prop_dict[prop]} ")
|
||||
|
||||
# remove final char properties
|
||||
n = len(lines)
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
@ -73,7 +72,7 @@ def table_update(lines):
|
||||
res = features_re.match( lines[1] )
|
||||
if res:
|
||||
val = res.groups()
|
||||
lines[1] = '<features rotate="%s" islongtable="%s" endhead="%s" endfirsthead="%s" endfoot="%s" endlastfoot="%s">' % (bool_table(val[0]), bool_table(val[1]), val[2], val[3], val[4], val[5])
|
||||
lines[1] = f'<features rotate="{bool_table(val[0])}" islongtable="{bool_table(val[1])}" endhead="{val[2]}" endfirsthead="{val[3]}" endfoot="{val[4]}" endlastfoot="{val[5]}">'
|
||||
|
||||
if lines[2]=="":
|
||||
del lines[2]
|
||||
@ -100,12 +99,12 @@ def table_update(lines):
|
||||
res = cell_re.match(lines[i])
|
||||
if res:
|
||||
val = res.groups()
|
||||
lines[i] = '<cell multicolumn="%s" alignment="%s" valignment="%s" topline="%s" bottomline="%s" leftline="%s" rightline="%s" rotate="%s" usebox="%s" width="%s" special="%s">' % ( val[0], align_table[val[1]], align_vertical[val[2]], bool_table(val[3]), bool_table(val[4]), bool_table(val[5]), bool_table(val[6]), bool_table(val[7]), use_table[val[8]], val[9], val[10])
|
||||
lines[i] = f'<cell multicolumn="{val[0]}" alignment="{align_table[val[1]]}" valignment="{align_vertical[val[2]]}" topline="{bool_table(val[3])}" bottomline="{bool_table(val[4])}" leftline="{bool_table(val[5])}" rightline="{bool_table(val[6])}" rotate="{bool_table(val[7])}" usebox="{use_table[val[8]]}" width="{val[9]}" special="{val[10]}">'
|
||||
|
||||
res = row_re.match(lines[i])
|
||||
if res:
|
||||
val = res.groups()
|
||||
lines[i] = '<row topline="%s" bottomline="%s" newpage="%s">' % (bool_table(val[0]), bool_table(val[1]), bool_table(val[2]))
|
||||
lines[i] = f'<row topline="{bool_table(val[0])}" bottomline="{bool_table(val[1])}" newpage="{bool_table(val[2])}">'
|
||||
|
||||
i = i + 1
|
||||
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2002 Dekel Tsur <dekel@lyx.org>
|
||||
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2002 Dekel Tsur <dekel@lyx.org>
|
||||
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2002 Dekel Tsur <dekel@lyx.org>
|
||||
# Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
|
||||
# Copyright (C) 2004-2005 Georg Baum <Georg.Baum@post.rwth-aachen.de>
|
||||
@ -1893,7 +1892,7 @@ def convert_graphics(document):
|
||||
return
|
||||
i = i + 1
|
||||
filename = document.body[j].split()[1]
|
||||
if document.dir == u'' and not os.path.isabs(filename):
|
||||
if document.dir == '' and not os.path.isabs(filename):
|
||||
# We don't know the directory and cannot check the document.
|
||||
# We could use a heuristic and take the current directory,
|
||||
# and we could try to find out if documentname has an extension,
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2006 José Matos <jamatos@lyx.org>
|
||||
# Copyright (C) 2004-2006 Georg Baum <Georg.Baum@post.rwth-aachen.de>
|
||||
#
|
||||
@ -27,15 +26,6 @@ from parser_tools import find_re, find_token, find_token_backwards, find_token_e
|
||||
from lyx2lyx_tools import insert_document_option
|
||||
from LyX import get_encoding
|
||||
|
||||
# Provide support for both python 2 and 3
|
||||
PY2 = sys.version_info[0] == 2
|
||||
if not PY2:
|
||||
text_type = str
|
||||
unichr = chr
|
||||
else:
|
||||
text_type = unicode
|
||||
# End of code to support for both python 2 and 3
|
||||
|
||||
####################################################################
|
||||
# Private helper functions
|
||||
|
||||
@ -272,11 +262,11 @@ necessary parsing in modern formats than in ancient ones.
|
||||
if result:
|
||||
language = result.group(1)
|
||||
if language == "default":
|
||||
document.warning("Resetting encoding from %s to %s." % (encoding_stack[-1], document.encoding), 3)
|
||||
document.warning(f"Resetting encoding from {encoding_stack[-1]} to {document.encoding}.", 3)
|
||||
encoding_stack[-1] = document.encoding
|
||||
else:
|
||||
from lyx2lyx_lang import lang
|
||||
document.warning("Setting encoding from %s to %s." % (encoding_stack[-1], lang[language][3]), 3)
|
||||
document.warning(f"Setting encoding from {encoding_stack[-1]} to {lang[language][3]}.", 3)
|
||||
encoding_stack[-1] = lang[language][3]
|
||||
elif find_token(document.body, "\\begin_layout", i, i + 1) == i:
|
||||
document.warning("Adding nested encoding %s." % encoding_stack[-1], 3)
|
||||
@ -357,7 +347,7 @@ def read_unicodesymbols():
|
||||
try:
|
||||
# flag1 and flag2 are preamble and other flags
|
||||
[ucs4,command,flag1,flag2] =line.split(None,3)
|
||||
spec_chars[unichr(eval(ucs4))] = [command, flag1, flag2]
|
||||
spec_chars[chr(eval(ucs4))] = [command, flag1, flag2]
|
||||
except:
|
||||
pass
|
||||
fp.close()
|
||||
@ -371,7 +361,7 @@ def revert_unicode_line(document, i, insets, spec_chars, replacement_character =
|
||||
math_intro='\n\\begin_inset Formula $'
|
||||
math_outro='$\n\\end_inset'
|
||||
|
||||
mod_line = u''
|
||||
mod_line = ''
|
||||
if i and not is_inset_line(document, i-1):
|
||||
last_char = document.body[i - 1][-1:]
|
||||
else:
|
||||
@ -661,19 +651,19 @@ def convert_commandparams(document):
|
||||
lines = ["\\begin_inset LatexCommand %s" % name]
|
||||
if option1 != "":
|
||||
if commandparams_info[name][0] == "":
|
||||
document.warning("Ignoring invalid option `%s' of command `%s'." % (option1, name))
|
||||
document.warning(f"Ignoring invalid option `{option1}' of command `{name}'.")
|
||||
else:
|
||||
lines.append('%s "%s"' % (commandparams_info[name][0], option1.replace('\\', '\\\\').replace('"', '\\"')))
|
||||
lines.append('{} "{}"'.format(commandparams_info[name][0], option1.replace('\\', '\\\\').replace('"', '\\"')))
|
||||
if option2 != "":
|
||||
if commandparams_info[name][1] == "":
|
||||
document.warning("Ignoring invalid second option `%s' of command `%s'." % (option2, name))
|
||||
document.warning(f"Ignoring invalid second option `{option2}' of command `{name}'.")
|
||||
else:
|
||||
lines.append('%s "%s"' % (commandparams_info[name][1], option2.replace('\\', '\\\\').replace('"', '\\"')))
|
||||
lines.append('{} "{}"'.format(commandparams_info[name][1], option2.replace('\\', '\\\\').replace('"', '\\"')))
|
||||
if argument != "":
|
||||
if commandparams_info[name][2] == "":
|
||||
document.warning("Ignoring invalid argument `%s' of command `%s'." % (argument, name))
|
||||
document.warning(f"Ignoring invalid argument `{argument}' of command `{name}'.")
|
||||
else:
|
||||
lines.append('%s "%s"' % (commandparams_info[name][2], argument.replace('\\', '\\\\').replace('"', '\\"')))
|
||||
lines.append('{} "{}"'.format(commandparams_info[name][2], argument.replace('\\', '\\\\').replace('"', '\\"')))
|
||||
document.body[i:i+1] = lines
|
||||
i = i + 1
|
||||
|
||||
@ -708,23 +698,23 @@ def revert_commandparams(document):
|
||||
pname == commandparams_info[name][2]):
|
||||
argument = pvalue.strip('"').replace('\\"', '"').replace('\\\\', '\\')
|
||||
elif document.body[k].strip() != "":
|
||||
document.warning("Ignoring unknown contents `%s' in command inset %s." % (document.body[k], name))
|
||||
document.warning(f"Ignoring unknown contents `{document.body[k]}' in command inset {name}.")
|
||||
if name == "bibitem":
|
||||
if option1 == "":
|
||||
lines = ["\\bibitem {%s}" % argument]
|
||||
else:
|
||||
lines = ["\\bibitem [%s]{%s}" % (option1, argument)]
|
||||
lines = [f"\\bibitem [{option1}]{{{argument}}}"]
|
||||
else:
|
||||
if option1 == "":
|
||||
if option2 == "":
|
||||
lines = ["\\begin_inset LatexCommand \\%s{%s}" % (name, argument)]
|
||||
lines = [f"\\begin_inset LatexCommand \\{name}{{{argument}}}"]
|
||||
else:
|
||||
lines = ["\\begin_inset LatexCommand \\%s[][%s]{%s}" % (name, option2, argument)]
|
||||
lines = [f"\\begin_inset LatexCommand \\{name}[][{option2}]{{{argument}}}"]
|
||||
else:
|
||||
if option2 == "":
|
||||
lines = ["\\begin_inset LatexCommand \\%s[%s]{%s}" % (name, option1, argument)]
|
||||
lines = [f"\\begin_inset LatexCommand \\{name}[{option1}]{{{argument}}}"]
|
||||
else:
|
||||
lines = ["\\begin_inset LatexCommand \\%s[%s][%s]{%s}" % (name, option1, option2, argument)]
|
||||
lines = [f"\\begin_inset LatexCommand \\{name}[{option1}][{option2}]{{{argument}}}"]
|
||||
if name != "bibitem":
|
||||
if preview_line != "":
|
||||
lines.append(preview_line)
|
||||
@ -765,9 +755,9 @@ def revert_nomenclature(document):
|
||||
elif document.body[k].strip() != "":
|
||||
document.warning("Ignoring unknown contents `%s' in nomenclature inset." % document.body[k])
|
||||
if prefix == "":
|
||||
command = 'nomenclature{%s}{%s}' % (symbol, description)
|
||||
command = f'nomenclature{{{symbol}}}{{{description}}}'
|
||||
else:
|
||||
command = 'nomenclature[%s]{%s}{%s}' % (prefix, symbol, description)
|
||||
command = f'nomenclature[{prefix}]{{{symbol}}}{{{description}}}'
|
||||
document.body[i:j+1] = ['\\begin_inset ERT',
|
||||
'status collapsed',
|
||||
'',
|
||||
@ -1027,22 +1017,22 @@ def revert_caption(document):
|
||||
|
||||
# Accents of InsetLaTeXAccent
|
||||
accent_map = {
|
||||
"`" : u'\u0300', # grave
|
||||
"'" : u'\u0301', # acute
|
||||
"^" : u'\u0302', # circumflex
|
||||
"~" : u'\u0303', # tilde
|
||||
"=" : u'\u0304', # macron
|
||||
"u" : u'\u0306', # breve
|
||||
"." : u'\u0307', # dot above
|
||||
"\"": u'\u0308', # diaeresis
|
||||
"r" : u'\u030a', # ring above
|
||||
"H" : u'\u030b', # double acute
|
||||
"v" : u'\u030c', # caron
|
||||
"b" : u'\u0320', # minus sign below
|
||||
"d" : u'\u0323', # dot below
|
||||
"c" : u'\u0327', # cedilla
|
||||
"k" : u'\u0328', # ogonek
|
||||
"t" : u'\u0361' # tie. This is special: It spans two characters, but
|
||||
"`" : '\u0300', # grave
|
||||
"'" : '\u0301', # acute
|
||||
"^" : '\u0302', # circumflex
|
||||
"~" : '\u0303', # tilde
|
||||
"=" : '\u0304', # macron
|
||||
"u" : '\u0306', # breve
|
||||
"." : '\u0307', # dot above
|
||||
"\"": '\u0308', # diaeresis
|
||||
"r" : '\u030a', # ring above
|
||||
"H" : '\u030b', # double acute
|
||||
"v" : '\u030c', # caron
|
||||
"b" : '\u0320', # minus sign below
|
||||
"d" : '\u0323', # dot below
|
||||
"c" : '\u0327', # cedilla
|
||||
"k" : '\u0328', # ogonek
|
||||
"t" : '\u0361' # tie. This is special: It spans two characters, but
|
||||
# only one is given as argument, so we don't need to
|
||||
# treat it differently.
|
||||
}
|
||||
@ -1050,17 +1040,17 @@ accent_map = {
|
||||
|
||||
# special accents of InsetLaTeXAccent without argument
|
||||
special_accent_map = {
|
||||
'i' : u'\u0131', # dotless i
|
||||
'j' : u'\u0237', # dotless j
|
||||
'l' : u'\u0142', # l with stroke
|
||||
'L' : u'\u0141' # L with stroke
|
||||
'i' : '\u0131', # dotless i
|
||||
'j' : '\u0237', # dotless j
|
||||
'l' : '\u0142', # l with stroke
|
||||
'L' : '\u0141' # L with stroke
|
||||
}
|
||||
|
||||
|
||||
# special accent arguments of InsetLaTeXAccent
|
||||
accented_map = {
|
||||
'\\i' : u'\u0131', # dotless i
|
||||
'\\j' : u'\u0237' # dotless j
|
||||
'\\i' : '\u0131', # dotless i
|
||||
'\\j' : '\u0237' # dotless j
|
||||
}
|
||||
|
||||
|
||||
@ -1087,7 +1077,7 @@ def _convert_accent(accent, accented_char):
|
||||
return ''
|
||||
a = accent_map.get(type)
|
||||
if a:
|
||||
return unicodedata.normalize("NFC", "%s%s" % (char, a))
|
||||
return unicodedata.normalize("NFC", f"{char}{a}")
|
||||
return ''
|
||||
|
||||
|
||||
@ -1137,9 +1127,9 @@ def convert_accent(document):
|
||||
converted = _convert_accent(accent, accented_char)
|
||||
if converted == '':
|
||||
# Normalize contents
|
||||
contents = '%s{%s}' % (accent, accented_char),
|
||||
contents = f'{accent}{{{accented_char}}}',
|
||||
else:
|
||||
document.body[i] = '%s%s' % (prefix, converted)
|
||||
document.body[i] = f'{prefix}{converted}'
|
||||
i += 1
|
||||
continue
|
||||
document.warning("Converting unknown InsetLaTeXAccent `\\i %s' to ERT." % contents)
|
||||
@ -1221,7 +1211,7 @@ def revert_accent(document):
|
||||
try:
|
||||
document.body[i] = normalize("NFD", document.body[i])
|
||||
except TypeError:
|
||||
document.body[i] = normalize("NFD", text_type(document.body[i], 'utf-8'))
|
||||
document.body[i] = normalize("NFD", str(document.body[i], 'utf-8'))
|
||||
|
||||
# Replace accented characters with InsetLaTeXAccent
|
||||
# Do not convert characters that can be represented in the chosen
|
||||
@ -1284,7 +1274,7 @@ def revert_accent(document):
|
||||
# Delete the accented characters
|
||||
document.body[i] = document.body[i][:j-1]
|
||||
# Finally add the InsetLaTeXAccent
|
||||
document.body[i] += "\\i \\%s{%s}" % (inverse_accent_map[accent], accented_char)
|
||||
document.body[i] += f"\\i \\{inverse_accent_map[accent]}{{{accented_char}}}"
|
||||
break
|
||||
i = i + 1
|
||||
|
||||
@ -1385,17 +1375,17 @@ def normalize_font_whitespace(document, char_properties):
|
||||
for k in list(changes.keys()):
|
||||
# exclude property k because that is already in lines[i]
|
||||
if k != words[0]:
|
||||
added_lines[1:1] = ["%s %s" % (k, changes[k])]
|
||||
added_lines[1:1] = [f"{k} {changes[k]}"]
|
||||
for k in list(changes.keys()):
|
||||
# exclude property k because that must be added below anyway
|
||||
if k != words[0]:
|
||||
added_lines[0:0] = ["%s %s" % (k, char_properties[k])]
|
||||
added_lines[0:0] = [f"{k} {char_properties[k]}"]
|
||||
if defaultproperty:
|
||||
# Property is reset in lines[i], so add the new stuff afterwards
|
||||
lines[i+1:i+1] = added_lines
|
||||
else:
|
||||
# Reset property for the space
|
||||
added_lines[0:0] = ["%s %s" % (words[0], char_properties[words[0]])]
|
||||
added_lines[0:0] = [f"{words[0]} {char_properties[words[0]]}"]
|
||||
lines[i:i] = added_lines
|
||||
i = i + len(added_lines)
|
||||
|
||||
@ -1413,13 +1403,13 @@ def normalize_font_whitespace(document, char_properties):
|
||||
for k in list(changes.keys()):
|
||||
# exclude property k because that is already in lines[i]
|
||||
if k != words[0]:
|
||||
added_lines[1:1] = ["%s %s" % (k, changes[k])]
|
||||
added_lines[1:1] = [f"{k} {changes[k]}"]
|
||||
for k in list(changes.keys()):
|
||||
# exclude property k because that must be added below anyway
|
||||
if k != words[0]:
|
||||
added_lines[0:0] = ["%s %s" % (k, char_properties[k])]
|
||||
added_lines[0:0] = [f"{k} {char_properties[k]}"]
|
||||
# Reset property for the space
|
||||
added_lines[0:0] = ["%s %s" % (words[0], char_properties[words[0]])]
|
||||
added_lines[0:0] = [f"{words[0]} {char_properties[words[0]]}"]
|
||||
lines[i:i] = added_lines
|
||||
i = i + len(added_lines)
|
||||
|
||||
@ -1598,7 +1588,7 @@ def revert_graphics_rotation(document):
|
||||
document.body.insert(j-1, '\tspecial angle=%s' % rotateAngle)
|
||||
else:
|
||||
l = find_token(document.body, "\tspecial", i + 1, j)
|
||||
document.body[l] = document.body[l].replace(special, 'angle=%s,%s' % (rotateAngle, special))
|
||||
document.body[l] = document.body[l].replace(special, f'angle={rotateAngle},{special}')
|
||||
k = find_token(document.body, "\trotateAngle", i + 1, j)
|
||||
if k != -1:
|
||||
del document.body[k]
|
||||
@ -1819,7 +1809,7 @@ after label
|
||||
'',
|
||||
'',
|
||||
r'\backslash',
|
||||
'lstinline%s{%s}' % (params, inlinecode),
|
||||
f'lstinline{params}{{{inlinecode}}}',
|
||||
r'\end_layout',
|
||||
'',
|
||||
r'\end_inset']
|
||||
@ -1892,7 +1882,7 @@ lstinputlisting{file}[opt]
|
||||
'',
|
||||
'',
|
||||
r'\backslash',
|
||||
'%s%s{%s}' % (cmd, option, file),
|
||||
f'{cmd}{option}{{{file}}}',
|
||||
r'\end_layout',
|
||||
'',
|
||||
r'\end_inset']
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2007-2008 The LyX Team <lyx-devel@lists.lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
|
@ -1,4 +1,3 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This file is part of lyx2lyx
|
||||
# Copyright (C) 2011 The LyX team
|
||||
#
|
||||
@ -801,7 +800,7 @@ def revert_author_id(document):
|
||||
idmap[author_id] = anum
|
||||
name = m.group(3)
|
||||
email = m.group(4)
|
||||
document.header[i] = "\\author %s %s" % (name, email)
|
||||
document.header[i] = f"\\author {name} {email}"
|
||||
i += 1
|
||||
# FIXME Should this be incremented if we didn't match?
|
||||
anum += 1
|
||||
@ -1316,7 +1315,7 @@ def revert_fontcolor(document):
|
||||
insert_to_preamble(document,
|
||||
['% Set the font color',
|
||||
'\\@ifundefined{definecolor}{\\usepackage{color}}{}',
|
||||
'\\definecolor{document_fontcolor}{rgb}{%s,%s,%s}' % (red, green, blue),
|
||||
f'\\definecolor{{document_fontcolor}}{{rgb}}{{{red},{green},{blue}}}',
|
||||
'\\color{document_fontcolor}'])
|
||||
|
||||
|
||||
@ -1335,7 +1334,7 @@ def revert_shadedboxcolor(document):
|
||||
insert_to_preamble(document,
|
||||
['% Set the color of boxes with shaded background',
|
||||
'\\@ifundefined{definecolor}{\\usepackage{color}}{}',
|
||||
"\\definecolor{shadecolor}{rgb}{%s,%s,%s}" % (red, green, blue)])
|
||||
f"\\definecolor{{shadecolor}}{{rgb}}{{{red},{green},{blue}}}"])
|
||||
|
||||
|
||||
def revert_lyx_version(document):
|
||||
|
@ -1,4 +1,3 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This file is part of lyx2lyx
|
||||
# Copyright (C) 2011 The LyX team
|
||||
#
|
||||
@ -585,7 +584,7 @@ def convert_use_packages(document):
|
||||
i = find_token(document.header, "\\use_%s" % p, 0)
|
||||
if i != -1:
|
||||
value = get_value(document.header, "\\use_%s" % p, i)
|
||||
document.header[i] = "\\use_package %s %s" % (p, value)
|
||||
document.header[i] = f"\\use_package {p} {value}"
|
||||
|
||||
|
||||
def revert_use_packages(document):
|
||||
@ -607,7 +606,7 @@ def revert_use_packages(document):
|
||||
if i != -1:
|
||||
value = get_value(document.header, "\\use_package %s" % p, i).split()[1]
|
||||
del document.header[i]
|
||||
document.header.insert(j, "\\use_%s %s" % (p, value))
|
||||
document.header.insert(j, f"\\use_{p} {value}")
|
||||
else:
|
||||
document.header.insert(j, "\\use_%s 1" % p)
|
||||
j += 1
|
||||
|
@ -1,4 +1,3 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This file is part of lyx2lyx
|
||||
# Copyright (C) 2015 The LyX team
|
||||
#
|
||||
@ -159,7 +158,7 @@ def convert_separator(document):
|
||||
content = "\n".join(document.body[lay[1]:lay[2]])
|
||||
for val in list(sty_dict.keys()):
|
||||
if content.find("\\%s" % val) != -1:
|
||||
document.body[j:j] = ["\\%s %s" % (val, sty_dict[val])]
|
||||
document.body[j:j] = [f"\\{val} {sty_dict[val]}"]
|
||||
i = i + 1
|
||||
j = j + 1
|
||||
document.body[j:j] = parins
|
||||
@ -195,7 +194,7 @@ def convert_separator(document):
|
||||
content = "\n".join(document.body[lay[1]:lay[2]])
|
||||
for val in list(sty_dict.keys()):
|
||||
if content.find("\\%s" % val) != -1:
|
||||
document.body[j:j] = ["\\%s %s" % (val, sty_dict[val])]
|
||||
document.body[j:j] = [f"\\{val} {sty_dict[val]}"]
|
||||
i = i + 1
|
||||
j = j + 1
|
||||
document.body[j:j] = parins
|
||||
@ -1168,11 +1167,11 @@ def convert_origin(document):
|
||||
if i == -1:
|
||||
document.warning("Malformed LyX document: No \\textclass!!")
|
||||
return
|
||||
if document.dir == u'':
|
||||
origin = u'stdin'
|
||||
if document.dir == '':
|
||||
origin = 'stdin'
|
||||
else:
|
||||
relpath = u''
|
||||
if document.systemlyxdir and document.systemlyxdir != u'':
|
||||
relpath = ''
|
||||
if document.systemlyxdir and document.systemlyxdir != '':
|
||||
try:
|
||||
if os.path.isabs(document.dir):
|
||||
absdir = os.path.normpath(document.dir)
|
||||
@ -1183,14 +1182,14 @@ def convert_origin(document):
|
||||
else:
|
||||
abssys = os.path.normpath(os.path.abspath(document.systemlyxdir))
|
||||
relpath = os.path.relpath(absdir, abssys)
|
||||
if relpath.find(u'..') == 0:
|
||||
relpath = u''
|
||||
if relpath.find('..') == 0:
|
||||
relpath = ''
|
||||
except:
|
||||
relpath = u''
|
||||
if relpath == u'':
|
||||
origin = document.dir.replace(u'\\', u'/') + u'/'
|
||||
relpath = ''
|
||||
if relpath == '':
|
||||
origin = document.dir.replace('\\', '/') + '/'
|
||||
else:
|
||||
origin = os.path.join(u"/systemlyxdir", relpath).replace(u'\\', u'/') + u'/'
|
||||
origin = os.path.join("/systemlyxdir", relpath).replace('\\', '/') + '/'
|
||||
document.header[i:i] = ["\\origin " + origin]
|
||||
|
||||
|
||||
|
@ -1,4 +1,3 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This file is part of lyx2lyx
|
||||
# Copyright (C) 2016 The LyX team
|
||||
#
|
||||
@ -685,13 +684,13 @@ def revert_cjkquotes(document):
|
||||
if val[1] == "l":
|
||||
# inner opening mark
|
||||
if cjk:
|
||||
replace = [u"\u300E"]
|
||||
replace = ["\u300E"]
|
||||
else:
|
||||
replace = ["\\begin_inset Formula $\\llceil$", "\\end_inset"]
|
||||
else:
|
||||
# inner closing mark
|
||||
if cjk:
|
||||
replace = [u"\u300F"]
|
||||
replace = ["\u300F"]
|
||||
else:
|
||||
replace = ["\\begin_inset Formula $\\rrfloor$", "\\end_inset"]
|
||||
else:
|
||||
@ -699,13 +698,13 @@ def revert_cjkquotes(document):
|
||||
if val[1] == "l":
|
||||
# outer opening mark
|
||||
if cjk:
|
||||
replace = [u"\u300C"]
|
||||
replace = ["\u300C"]
|
||||
else:
|
||||
replace = ["\\begin_inset Formula $\\lceil$", "\\end_inset"]
|
||||
else:
|
||||
# outer closing mark
|
||||
if cjk:
|
||||
replace = [u"\u300D"]
|
||||
replace = ["\u300D"]
|
||||
else:
|
||||
replace = ["\\begin_inset Formula $\\rfloor$", "\\end_inset"]
|
||||
|
||||
@ -737,13 +736,13 @@ def revert_cjkquotes(document):
|
||||
if val[1] == "l":
|
||||
# inner opening mark
|
||||
if cjk:
|
||||
replace = [u"\u3008"]
|
||||
replace = ["\u3008"]
|
||||
else:
|
||||
replace = ["\\begin_inset Formula $\\langle$", "\\end_inset"]
|
||||
else:
|
||||
# inner closing mark
|
||||
if cjk:
|
||||
replace = [u"\u3009"]
|
||||
replace = ["\u3009"]
|
||||
else:
|
||||
replace = ["\\begin_inset Formula $\\rangle$", "\\end_inset"]
|
||||
else:
|
||||
@ -751,13 +750,13 @@ def revert_cjkquotes(document):
|
||||
if val[1] == "l":
|
||||
# outer opening mark
|
||||
if cjk:
|
||||
replace = [u"\u300A"]
|
||||
replace = ["\u300A"]
|
||||
else:
|
||||
replace = ["\\begin_inset Formula $\\langle\\kern -2.5pt\\langle$", "\\end_inset"]
|
||||
else:
|
||||
# outer closing mark
|
||||
if cjk:
|
||||
replace = [u"\u300B"]
|
||||
replace = ["\u300B"]
|
||||
else:
|
||||
replace = ["\\begin_inset Formula $\\rangle\\kern -2.5pt\\rangle$", "\\end_inset"]
|
||||
|
||||
@ -1591,7 +1590,7 @@ def convert_dashligatures(document):
|
||||
# or "\threehyphens\n" as interim representation for -- an ---.)
|
||||
lines = document.body
|
||||
has_literal_dashes = has_ligature_dashes = False
|
||||
dash_pattern = re.compile(u".*[\u2013\u2014]|\\twohyphens|\\threehyphens")
|
||||
dash_pattern = re.compile(".*[\u2013\u2014]|\\twohyphens|\\threehyphens")
|
||||
i = j = 0
|
||||
while True:
|
||||
# skip lines without dashes:
|
||||
@ -1627,12 +1626,12 @@ def convert_dashligatures(document):
|
||||
continue
|
||||
|
||||
# literal dash followed by a non-white-character or no-break space:
|
||||
if re.search(u"[\u2013\u2014]([\\S\u00A0\u202F\u2060]|$)",
|
||||
if re.search("[\u2013\u2014]([\\S\u00A0\u202F\u2060]|$)",
|
||||
line, flags=re.UNICODE):
|
||||
has_literal_dashes = True
|
||||
# ligature dash followed by non-white-char or no-break space on next line:
|
||||
if (re.search(r"(\\twohyphens|\\threehyphens)", line) and
|
||||
re.match(u"[\\S\u00A0\u202F\u2060]", lines[i+1], flags=re.UNICODE)):
|
||||
re.match("[\\S\u00A0\u202F\u2060]", lines[i+1], flags=re.UNICODE)):
|
||||
has_ligature_dashes = True
|
||||
if has_literal_dashes and has_ligature_dashes:
|
||||
# TODO: insert a warning note in the document?
|
||||
@ -1661,7 +1660,7 @@ def revert_dashligatures(document):
|
||||
if use_dash_ligatures != "true" or document.backend != "latex":
|
||||
return
|
||||
i = 0
|
||||
dash_pattern = re.compile(u".*[\u2013\u2014]")
|
||||
dash_pattern = re.compile(".*[\u2013\u2014]")
|
||||
while True:
|
||||
# skip lines without dashes:
|
||||
i = find_re(document.body, dash_pattern, i+1)
|
||||
@ -1692,8 +1691,8 @@ def revert_dashligatures(document):
|
||||
i = end
|
||||
continue
|
||||
# TODO: skip replacement in typewriter fonts
|
||||
line = line.replace(u'\u2013', '\\twohyphens\n')
|
||||
line = line.replace(u'\u2014', '\\threehyphens\n')
|
||||
line = line.replace('\u2013', '\\twohyphens\n')
|
||||
line = line.replace('\u2014', '\\threehyphens\n')
|
||||
document.body[i:i+1] = line.split('\n')
|
||||
# redefine the dash LICRs to use ligature dashes:
|
||||
add_to_preamble(document, [r'\renewcommand{\textendash}{--}',
|
||||
|
@ -1,4 +1,3 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This file is part of lyx2lyx
|
||||
# Copyright (C) 2018 The LyX team
|
||||
#
|
||||
@ -31,7 +30,7 @@ from parser_tools import (count_pars_in_inset, del_complete_lines, del_token,
|
||||
find_token_backwards, find_token_exact, find_re, get_bool_value,
|
||||
get_containing_inset, get_containing_layout, get_option_value, get_value,
|
||||
get_quoted_value, is_in_inset)
|
||||
# del_value,
|
||||
# del_value,
|
||||
# find_complete_lines,
|
||||
# find_re, find_substring,
|
||||
# set_bool_value
|
||||
@ -54,7 +53,7 @@ def add_preamble_fonts(document, fontmap):
|
||||
xoption = "[" + ",".join(fontmap[pkg]) + "]"
|
||||
else:
|
||||
xoption = ""
|
||||
preamble = "\\usepackage%s{%s}" % (xoption, pkg)
|
||||
preamble = f"\\usepackage{xoption}{{{pkg}}}"
|
||||
add_to_preamble(document, [preamble])
|
||||
|
||||
|
||||
@ -1247,10 +1246,6 @@ def revert_dateinfo(document):
|
||||
fmt = re.sub('[^\'%]d', '%d', fmt)
|
||||
fmt = fmt.replace("'", "")
|
||||
result = dte.strftime(fmt)
|
||||
if sys.version_info < (3,0):
|
||||
# In Python 2, datetime module works with binary strings,
|
||||
# our dateformat strings are utf8-encoded:
|
||||
result = result.decode('utf-8')
|
||||
document.body[i : j+1] = [result]
|
||||
|
||||
|
||||
@ -1548,7 +1543,7 @@ def convert_hebrew_parentheses(document):
|
||||
'PlainFrame':['1', '2'], 'FragileFrame':['1', '2'],
|
||||
'FrameTitle':['1'], 'FrameSubtitle':['1'],
|
||||
'Overprint':['item:1'],
|
||||
'Uncover':['1'], 'Only':['1'], 'Block':['1'],
|
||||
'Uncover':['1'], 'Only':['1'], 'Block':['1'],
|
||||
'ExampleBlock':['1'], 'AlertBlock':['1'],
|
||||
'Quotation':['1'], 'Quote':['1'],
|
||||
'Verse':['1'], 'Corollary':['1'],
|
||||
@ -1565,26 +1560,26 @@ def convert_hebrew_parentheses(document):
|
||||
'Flex Visible':['1'], 'Flex Invisible':['1'],
|
||||
'Flex Alternative':['1'], 'Flex Beamer Note':['1']
|
||||
})
|
||||
elif document.textclass == 'europecv':
|
||||
elif document.textclass == 'europecv':
|
||||
skip_layouts_arguments.update({
|
||||
'Picture':['1'], 'Item':['1'],
|
||||
'MotherTongue':['1']
|
||||
})
|
||||
elif document.textclass in ['acmsiggraph', 'acmsiggraph-0-92']:
|
||||
skip_insets_arguments.update({'Flex CRcat':['1', '2', '3']})
|
||||
})
|
||||
elif document.textclass in ['acmsiggraph', 'acmsiggraph-0-92']:
|
||||
skip_insets_arguments.update({'Flex CRcat':['1', '2', '3']})
|
||||
elif document.textclass in ['aastex', 'aastex6', 'aastex62']:
|
||||
skip_layouts_arguments.update({'Altaffilation':['1'],})
|
||||
elif document.textclass == 'jss':
|
||||
skip_layouts_arguments.update({'Altaffilation':['1'],})
|
||||
elif document.textclass == 'jss':
|
||||
skip_insets.append('Flex Code Chunk')
|
||||
elif document.textclass == 'moderncv':
|
||||
skip_layouts_arguments.update({'Photo':['1', '2'],})
|
||||
skip_insets_arguments.update({'Flex Column':['1']})
|
||||
elif document.textclass == 'agutex':
|
||||
skip_layouts_arguments.update({'Author affiliation':['1']})
|
||||
elif document.textclass in ['ijmpd', 'ijmpc']:
|
||||
skip_layouts_arguments.update({'RomanList':['1']})
|
||||
skip_insets_arguments.update({'Flex Column':['1']})
|
||||
elif document.textclass == 'agutex':
|
||||
skip_layouts_arguments.update({'Author affiliation':['1']})
|
||||
elif document.textclass in ['ijmpd', 'ijmpc']:
|
||||
skip_layouts_arguments.update({'RomanList':['1']})
|
||||
elif document.textclass in ['jlreq-book', 'jlreq-report', 'jlreq-article']:
|
||||
skip_insets.append('Flex Warichu*')
|
||||
skip_insets.append('Flex Warichu*')
|
||||
# pathru insets per module
|
||||
if 'hpstatement' in document.get_module_list():
|
||||
skip_insets.append('Flex H-P number')
|
||||
@ -1604,11 +1599,11 @@ def convert_hebrew_parentheses(document):
|
||||
skip_insets.append('Flex Mainline')
|
||||
skip_layouts_arguments.update({'NewChessGame':['1']})
|
||||
skip_insets_arguments.update({'Flex ChessBoard':['1']})
|
||||
if 'lilypond' in document.get_module_list():
|
||||
if 'lilypond' in document.get_module_list():
|
||||
skip_insets.append('Flex LilyPond')
|
||||
if 'noweb' in document.get_module_list():
|
||||
if 'noweb' in document.get_module_list():
|
||||
skip_insets.append('Flex Chunk')
|
||||
if 'multicol' in document.get_module_list():
|
||||
if 'multicol' in document.get_module_list():
|
||||
skip_insets_arguments.update({'Flex Multiple Columns':['1']})
|
||||
i = 0
|
||||
inset_is_arg = False
|
||||
@ -3865,23 +3860,23 @@ def revert_counter_inset(document):
|
||||
if not val:
|
||||
document.warning("Can't convert counter inset at line %d!" % i)
|
||||
else:
|
||||
ert = put_cmd_in_ert("\\setcounter{%s}{%s}" % (cnt, val))
|
||||
ert = put_cmd_in_ert(f"\\setcounter{{{cnt}}}{{{val}}}")
|
||||
elif cmd == "addto":
|
||||
val = get_quoted_value(document.body, "value", i, j)
|
||||
if not val:
|
||||
document.warning("Can't convert counter inset at line %d!" % i)
|
||||
else:
|
||||
ert = put_cmd_in_ert("\\addtocounter{%s}{%s}" % (cnt, val))
|
||||
ert = put_cmd_in_ert(f"\\addtocounter{{{cnt}}}{{{val}}}")
|
||||
elif cmd == "reset":
|
||||
ert = put_cmd_in_ert("\\setcounter{%s}{0}" % (cnt))
|
||||
elif cmd == "save":
|
||||
needed_counters[cnt] = 1
|
||||
savecnt = "LyXSave" + cnt
|
||||
ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (savecnt, cnt))
|
||||
ert = put_cmd_in_ert(f"\\setcounter{{{savecnt}}}{{\\value{{{cnt}}}}}")
|
||||
elif cmd == "restore":
|
||||
needed_counters[cnt] = 1
|
||||
savecnt = "LyXSave" + cnt
|
||||
ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (cnt, savecnt))
|
||||
ert = put_cmd_in_ert(f"\\setcounter{{{cnt}}}{{\\value{{{savecnt}}}}}")
|
||||
else:
|
||||
document.warning("Unknown counter command `%s' in inset at line %d!" % (cnt, i))
|
||||
|
||||
@ -4080,7 +4075,7 @@ def revert_nopagebreak(document):
|
||||
|
||||
def revert_hrquotes(document):
|
||||
" Revert Hungarian Quotation marks "
|
||||
|
||||
|
||||
i = find_token(document.header, "\\quotes_style hungarian", 0)
|
||||
if i != -1:
|
||||
document.header[i] = "\\quotes_style polish"
|
||||
@ -4114,7 +4109,7 @@ def convert_math_refs(document):
|
||||
while i < j:
|
||||
document.body[i] = document.body[i].replace("\\prettyref", "\\formatted")
|
||||
i += 1
|
||||
|
||||
|
||||
|
||||
def revert_math_refs(document):
|
||||
i = 0
|
||||
@ -4425,12 +4420,12 @@ def convert_vcolumns2(document):
|
||||
eertins = get_containing_inset(document.body, ecvw)
|
||||
if eertins and eertins[0] == "ERT":
|
||||
del document.body[eertins[1] : eertins[2] + 1]
|
||||
|
||||
cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
|
||||
|
||||
cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
|
||||
ertins = get_containing_inset(document.body, cvw)
|
||||
if ertins and ertins[0] == "ERT":
|
||||
del(document.body[ertins[1] : ertins[2] + 1])
|
||||
|
||||
|
||||
# Convert ERT newlines (as cellvarwidth detection relies on that)
|
||||
while True:
|
||||
endcell = find_token(document.body, "</cell>", begcell)
|
||||
@ -4660,7 +4655,7 @@ def revert_index_macros(document):
|
||||
document.body[ple:ple] = put_cmd_in_ert("!") + subentry
|
||||
if len(sortkey) > 0:
|
||||
document.body[pl:pl+1] = document.body[pl:pl] + sortkey + put_cmd_in_ert("@")
|
||||
|
||||
|
||||
|
||||
def revert_starred_refs(document):
|
||||
" Revert starred refs "
|
||||
@ -4750,16 +4745,16 @@ def revert_familydefault(document):
|
||||
|
||||
if find_token(document.header, "\\use_non_tex_fonts true", 0) == -1:
|
||||
return
|
||||
|
||||
|
||||
i = find_token(document.header, "\\font_default_family", 0)
|
||||
if i == -1:
|
||||
document.warning("Malformed LyX document: Can't find \\font_default_family header")
|
||||
return
|
||||
|
||||
|
||||
dfamily = get_value(document.header, "\\font_default_family", i)
|
||||
if dfamily == "default":
|
||||
return
|
||||
|
||||
|
||||
document.header[i] = "\\font_default_family default"
|
||||
add_to_preamble(document, ["\\renewcommand{\\familydefault}{\\" + dfamily + "}"])
|
||||
|
||||
@ -5277,7 +5272,7 @@ def revert_linggloss2(document):
|
||||
del document.body[arg - 1 : endarg + 4]
|
||||
else:
|
||||
del document.body[arg : endarg + 1]
|
||||
|
||||
|
||||
arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
|
||||
endarg = find_end_of_inset(document.body, arg)
|
||||
marg4content = []
|
||||
@ -5294,7 +5289,7 @@ def revert_linggloss2(document):
|
||||
del document.body[arg - 1 : endarg + 4]
|
||||
else:
|
||||
del document.body[arg : endarg + 1]
|
||||
|
||||
|
||||
arg = find_token(document.body, "\\begin_inset Argument post:5", i, j)
|
||||
endarg = find_end_of_inset(document.body, arg)
|
||||
marg5content = []
|
||||
@ -5311,7 +5306,7 @@ def revert_linggloss2(document):
|
||||
del document.body[arg - 1 : endarg + 4]
|
||||
else:
|
||||
del document.body[arg : endarg + 1]
|
||||
|
||||
|
||||
arg = find_token(document.body, "\\begin_inset Argument post:6", i, j)
|
||||
endarg = find_end_of_inset(document.body, arg)
|
||||
marg6content = []
|
||||
@ -5340,7 +5335,7 @@ def revert_linggloss2(document):
|
||||
if len(optargcontent) > 0:
|
||||
precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
|
||||
precontent += put_cmd_in_ert("{")
|
||||
|
||||
|
||||
postcontent = put_cmd_in_ert("}")
|
||||
if len(marg1content) > 0:
|
||||
postcontent += put_cmd_in_ert("[") + marg1content + put_cmd_in_ert("]")
|
||||
@ -5369,7 +5364,7 @@ def revert_exarg2(document):
|
||||
return
|
||||
|
||||
cov_req = False
|
||||
|
||||
|
||||
layouts = ["Numbered Example", "Subexample"]
|
||||
|
||||
for layout in layouts:
|
||||
@ -5484,7 +5479,7 @@ def revert_exarg2(document):
|
||||
envname = "examples"
|
||||
elif subexpl:
|
||||
envname = "subexamples"
|
||||
|
||||
|
||||
cmd = put_cmd_in_ert("\\begin{" + envname + "}[" + optargcontent + "]")
|
||||
|
||||
# re-find end of layout
|
||||
@ -5635,10 +5630,10 @@ def revert_expreambles(document):
|
||||
revert_flex_inset(document.body, "Subexample Preamble", "\\subexpreamble")
|
||||
revert_flex_inset(document.body, "Example Postamble", "\\expostamble")
|
||||
revert_flex_inset(document.body, "Subexample Postamble", "\\subexpostamble")
|
||||
|
||||
|
||||
def revert_hequotes(document):
|
||||
" Revert Hebrew Quotation marks "
|
||||
|
||||
|
||||
i = find_token(document.header, "\\quotes_style hebrew", 0)
|
||||
if i != -1:
|
||||
document.header[i] = "\\quotes_style english"
|
||||
|
@ -1,4 +1,3 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This file is part of lyx2lyx
|
||||
# Copyright (C) 2024 The LyX team
|
||||
#
|
||||
@ -31,7 +30,7 @@ from parser_tools import (find_end_of_inset, find_end_of_layout, find_token, fin
|
||||
# find_token_backwards, find_token_exact, get_bool_value,
|
||||
# get_containing_inset, get_containing_layout, get_option_value,
|
||||
# get_quoted_value, is_in_inset,
|
||||
# del_value,
|
||||
# del_value,
|
||||
# find_complete_lines,
|
||||
# find_re, find_substring,
|
||||
# set_bool_value
|
||||
@ -83,7 +82,7 @@ def convert_url_escapes(document):
|
||||
if document.body[surl - 1] == "\\backslash":
|
||||
del document.body[surl - 1]
|
||||
i = surl
|
||||
|
||||
|
||||
|
||||
def revert_url_escapes(document):
|
||||
"""Unescape # and % in URLs with hyperref."""
|
||||
@ -121,7 +120,7 @@ def convert_url_escapes2(document):
|
||||
"""Unescape backslashes in URLs with hyperref."""
|
||||
|
||||
i = find_token(document.header, "\\use_hyperref true", 0)
|
||||
|
||||
|
||||
if i == -1 and document.textclass not in ['beamer', 'scrarticle-beamer', 'beamerposter', 'article-beamer']:
|
||||
return
|
||||
|
||||
@ -147,7 +146,7 @@ def revert_url_escapes2(document):
|
||||
"""Escape backslashes in URLs with hyperref."""
|
||||
|
||||
i = find_token(document.header, "\\use_hyperref true", 0)
|
||||
|
||||
|
||||
if i == -1 and document.textclass not in ['beamer', 'scrarticle-beamer', 'beamerposter', 'article-beamer']:
|
||||
return
|
||||
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2002-2011 Dekel Tsur <dekel@lyx.org>,
|
||||
# José Matos <jamatos@lyx.org>, Richard Kimberly Heck <rikiheck@lyx.org>
|
||||
#
|
||||
@ -464,7 +463,7 @@ def set_bool_value(lines, token, value, start=0, end=0):
|
||||
if get_quoted_value(lines, token, i, i+1) in ('0', '1'):
|
||||
lines[i] = "%s %d" % (token, value)
|
||||
else:
|
||||
lines[i] = "%s %s" % (token, str(value).lower())
|
||||
lines[i] = f"{token} {str(value).lower()}"
|
||||
|
||||
return oldvalue
|
||||
|
||||
|
@ -1,5 +1,4 @@
|
||||
#! /usr/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2018 The LyX team
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
@ -25,47 +24,47 @@ import unittest
|
||||
class TestParserTools(unittest.TestCase):
|
||||
|
||||
def test_put_cmd_in_ert(self):
|
||||
ert = [u'\\begin_inset ERT',
|
||||
u'status collapsed',
|
||||
u'',
|
||||
u'\\begin_layout Plain Layout',
|
||||
u'',
|
||||
u'',
|
||||
u'\\backslash',
|
||||
u'texttt{Gr',
|
||||
u'\\backslash',
|
||||
u'"{u}',
|
||||
u'\\backslash',
|
||||
u'ss{}e}',
|
||||
u'\\end_layout',
|
||||
u'',
|
||||
u'\\end_inset']
|
||||
ert = ['\\begin_inset ERT',
|
||||
'status collapsed',
|
||||
'',
|
||||
'\\begin_layout Plain Layout',
|
||||
'',
|
||||
'',
|
||||
'\\backslash',
|
||||
'texttt{Gr',
|
||||
'\\backslash',
|
||||
'"{u}',
|
||||
'\\backslash',
|
||||
'ss{}e}',
|
||||
'\\end_layout',
|
||||
'',
|
||||
'\\end_inset']
|
||||
ert_open = ert[:]
|
||||
ert_open[1] = u'status open'
|
||||
ert_open[1] = 'status open'
|
||||
ert_paragraph = ["\\begin_layout Standard",
|
||||
u'\\begin_inset ERT',
|
||||
u'status collapsed',
|
||||
u'',
|
||||
u'\\begin_layout Plain Layout',
|
||||
u'',
|
||||
u'',
|
||||
u'\\backslash',
|
||||
u'texttt{Gr',
|
||||
u'\\backslash',
|
||||
u'"{u}',
|
||||
u'\\backslash',
|
||||
u'ss{}e}',
|
||||
u'\\end_layout',
|
||||
u'',
|
||||
u'\\end_inset',
|
||||
u'',
|
||||
u'',
|
||||
u'\\end_layout',
|
||||
u'']
|
||||
'\\begin_inset ERT',
|
||||
'status collapsed',
|
||||
'',
|
||||
'\\begin_layout Plain Layout',
|
||||
'',
|
||||
'',
|
||||
'\\backslash',
|
||||
'texttt{Gr',
|
||||
'\\backslash',
|
||||
'"{u}',
|
||||
'\\backslash',
|
||||
'ss{}e}',
|
||||
'\\end_layout',
|
||||
'',
|
||||
'\\end_inset',
|
||||
'',
|
||||
'',
|
||||
'\\end_layout',
|
||||
'']
|
||||
self.assertEqual(put_cmd_in_ert("\\texttt{Grüße}"), ert)
|
||||
self.assertEqual(put_cmd_in_ert([u"\\texttt{Grüße}"]), ert)
|
||||
self.assertEqual(put_cmd_in_ert(u"\\texttt{Grüße}", is_open=True), ert_open)
|
||||
self.assertEqual(put_cmd_in_ert(u"\\texttt{Grüße}", as_paragraph=True), ert_paragraph)
|
||||
self.assertEqual(put_cmd_in_ert(["\\texttt{Grüße}"]), ert)
|
||||
self.assertEqual(put_cmd_in_ert("\\texttt{Grüße}", is_open=True), ert_open)
|
||||
self.assertEqual(put_cmd_in_ert("\\texttt{Grüße}", as_paragraph=True), ert_paragraph)
|
||||
|
||||
def test_latex_length(self):
|
||||
self.assertEqual(latex_length("-30.5col%"), (True, "-0.305\\columnwidth"))
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2006 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
|
@ -1,5 +1,4 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2011 The LyX team
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
@ -20,11 +19,6 @@
|
||||
|
||||
import sys, os, re, codecs
|
||||
|
||||
# Provide support for both python 2 and 3
|
||||
PY2 = sys.version_info[0] == 2
|
||||
if not PY2:
|
||||
unichr = chr
|
||||
# End of code to support for both python 2 and 3
|
||||
|
||||
def read_unicodesymbols():
|
||||
" Read the unicodesymbols list of unicode characters and corresponding commands."
|
||||
@ -60,7 +54,7 @@ def read_unicodesymbols():
|
||||
[ucs4,command,dead] = line.split(None,2)
|
||||
if command[0:1] != "\\":
|
||||
continue
|
||||
literal_char = unichr(int(ucs4, 16))
|
||||
literal_char = chr(int(ucs4, 16))
|
||||
if (line.find("notermination=text") < 0 and
|
||||
line.find("notermination=both") < 0 and command[-1] != "}"):
|
||||
command = command + "{}"
|
||||
@ -80,4 +74,3 @@ def read_unicodesymbols():
|
||||
|
||||
|
||||
unicode_reps = read_unicodesymbols()