Reformat lyx2lyx code using ruff

parent aaef6d2693
commit b4db3ea137
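The commit message does not record the exact formatter invocation. Assuming ruff's default configuration, a tree-wide reformat of the lyx2lyx sources (lib/lyx2lyx in the LyX tree) would typically be produced by something like:

    $ ruff format lib/lyx2lyx
    $ ruff check --fix lib/lyx2lyx    # optionally, safe lint autofixes as well

Both commands are an assumption for illustration, not taken from the commit; the diff below shows only mechanical changes (double quotes, wrapped argument lists, normalized spacing and blank lines), which is consistent with a ruff reformat.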
@ -17,10 +17,16 @@
|
|||||||
# along with this program; if not, write to the Free Software
|
# along with this program; if not, write to the Free Software
|
||||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||||
|
|
||||||
" The LyX module has all the rules related with different lyx file formats."
|
"The LyX module has all the rules related with different lyx file formats."
|
||||||
|
|
||||||
from parser_tools import (get_value, check_token, find_token, find_tokens,
|
from parser_tools import (
|
||||||
find_end_of, find_complete_lines)
|
get_value,
|
||||||
|
check_token,
|
||||||
|
find_token,
|
||||||
|
find_tokens,
|
||||||
|
find_end_of,
|
||||||
|
find_complete_lines,
|
||||||
|
)
|
||||||
import os.path
|
import os.path
|
||||||
import gzip
|
import gzip
|
||||||
import locale
|
import locale
|
||||||
@ -32,10 +38,11 @@ import codecs
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
import lyx2lyx_version
|
import lyx2lyx_version
|
||||||
|
|
||||||
version__ = lyx2lyx_version.version
|
version__ = lyx2lyx_version.version
|
||||||
stable_version = True
|
stable_version = True
|
||||||
except: # we are running from build directory so assume the last version
|
except: # we are running from build directory so assume the last version
|
||||||
version__ = '2.5'
|
version__ = "2.5"
|
||||||
stable_version = False
|
stable_version = False
|
||||||
|
|
||||||
default_debug__ = 2
|
default_debug__ = 2
|
||||||
@ -44,12 +51,14 @@ default_debug__ = 2
|
|||||||
####################################################################
|
####################################################################
|
||||||
# Private helper functions
|
# Private helper functions
|
||||||
|
|
||||||
|
|
||||||
def find_end_of_inset(lines, i):
|
def find_end_of_inset(lines, i):
|
||||||
" Find beginning of inset, where lines[i] is included."
|
"Find beginning of inset, where lines[i] is included."
|
||||||
return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
|
return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
|
||||||
|
|
||||||
|
|
||||||
def minor_versions(major, last_minor_version):
|
def minor_versions(major, last_minor_version):
|
||||||
""" Generate minor versions, using major as prefix and minor
|
"""Generate minor versions, using major as prefix and minor
|
||||||
versions from 0 until last_minor_version, plus the generic version.
|
versions from 0 until last_minor_version, plus the generic version.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
@ -73,28 +82,29 @@ original_tex2lyx_version = re.compile(b".*?tex2lyx ([\\d.]*)")
|
|||||||
##
|
##
|
||||||
# file format information:
|
# file format information:
|
||||||
# file, supported formats, stable release versions
|
# file, supported formats, stable release versions
|
||||||
format_relation = [("0_06", [200], minor_versions("0.6" , 4)),
|
format_relation = [
|
||||||
("0_08", [210], minor_versions("0.8" , 6) + ["0.7"]),
|
("0_06", [200], minor_versions("0.6", 4)),
|
||||||
("0_10", [210], minor_versions("0.10", 7) + ["0.9"]),
|
("0_08", [210], minor_versions("0.8", 6) + ["0.7"]),
|
||||||
("0_12", [215], minor_versions("0.12", 1) + ["0.11"]),
|
("0_10", [210], minor_versions("0.10", 7) + ["0.9"]),
|
||||||
("1_0", [215], minor_versions("1.0" , 4)),
|
("0_12", [215], minor_versions("0.12", 1) + ["0.11"]),
|
||||||
("1_1", [215], minor_versions("1.1" , 4)),
|
("1_0", [215], minor_versions("1.0", 4)),
|
||||||
("1_1_5", [216], ["1.1", "1.1.5","1.1.5.1","1.1.5.2"]),
|
("1_1", [215], minor_versions("1.1", 4)),
|
||||||
("1_1_6_0", [217], ["1.1", "1.1.6","1.1.6.1","1.1.6.2"]),
|
("1_1_5", [216], ["1.1", "1.1.5", "1.1.5.1", "1.1.5.2"]),
|
||||||
("1_1_6_3", [218], ["1.1", "1.1.6.3","1.1.6.4"]),
|
("1_1_6_0", [217], ["1.1", "1.1.6", "1.1.6.1", "1.1.6.2"]),
|
||||||
("1_2", [220], minor_versions("1.2" , 4)),
|
("1_1_6_3", [218], ["1.1", "1.1.6.3", "1.1.6.4"]),
|
||||||
("1_3", [221], minor_versions("1.3" , 7)),
|
("1_2", [220], minor_versions("1.2", 4)),
|
||||||
# Note that range(i,j) is up to j *excluded*.
|
("1_3", [221], minor_versions("1.3", 7)),
|
||||||
("1_4", list(range(222,246)), minor_versions("1.4" , 5)),
|
# Note that range(i,j) is up to j *excluded*.
|
||||||
("1_5", list(range(246,277)), minor_versions("1.5" , 7)),
|
("1_4", list(range(222, 246)), minor_versions("1.4", 5)),
|
||||||
("1_6", list(range(277,346)), minor_versions("1.6" , 10)),
|
("1_5", list(range(246, 277)), minor_versions("1.5", 7)),
|
||||||
("2_0", list(range(346,414)), minor_versions("2.0" , 8)),
|
("1_6", list(range(277, 346)), minor_versions("1.6", 10)),
|
||||||
("2_1", list(range(414,475)), minor_versions("2.1" , 5)),
|
("2_0", list(range(346, 414)), minor_versions("2.0", 8)),
|
||||||
("2_2", list(range(475,509)), minor_versions("2.2" , 4)),
|
("2_1", list(range(414, 475)), minor_versions("2.1", 5)),
|
||||||
("2_3", list(range(509,545)), minor_versions("2.3" , 7)),
|
("2_2", list(range(475, 509)), minor_versions("2.2", 4)),
|
||||||
("2_4", list(range(545,621)), minor_versions("2.4" , 0)),
|
("2_3", list(range(509, 545)), minor_versions("2.3", 7)),
|
||||||
("2_5", (), minor_versions("2.5" , 0))
|
("2_4", list(range(545, 621)), minor_versions("2.4", 0)),
|
||||||
]
|
("2_5", (), minor_versions("2.5", 0)),
|
||||||
|
]
|
||||||
|
|
||||||
####################################################################
|
####################################################################
|
||||||
# This is useful just for development versions #
|
# This is useful just for development versions #
|
||||||
@ -102,14 +112,13 @@ format_relation = [("0_06", [200], minor_versions("0.6" , 4)),
|
|||||||
if not format_relation[-1][1]:
|
if not format_relation[-1][1]:
|
||||||
step, mode = format_relation[-1][0], "convert"
|
step, mode = format_relation[-1][0], "convert"
|
||||||
convert = getattr(__import__("lyx_" + step), mode)
|
convert = getattr(__import__("lyx_" + step), mode)
|
||||||
format_relation[-1] = (step,
|
format_relation[-1] = (step, [conv[0] for conv in convert], format_relation[-1][2])
|
||||||
[conv[0] for conv in convert],
|
|
||||||
format_relation[-1][2])
|
|
||||||
# #
|
# #
|
||||||
####################################################################
|
####################################################################
|
||||||
|
|
||||||
|
|
||||||
def formats_list():
|
def formats_list():
|
||||||
" Returns a list with supported file formats."
|
"Returns a list with supported file formats."
|
||||||
formats = []
|
formats = []
|
||||||
for version in format_relation:
|
for version in format_relation:
|
||||||
for format in version[1]:
|
for format in version[1]:
|
||||||
@ -119,7 +128,7 @@ def formats_list():
|
|||||||
|
|
||||||
|
|
||||||
def format_info():
|
def format_info():
|
||||||
" Returns a list with the supported file formats."
|
"Returns a list with the supported file formats."
|
||||||
template = """
|
template = """
|
||||||
%s\tstable format: %s
|
%s\tstable format: %s
|
||||||
\tstable versions: %s
|
\tstable versions: %s
|
||||||
@ -142,20 +151,20 @@ def format_info():
|
|||||||
stable_format = str(version[1][-1])
|
stable_format = str(version[1][-1])
|
||||||
|
|
||||||
out += template % (major, stable_format, versions, formats)
|
out += template % (major, stable_format, versions, formats)
|
||||||
return out + '\n'
|
return out + "\n"
|
||||||
|
|
||||||
|
|
||||||
def get_end_format():
|
def get_end_format():
|
||||||
" Returns the more recent file format available."
|
"Returns the more recent file format available."
|
||||||
# this check will fail only when we have a new version
|
# this check will fail only when we have a new version
|
||||||
# and there is no format change yet.
|
# and there is no format change yet.
|
||||||
if format_relation[-1][1]:
|
if format_relation[-1][1]:
|
||||||
return format_relation[-1][1][-1]
|
return format_relation[-1][1][-1]
|
||||||
return format_relation[-2][1][-1]
|
return format_relation[-2][1][-1]
|
||||||
|
|
||||||
|
|
||||||
def get_backend(textclass):
|
def get_backend(textclass):
|
||||||
" For _textclass_ returns its backend."
|
"For _textclass_ returns its backend."
|
||||||
if textclass == "linuxdoc" or textclass == "manpage":
|
if textclass == "linuxdoc" or textclass == "manpage":
|
||||||
return "linuxdoc"
|
return "linuxdoc"
|
||||||
if textclass.startswith("docbook") or textclass.startswith("agu-"):
|
if textclass.startswith("docbook") or textclass.startswith("agu-"):
|
||||||
@ -164,18 +173,18 @@ def get_backend(textclass):
|
|||||||
|
|
||||||
|
|
||||||
def trim_eol(line):
|
def trim_eol(line):
|
||||||
" Remove end of line char(s)."
|
"Remove end of line char(s)."
|
||||||
if line[-1] != '\n' and line[-1] != '\r':
|
if line[-1] != "\n" and line[-1] != "\r":
|
||||||
# May happen for the last line of a document
|
# May happen for the last line of a document
|
||||||
return line
|
return line
|
||||||
if line[-2:-1] == '\r':
|
if line[-2:-1] == "\r":
|
||||||
return line[:-2]
|
return line[:-2]
|
||||||
else:
|
else:
|
||||||
return line[:-1]
|
return line[:-1]
|
||||||
|
|
||||||
|
|
||||||
def trim_eol_binary(line):
|
def trim_eol_binary(line):
|
||||||
" Remove end of line char(s)."
|
"Remove end of line char(s)."
|
||||||
if line[-1] != 10 and line[-1] != 13:
|
if line[-1] != 10 and line[-1] != 13:
|
||||||
# May happen for the last line of a document
|
# May happen for the last line of a document
|
||||||
return line
|
return line
|
||||||
@ -186,18 +195,19 @@ def trim_eol_binary(line):
|
|||||||
|
|
||||||
|
|
||||||
def get_encoding(language, inputencoding, format, cjk_encoding):
|
def get_encoding(language, inputencoding, format, cjk_encoding):
|
||||||
" Returns enconding of the lyx file"
|
"Returns enconding of the lyx file"
|
||||||
if format > 248:
|
if format > 248:
|
||||||
return "utf8"
|
return "utf8"
|
||||||
# CJK-LyX encodes files using the current locale encoding.
|
# CJK-LyX encodes files using the current locale encoding.
|
||||||
# This means that files created by CJK-LyX can only be converted using
|
# This means that files created by CJK-LyX can only be converted using
|
||||||
# the correct locale settings unless the encoding is given as commandline
|
# the correct locale settings unless the encoding is given as commandline
|
||||||
# argument.
|
# argument.
|
||||||
if cjk_encoding == 'auto':
|
if cjk_encoding == "auto":
|
||||||
return locale.getpreferredencoding()
|
return locale.getpreferredencoding()
|
||||||
elif cjk_encoding:
|
elif cjk_encoding:
|
||||||
return cjk_encoding
|
return cjk_encoding
|
||||||
from lyx2lyx_lang import lang
|
from lyx2lyx_lang import lang
|
||||||
|
|
||||||
if inputencoding == "auto" or inputencoding == "default":
|
if inputencoding == "auto" or inputencoding == "default":
|
||||||
return lang[language][3]
|
return lang[language][3]
|
||||||
if inputencoding == "":
|
if inputencoding == "":
|
||||||
@ -209,17 +219,27 @@ def get_encoding(language, inputencoding, format, cjk_encoding):
|
|||||||
return "iso-8859-15"
|
return "iso-8859-15"
|
||||||
return inputencoding
|
return inputencoding
|
||||||
|
|
||||||
|
|
||||||
##
|
##
|
||||||
# Class
|
# Class
|
||||||
#
|
#
|
||||||
class LyX_base:
|
class LyX_base:
|
||||||
"""This class carries all the information of the LyX file."""
|
"""This class carries all the information of the LyX file."""
|
||||||
|
|
||||||
def __init__(self, end_format = 0, input = '', output = '', error = '',
|
def __init__(
|
||||||
debug = default_debug__, try_hard = 0, cjk_encoding = '',
|
self,
|
||||||
final_version = '', systemlyxdir = '', language = 'english',
|
end_format=0,
|
||||||
encoding = 'auto'):
|
input="",
|
||||||
|
output="",
|
||||||
|
error="",
|
||||||
|
debug=default_debug__,
|
||||||
|
try_hard=0,
|
||||||
|
cjk_encoding="",
|
||||||
|
final_version="",
|
||||||
|
systemlyxdir="",
|
||||||
|
language="english",
|
||||||
|
encoding="auto",
|
||||||
|
):
|
||||||
"""Arguments:
|
"""Arguments:
|
||||||
end_format: final format that the file should be converted. (integer)
|
end_format: final format that the file should be converted. (integer)
|
||||||
input: the name of the input source, if empty resort to standard input.
|
input: the name of the input source, if empty resort to standard input.
|
||||||
@ -247,7 +267,9 @@ class LyX_base:
|
|||||||
# and ignore the version.
|
# and ignore the version.
|
||||||
if final_version:
|
if final_version:
|
||||||
message = "Incompatible version %s for specified format %d" % (
|
message = "Incompatible version %s for specified format %d" % (
|
||||||
final_version, self.end_format)
|
final_version,
|
||||||
|
self.end_format,
|
||||||
|
)
|
||||||
for version in format_relation:
|
for version in format_relation:
|
||||||
if self.end_format in version[1]:
|
if self.end_format in version[1]:
|
||||||
if final_version not in version[2]:
|
if final_version not in version[2]:
|
||||||
@ -277,7 +299,7 @@ class LyX_base:
|
|||||||
# This is a hack: We use '' since we don't know the default
|
# This is a hack: We use '' since we don't know the default
|
||||||
# layout of the text class. LyX will parse it as default layout.
|
# layout of the text class. LyX will parse it as default layout.
|
||||||
# FIXME: Read the layout file and use the real default layout
|
# FIXME: Read the layout file and use the real default layout
|
||||||
self.default_layout = ''
|
self.default_layout = ""
|
||||||
self.header = []
|
self.header = []
|
||||||
self.preamble = []
|
self.preamble = []
|
||||||
self.body = []
|
self.body = []
|
||||||
@ -286,16 +308,14 @@ class LyX_base:
|
|||||||
self.language = language
|
self.language = language
|
||||||
self.systemlyxdir = systemlyxdir
|
self.systemlyxdir = systemlyxdir
|
||||||
|
|
||||||
|
def warning(self, message, debug_level=default_debug__):
|
||||||
def warning(self, message, debug_level= default_debug__):
|
"""Emits warning to self.error, if the debug_level is less
|
||||||
""" Emits warning to self.error, if the debug_level is less
|
|
||||||
than the self.debug."""
|
than the self.debug."""
|
||||||
if debug_level <= self.debug:
|
if debug_level <= self.debug:
|
||||||
self.err.write("lyx2lyx warning: " + message + "\n")
|
self.err.write("lyx2lyx warning: " + message + "\n")
|
||||||
|
|
||||||
|
|
||||||
def error(self, message):
|
def error(self, message):
|
||||||
" Emits a warning and exits if not in try_hard mode."
|
"Emits a warning and exits if not in try_hard mode."
|
||||||
self.warning(message)
|
self.warning(message)
|
||||||
if not self.try_hard:
|
if not self.try_hard:
|
||||||
self.warning("Quitting.")
|
self.warning("Quitting.")
|
||||||
@ -303,7 +323,6 @@ class LyX_base:
|
|||||||
|
|
||||||
self.status = 2
|
self.status = 2
|
||||||
|
|
||||||
|
|
||||||
def read(self):
|
def read(self):
|
||||||
"""Reads a file into the self.header and
|
"""Reads a file into the self.header and
|
||||||
self.body parts, from self.input."""
|
self.body parts, from self.input."""
|
||||||
@ -325,13 +344,13 @@ class LyX_base:
|
|||||||
if first_line:
|
if first_line:
|
||||||
# Remove UTF8 BOM marker if present
|
# Remove UTF8 BOM marker if present
|
||||||
if line.startswith(codecs.BOM_UTF8):
|
if line.startswith(codecs.BOM_UTF8):
|
||||||
line = line[len(codecs.BOM_UTF8):]
|
line = line[len(codecs.BOM_UTF8) :]
|
||||||
|
|
||||||
first_line = False
|
first_line = False
|
||||||
|
|
||||||
line = trim_eol_binary(line)
|
line = trim_eol_binary(line)
|
||||||
decoded = line.decode('latin1')
|
decoded = line.decode("latin1")
|
||||||
if check_token(decoded, '\\begin_preamble'):
|
if check_token(decoded, "\\begin_preamble"):
|
||||||
while True:
|
while True:
|
||||||
line = self.input.readline()
|
line = self.input.readline()
|
||||||
if not line:
|
if not line:
|
||||||
@ -339,51 +358,60 @@ class LyX_base:
|
|||||||
self.error("Invalid LyX file: Missing body.")
|
self.error("Invalid LyX file: Missing body.")
|
||||||
|
|
||||||
line = trim_eol_binary(line)
|
line = trim_eol_binary(line)
|
||||||
decoded = line.decode('latin1')
|
decoded = line.decode("latin1")
|
||||||
if check_token(decoded, '\\end_preamble'):
|
if check_token(decoded, "\\end_preamble"):
|
||||||
break
|
break
|
||||||
|
|
||||||
if decoded.split()[:0] in ("\\layout",
|
if decoded.split()[:0] in (
|
||||||
"\\begin_layout", "\\begin_body"):
|
"\\layout",
|
||||||
|
"\\begin_layout",
|
||||||
self.warning("Malformed LyX file:"
|
"\\begin_body",
|
||||||
"Missing '\\end_preamble'."
|
):
|
||||||
"\nAdding it now and hoping"
|
self.warning(
|
||||||
"for the best.")
|
"Malformed LyX file:"
|
||||||
|
"Missing '\\end_preamble'."
|
||||||
|
"\nAdding it now and hoping"
|
||||||
|
"for the best."
|
||||||
|
)
|
||||||
|
|
||||||
self.preamble.append(line)
|
self.preamble.append(line)
|
||||||
|
|
||||||
if check_token(decoded, '\\end_preamble'):
|
if check_token(decoded, "\\end_preamble"):
|
||||||
continue
|
continue
|
||||||
|
|
||||||
line = line.rstrip()
|
line = line.rstrip()
|
||||||
if not line:
|
if not line:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if decoded.split()[0] in ("\\layout", "\\begin_layout",
|
if decoded.split()[0] in (
|
||||||
"\\begin_body", "\\begin_deeper"):
|
"\\layout",
|
||||||
|
"\\begin_layout",
|
||||||
|
"\\begin_body",
|
||||||
|
"\\begin_deeper",
|
||||||
|
):
|
||||||
self.body.append(line)
|
self.body.append(line)
|
||||||
break
|
break
|
||||||
|
|
||||||
self.header.append(line)
|
self.header.append(line)
|
||||||
|
|
||||||
i = find_token(self.header, b'\\textclass', 0)
|
i = find_token(self.header, b"\\textclass", 0)
|
||||||
if i == -1:
|
if i == -1:
|
||||||
self.warning("Malformed LyX file: Missing '\\textclass'.")
|
self.warning("Malformed LyX file: Missing '\\textclass'.")
|
||||||
i = find_token(self.header, b'\\lyxformat', 0) + 1
|
i = find_token(self.header, b"\\lyxformat", 0) + 1
|
||||||
self.header[i:i] = [b'\\textclass article']
|
self.header[i:i] = [b"\\textclass article"]
|
||||||
|
|
||||||
self.textclass = get_value(self.header, b"\\textclass", 0,
|
self.textclass = get_value(self.header, b"\\textclass", 0, default=b"")
|
||||||
default = b"")
|
self.language = get_value(self.header, b"\\language", 0, default=b"english").decode(
|
||||||
self.language = get_value(self.header, b"\\language", 0,
|
"ascii"
|
||||||
default = b"english").decode('ascii')
|
)
|
||||||
self.inputencoding = get_value(self.header, b"\\inputencoding", 0,
|
self.inputencoding = get_value(
|
||||||
default = b"auto").decode('ascii')
|
self.header, b"\\inputencoding", 0, default=b"auto"
|
||||||
|
).decode("ascii")
|
||||||
self.format = self.read_format()
|
self.format = self.read_format()
|
||||||
self.initial_format = self.format
|
self.initial_format = self.format
|
||||||
self.encoding = get_encoding(self.language,
|
self.encoding = get_encoding(
|
||||||
self.inputencoding, self.format,
|
self.language, self.inputencoding, self.format, self.cjk_encoding
|
||||||
self.cjk_encoding)
|
)
|
||||||
self.initial_version = self.read_version()
|
self.initial_version = self.read_version()
|
||||||
|
|
||||||
# Second pass over header and preamble, now we know the file encoding
|
# Second pass over header and preamble, now we know the file encoding
|
||||||
@ -404,26 +432,25 @@ class LyX_base:
|
|||||||
break
|
break
|
||||||
self.body.append(trim_eol(line))
|
self.body.append(trim_eol(line))
|
||||||
|
|
||||||
|
|
||||||
def write(self):
|
def write(self):
|
||||||
" Writes the LyX file to self.output."
|
"Writes the LyX file to self.output."
|
||||||
self.choose_output(self.output)
|
self.choose_output(self.output)
|
||||||
self.set_version()
|
self.set_version()
|
||||||
self.set_format()
|
self.set_format()
|
||||||
self.set_textclass()
|
self.set_textclass()
|
||||||
if self.encoding == "auto":
|
if self.encoding == "auto":
|
||||||
self.encoding = get_encoding(self.language, self.encoding,
|
self.encoding = get_encoding(
|
||||||
self.format, self.cjk_encoding)
|
self.language, self.encoding, self.format, self.cjk_encoding
|
||||||
|
)
|
||||||
if self.preamble:
|
if self.preamble:
|
||||||
i = find_token(self.header, '\\textclass', 0) + 1
|
i = find_token(self.header, "\\textclass", 0) + 1
|
||||||
preamble = ['\\begin_preamble'] + self.preamble + ['\\end_preamble']
|
preamble = ["\\begin_preamble"] + self.preamble + ["\\end_preamble"]
|
||||||
header = self.header[:i] + preamble + self.header[i:]
|
header = self.header[:i] + preamble + self.header[i:]
|
||||||
else:
|
else:
|
||||||
header = self.header
|
header = self.header
|
||||||
|
|
||||||
for line in header + [''] + self.body:
|
for line in header + [""] + self.body:
|
||||||
self.output.write(line+'\n')
|
self.output.write(line + "\n")
|
||||||
|
|
||||||
|
|
||||||
def choose_output(self, output):
|
def choose_output(self, output):
|
||||||
"""Choose output streams dealing transparently with
|
"""Choose output streams dealing transparently with
|
||||||
@ -435,20 +462,19 @@ class LyX_base:
|
|||||||
# interfaces.
|
# interfaces.
|
||||||
if self.compressed:
|
if self.compressed:
|
||||||
if output:
|
if output:
|
||||||
outputfileobj = open(output, 'wb')
|
outputfileobj = open(output, "wb")
|
||||||
else:
|
else:
|
||||||
# We cannot not use stdout directly since it needs text, not bytes in python 3
|
# We cannot not use stdout directly since it needs text, not bytes in python 3
|
||||||
outputfileobj = os.fdopen(sys.stdout.fileno(), 'wb')
|
outputfileobj = os.fdopen(sys.stdout.fileno(), "wb")
|
||||||
# We cannot not use gzip.open() since it is not supported by python 2
|
# We cannot not use gzip.open() since it is not supported by python 2
|
||||||
zipbuffer = gzip.GzipFile(mode='wb', fileobj=outputfileobj)
|
zipbuffer = gzip.GzipFile(mode="wb", fileobj=outputfileobj)
|
||||||
# We do not want to use different newlines on different OSes inside zipped files
|
# We do not want to use different newlines on different OSes inside zipped files
|
||||||
self.output = io.TextIOWrapper(zipbuffer, encoding=self.encoding, newline='\n')
|
self.output = io.TextIOWrapper(zipbuffer, encoding=self.encoding, newline="\n")
|
||||||
else:
|
else:
|
||||||
if output:
|
if output:
|
||||||
self.output = open(output, 'w', encoding=self.encoding)
|
self.output = open(output, "w", encoding=self.encoding)
|
||||||
else:
|
else:
|
||||||
self.output = open(sys.stdout.fileno(), 'w', encoding=self.encoding)
|
self.output = open(sys.stdout.fileno(), "w", encoding=self.encoding)
|
||||||
|
|
||||||
|
|
||||||
def choose_input(self, input):
|
def choose_input(self, input):
|
||||||
"""Choose input stream, dealing transparently with
|
"""Choose input stream, dealing transparently with
|
||||||
@ -456,27 +482,26 @@ class LyX_base:
|
|||||||
|
|
||||||
# Since we do not know the encoding yet we need to read the input as
|
# Since we do not know the encoding yet we need to read the input as
|
||||||
# bytes in binary mode, and convert later to unicode.
|
# bytes in binary mode, and convert later to unicode.
|
||||||
if input and input != '-':
|
if input and input != "-":
|
||||||
self.dir = os.path.dirname(os.path.abspath(input))
|
self.dir = os.path.dirname(os.path.abspath(input))
|
||||||
try:
|
try:
|
||||||
gzip.open(input).readline()
|
gzip.open(input).readline()
|
||||||
self.input = gzip.open(input)
|
self.input = gzip.open(input)
|
||||||
self.compressed = True
|
self.compressed = True
|
||||||
except:
|
except:
|
||||||
self.input = open(input, 'rb')
|
self.input = open(input, "rb")
|
||||||
self.compressed = False
|
self.compressed = False
|
||||||
else:
|
else:
|
||||||
self.dir = ''
|
self.dir = ""
|
||||||
self.input = os.fdopen(sys.stdin.fileno(), 'rb')
|
self.input = os.fdopen(sys.stdin.fileno(), "rb")
|
||||||
self.compressed = False
|
self.compressed = False
|
||||||
|
|
||||||
|
|
||||||
def lyxformat(self, format):
|
def lyxformat(self, format):
|
||||||
" Returns the file format representation, an integer."
|
"Returns the file format representation, an integer."
|
||||||
result = format_re.match(format)
|
result = format_re.match(format)
|
||||||
if result:
|
if result:
|
||||||
format = int(result.group(1) + result.group(2))
|
format = int(result.group(1) + result.group(2))
|
||||||
elif format == '2':
|
elif format == "2":
|
||||||
format = 200
|
format = 200
|
||||||
else:
|
else:
|
||||||
self.error(str(format) + ": " + "Invalid LyX file.")
|
self.error(str(format) + ": " + "Invalid LyX file.")
|
||||||
@ -487,16 +512,15 @@ class LyX_base:
|
|||||||
self.error(str(format) + ": " + "Format not supported.")
|
self.error(str(format) + ": " + "Format not supported.")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
def read_version(self):
|
def read_version(self):
|
||||||
""" Searchs for clues of the LyX version used to write the
|
"""Searchs for clues of the LyX version used to write the
|
||||||
file, returns the most likely value, or None otherwise."""
|
file, returns the most likely value, or None otherwise."""
|
||||||
|
|
||||||
for line in self.header:
|
for line in self.header:
|
||||||
if line[0:1] != b"#":
|
if line[0:1] != b"#":
|
||||||
return None
|
return None
|
||||||
|
|
||||||
line = line.replace(b"fix",b".")
|
line = line.replace(b"fix", b".")
|
||||||
# need to test original_tex2lyx_version first because tex2lyx
|
# need to test original_tex2lyx_version first because tex2lyx
|
||||||
# writes "#LyX file created by tex2lyx 2.2"
|
# writes "#LyX file created by tex2lyx 2.2"
|
||||||
result = original_tex2lyx_version.match(line)
|
result = original_tex2lyx_version.match(line)
|
||||||
@ -510,17 +534,20 @@ class LyX_base:
|
|||||||
res = result.group(1)
|
res = result.group(1)
|
||||||
if not res:
|
if not res:
|
||||||
self.warning(line)
|
self.warning(line)
|
||||||
#self.warning("Version %s" % result.group(1))
|
# self.warning("Version %s" % result.group(1))
|
||||||
return res.decode('ascii')
|
return res.decode("ascii")
|
||||||
self.warning(str(self.header[:2]))
|
self.warning(str(self.header[:2]))
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
def set_version(self):
|
def set_version(self):
|
||||||
" Set the header with the version used."
|
"Set the header with the version used."
|
||||||
|
|
||||||
initial_comment = " ".join(["#LyX %s created this file." % version__,
|
initial_comment = " ".join(
|
||||||
"For more info see https://www.lyx.org/"])
|
[
|
||||||
|
"#LyX %s created this file." % version__,
|
||||||
|
"For more info see https://www.lyx.org/",
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
# Simple heuristic to determine the comment that always starts
|
# Simple heuristic to determine the comment that always starts
|
||||||
# a lyx file
|
# a lyx file
|
||||||
@ -534,61 +561,56 @@ class LyX_base:
|
|||||||
# 2) the second line had the lyx version used
|
# 2) the second line had the lyx version used
|
||||||
# later we decided that 1) was a privacy risk for no gain
|
# later we decided that 1) was a privacy risk for no gain
|
||||||
# here we remove the second line effectively erasing 1)
|
# here we remove the second line effectively erasing 1)
|
||||||
if self.header[1][0] == '#':
|
if self.header[1][0] == "#":
|
||||||
del self.header[1]
|
del self.header[1]
|
||||||
|
|
||||||
|
|
||||||
def read_format(self):
|
def read_format(self):
|
||||||
" Read from the header the fileformat of the present LyX file."
|
"Read from the header the fileformat of the present LyX file."
|
||||||
for line in self.header:
|
for line in self.header:
|
||||||
result = fileformat.match(line.decode('ascii'))
|
result = fileformat.match(line.decode("ascii"))
|
||||||
if result:
|
if result:
|
||||||
return self.lyxformat(result.group(1))
|
return self.lyxformat(result.group(1))
|
||||||
else:
|
else:
|
||||||
self.error("Invalid LyX File: Missing format.")
|
self.error("Invalid LyX File: Missing format.")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
def set_format(self):
|
def set_format(self):
|
||||||
" Set the file format of the file, in the header."
|
"Set the file format of the file, in the header."
|
||||||
if self.format <= 217:
|
if self.format <= 217:
|
||||||
format = str(float(self.format)/100)
|
format = str(float(self.format) / 100)
|
||||||
else:
|
else:
|
||||||
format = str(self.format)
|
format = str(self.format)
|
||||||
i = find_token(self.header, "\\lyxformat", 0)
|
i = find_token(self.header, "\\lyxformat", 0)
|
||||||
self.header[i] = "\\lyxformat %s" % format
|
self.header[i] = "\\lyxformat %s" % format
|
||||||
|
|
||||||
|
|
||||||
def set_textclass(self):
|
def set_textclass(self):
|
||||||
i = find_token(self.header, "\\textclass", 0)
|
i = find_token(self.header, "\\textclass", 0)
|
||||||
self.header[i] = "\\textclass %s" % self.textclass
|
self.header[i] = "\\textclass %s" % self.textclass
|
||||||
|
|
||||||
|
# Note that the module will be added at the END of the extant ones
|
||||||
#Note that the module will be added at the END of the extant ones
|
|
||||||
def add_module(self, module):
|
def add_module(self, module):
|
||||||
" Append module to the modules list."
|
"Append module to the modules list."
|
||||||
i = find_token(self.header, "\\begin_modules", 0)
|
i = find_token(self.header, "\\begin_modules", 0)
|
||||||
if i == -1:
|
|
||||||
#No modules yet included
|
|
||||||
i = find_token(self.header, "\\textclass", 0)
|
|
||||||
if i == -1:
|
if i == -1:
|
||||||
self.warning("Malformed LyX document: No \\textclass!!")
|
# No modules yet included
|
||||||
return
|
i = find_token(self.header, "\\textclass", 0)
|
||||||
modinfo = ["\\begin_modules", module, "\\end_modules"]
|
if i == -1:
|
||||||
self.header[i + 1: i + 1] = modinfo
|
self.warning("Malformed LyX document: No \\textclass!!")
|
||||||
return
|
return
|
||||||
j = find_token(self.header, "\\end_modules", i)
|
modinfo = ["\\begin_modules", module, "\\end_modules"]
|
||||||
if j == -1:
|
self.header[i + 1 : i + 1] = modinfo
|
||||||
self.warning("(add_module)Malformed LyX document: No \\end_modules.")
|
return
|
||||||
return
|
j = find_token(self.header, "\\end_modules", i)
|
||||||
k = find_token(self.header, module, i)
|
if j == -1:
|
||||||
if k != -1 and k < j:
|
self.warning("(add_module)Malformed LyX document: No \\end_modules.")
|
||||||
return
|
return
|
||||||
self.header.insert(j, module)
|
k = find_token(self.header, module, i)
|
||||||
|
if k != -1 and k < j:
|
||||||
|
return
|
||||||
|
self.header.insert(j, module)
|
||||||
|
|
||||||
def del_module(self, module):
|
def del_module(self, module):
|
||||||
" Delete `module` from module list, return success."
|
"Delete `module` from module list, return success."
|
||||||
modlist = self.get_module_list()
|
modlist = self.get_module_list()
|
||||||
if module not in modlist:
|
if module not in modlist:
|
||||||
return False
|
return False
|
||||||
@ -596,56 +618,55 @@ class LyX_base:
|
|||||||
return True
|
return True
|
||||||
|
|
||||||
def get_module_list(self):
|
def get_module_list(self):
|
||||||
" Return list of modules."
|
"Return list of modules."
|
||||||
i = find_token(self.header, "\\begin_modules", 0)
|
i = find_token(self.header, "\\begin_modules", 0)
|
||||||
if (i == -1):
|
if i == -1:
|
||||||
return []
|
return []
|
||||||
j = find_token(self.header, "\\end_modules", i)
|
j = find_token(self.header, "\\end_modules", i)
|
||||||
return self.header[i + 1 : j]
|
return self.header[i + 1 : j]
|
||||||
|
|
||||||
|
|
||||||
def set_module_list(self, mlist):
|
def set_module_list(self, mlist):
|
||||||
i = find_token(self.header, "\\begin_modules", 0)
|
i = find_token(self.header, "\\begin_modules", 0)
|
||||||
if (i == -1):
|
if i == -1:
|
||||||
#No modules yet included
|
# No modules yet included
|
||||||
tclass = find_token(self.header, "\\textclass", 0)
|
tclass = find_token(self.header, "\\textclass", 0)
|
||||||
if tclass == -1:
|
if tclass == -1:
|
||||||
self.warning("Malformed LyX document: No \\textclass!!")
|
self.warning("Malformed LyX document: No \\textclass!!")
|
||||||
return
|
return
|
||||||
i = j = tclass + 1
|
i = j = tclass + 1
|
||||||
else:
|
else:
|
||||||
j = find_token(self.header, "\\end_modules", i)
|
j = find_token(self.header, "\\end_modules", i)
|
||||||
if j == -1:
|
if j == -1:
|
||||||
self.warning("(set_module_list) Malformed LyX document: No \\end_modules.")
|
self.warning("(set_module_list) Malformed LyX document: No \\end_modules.")
|
||||||
return
|
return
|
||||||
j += 1
|
j += 1
|
||||||
if mlist:
|
if mlist:
|
||||||
mlist = ['\\begin_modules'] + mlist + ['\\end_modules']
|
mlist = ["\\begin_modules"] + mlist + ["\\end_modules"]
|
||||||
self.header[i:j] = mlist
|
self.header[i:j] = mlist
|
||||||
|
|
||||||
|
|
||||||
def set_parameter(self, param, value):
|
def set_parameter(self, param, value):
|
||||||
" Set the value of the header parameter."
|
"Set the value of the header parameter."
|
||||||
i = find_token(self.header, '\\' + param, 0)
|
i = find_token(self.header, "\\" + param, 0)
|
||||||
if i == -1:
|
if i == -1:
|
||||||
self.warning('Parameter not found in the header: %s' % param, 3)
|
self.warning("Parameter not found in the header: %s" % param, 3)
|
||||||
return
|
return
|
||||||
self.header[i] = f'\\{param} {str(value)}'
|
self.header[i] = f"\\{param} {str(value)}"
|
||||||
|
|
||||||
|
|
||||||
def is_default_layout(self, layout):
|
def is_default_layout(self, layout):
|
||||||
" Check whether a layout is the default layout of this class."
|
"Check whether a layout is the default layout of this class."
|
||||||
# FIXME: Check against the real text class default layout
|
# FIXME: Check against the real text class default layout
|
||||||
if layout == 'Standard' or layout == self.default_layout:
|
if layout == "Standard" or layout == self.default_layout:
|
||||||
return 1
|
return 1
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
|
|
||||||
def convert(self):
|
def convert(self):
|
||||||
"Convert from current (self.format) to self.end_format."
|
"Convert from current (self.format) to self.end_format."
|
||||||
if self.format == self.end_format:
|
if self.format == self.end_format:
|
||||||
self.warning("No conversion needed: Target format %s "
|
self.warning(
|
||||||
"same as current format!" % self.format, default_debug__)
|
"No conversion needed: Target format %s "
|
||||||
|
"same as current format!" % self.format,
|
||||||
|
default_debug__,
|
||||||
|
)
|
||||||
return
|
return
|
||||||
|
|
||||||
mode, conversion_chain = self.chain()
|
mode, conversion_chain = self.chain()
|
||||||
@ -654,17 +675,20 @@ class LyX_base:
|
|||||||
for step in conversion_chain:
|
for step in conversion_chain:
|
||||||
steps = getattr(__import__("lyx_" + step), mode)
|
steps = getattr(__import__("lyx_" + step), mode)
|
||||||
|
|
||||||
self.warning(f"Convertion step: {step} - {mode}",
|
self.warning(f"Convertion step: {step} - {mode}", default_debug__ + 1)
|
||||||
default_debug__ + 1)
|
|
||||||
if not steps:
|
if not steps:
|
||||||
self.error("The conversion to an older "
|
self.error(
|
||||||
"format (%s) is not implemented." % self.format)
|
"The conversion to an older "
|
||||||
|
"format (%s) is not implemented." % self.format
|
||||||
|
)
|
||||||
|
|
||||||
multi_conv = len(steps) != 1
|
multi_conv = len(steps) != 1
|
||||||
for version, table in steps:
|
for version, table in steps:
|
||||||
if multi_conv and \
|
if (
|
||||||
(self.format >= version and mode == "convert") or\
|
multi_conv
|
||||||
(self.format <= version and mode == "revert"):
|
and (self.format >= version and mode == "convert")
|
||||||
|
or (self.format <= version and mode == "revert")
|
||||||
|
):
|
||||||
continue
|
continue
|
||||||
|
|
||||||
for conv in table:
|
for conv in table:
|
||||||
@ -672,24 +696,24 @@ class LyX_base:
|
|||||||
try:
|
try:
|
||||||
conv(self)
|
conv(self)
|
||||||
except:
|
except:
|
||||||
self.warning("An error occurred in %s, %s" %
|
self.warning(
|
||||||
(version, str(conv)),
|
"An error occurred in %s, %s" % (version, str(conv)),
|
||||||
default_debug__)
|
default_debug__,
|
||||||
|
)
|
||||||
if not self.try_hard:
|
if not self.try_hard:
|
||||||
raise
|
raise
|
||||||
self.status = 2
|
self.status = 2
|
||||||
else:
|
else:
|
||||||
self.warning("%lf: Elapsed time on %s" %
|
self.warning(
|
||||||
(time.time() - init_t,
|
"%lf: Elapsed time on %s" % (time.time() - init_t, str(conv)),
|
||||||
str(conv)), default_debug__ +
|
default_debug__ + 1,
|
||||||
1)
|
)
|
||||||
self.format = version
|
self.format = version
|
||||||
if self.end_format == self.format:
|
if self.end_format == self.format:
|
||||||
return
|
return
|
||||||
|
|
||||||
|
|
||||||
def chain(self):
|
def chain(self):
|
||||||
""" This is where all the decisions related with the
|
"""This is where all the decisions related with the
|
||||||
conversion are taken. It returns a list of modules needed to
|
conversion are taken. It returns a list of modules needed to
|
||||||
convert the LyX file from self.format to self.end_format"""
|
convert the LyX file from self.format to self.end_format"""
|
||||||
|
|
||||||
@ -705,9 +729,11 @@ class LyX_base:
|
|||||||
|
|
||||||
if not correct_version:
|
if not correct_version:
|
||||||
if format <= 215:
|
if format <= 215:
|
||||||
self.warning("Version does not match file format, "
|
self.warning(
|
||||||
"discarding it. (Version %s, format %d)" %
|
"Version does not match file format, "
|
||||||
(self.initial_version, self.format))
|
"discarding it. (Version %s, format %d)"
|
||||||
|
% (self.initial_version, self.format)
|
||||||
|
)
|
||||||
for rel in format_relation:
|
for rel in format_relation:
|
||||||
if format in rel[1]:
|
if format in rel[1]:
|
||||||
initial_step = rel[0]
|
initial_step = rel[0]
|
||||||
@ -730,7 +756,7 @@ class LyX_base:
|
|||||||
mode = "convert"
|
mode = "convert"
|
||||||
full_steps = []
|
full_steps = []
|
||||||
for step in format_relation:
|
for step in format_relation:
|
||||||
if initial_step <= step[0] <= final_step and step[2][0] <= self.final_version:
|
if initial_step <= step[0] <= final_step and step[2][0] <= self.final_version:
|
||||||
full_steps.append(step)
|
full_steps.append(step)
|
||||||
if full_steps[0][1][-1] == self.format:
|
if full_steps[0][1][-1] == self.format:
|
||||||
full_steps = full_steps[1:]
|
full_steps = full_steps[1:]
|
||||||
@ -743,19 +769,18 @@ class LyX_base:
|
|||||||
last_step = None
|
last_step = None
|
||||||
|
|
||||||
for step in relation_format:
|
for step in relation_format:
|
||||||
if final_step <= step[0] <= initial_step:
|
if final_step <= step[0] <= initial_step:
|
||||||
steps.append(step[0])
|
steps.append(step[0])
|
||||||
last_step = step
|
last_step = step
|
||||||
|
|
||||||
if last_step[1][-1] == self.end_format:
|
if last_step[1][-1] == self.end_format:
|
||||||
steps.pop()
|
steps.pop()
|
||||||
|
|
||||||
self.warning("Convertion mode: %s\tsteps%s" %(mode, steps), 10)
|
self.warning("Convertion mode: %s\tsteps%s" % (mode, steps), 10)
|
||||||
return mode, steps
|
return mode, steps
|
||||||
|
|
||||||
|
|
||||||
def append_local_layout(self, new_layout):
|
def append_local_layout(self, new_layout):
|
||||||
" Append `new_layout` to the local layouts."
|
"Append `new_layout` to the local layouts."
|
||||||
# new_layout may be a string or a list of strings (lines)
|
# new_layout may be a string or a list of strings (lines)
|
||||||
try:
|
try:
|
||||||
new_layout = new_layout.splitlines()
|
new_layout = new_layout.splitlines()
|
||||||
@ -768,7 +793,7 @@ class LyX_base:
|
|||||||
# this should not happen
|
# this should not happen
|
||||||
self.warning("Malformed LyX document! No \\language header found!")
|
self.warning("Malformed LyX document! No \\language header found!")
|
||||||
return
|
return
|
||||||
self.header[k : k] = ["\\begin_local_layout", "\\end_local_layout"]
|
self.header[k:k] = ["\\begin_local_layout", "\\end_local_layout"]
|
||||||
i = k
|
i = k
|
||||||
|
|
||||||
j = find_end_of(self.header, i, "\\begin_local_layout", "\\end_local_layout")
|
j = find_end_of(self.header, i, "\\begin_local_layout", "\\end_local_layout")
|
||||||
@ -777,23 +802,25 @@ class LyX_base:
|
|||||||
self.warning("Malformed LyX document: Can't find end of local layout!")
|
self.warning("Malformed LyX document: Can't find end of local layout!")
|
||||||
return
|
return
|
||||||
|
|
||||||
self.header[i+1 : i+1] = new_layout
|
self.header[i + 1 : i + 1] = new_layout
|
||||||
|
|
||||||
def del_local_layout(self, layout_def):
|
def del_local_layout(self, layout_def):
|
||||||
" Delete `layout_def` from local layouts, return success."
|
"Delete `layout_def` from local layouts, return success."
|
||||||
i = find_complete_lines(self.header, layout_def)
|
i = find_complete_lines(self.header, layout_def)
|
||||||
if i == -1:
|
if i == -1:
|
||||||
return False
|
return False
|
||||||
j = i+len(layout_def)
|
j = i + len(layout_def)
|
||||||
if (self.header[i-1] == "\\begin_local_layout" and
|
if (
|
||||||
self.header[j] == "\\end_local_layout"):
|
self.header[i - 1] == "\\begin_local_layout"
|
||||||
i -=1
|
and self.header[j] == "\\end_local_layout"
|
||||||
j +=1
|
):
|
||||||
|
i -= 1
|
||||||
|
j += 1
|
||||||
self.header[i:j] = []
|
self.header[i:j] = []
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def del_from_header(self, lines):
|
def del_from_header(self, lines):
|
||||||
" Delete `lines` from the document header, return success."
|
"Delete `lines` from the document header, return success."
|
||||||
i = find_complete_lines(self.header, lines)
|
i = find_complete_lines(self.header, lines)
|
||||||
if i == -1:
|
if i == -1:
|
||||||
return False
|
return False
|
||||||
@ -801,6 +828,7 @@ class LyX_base:
|
|||||||
self.header[i:j] = []
|
self.header[i:j] = []
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
# Part of an unfinished attempt to make lyx2lyx gave a more
|
# Part of an unfinished attempt to make lyx2lyx gave a more
|
||||||
# structured view of the document.
|
# structured view of the document.
|
||||||
# def get_toc(self, depth = 4):
|
# def get_toc(self, depth = 4):
|
||||||
@ -865,19 +893,37 @@ class LyX_base:
|
|||||||
|
|
||||||
|
|
||||||
class File(LyX_base):
|
class File(LyX_base):
|
||||||
" This class reads existing LyX files."
|
"This class reads existing LyX files."
|
||||||
|
|
||||||
def __init__(self, end_format = 0, input = '', output = '', error = '',
|
def __init__(
|
||||||
debug = default_debug__, try_hard = 0, cjk_encoding = '',
|
self,
|
||||||
final_version = '', systemlyxdir = ''):
|
end_format=0,
|
||||||
LyX_base.__init__(self, end_format, input, output, error,
|
input="",
|
||||||
debug, try_hard, cjk_encoding, final_version,
|
output="",
|
||||||
systemlyxdir)
|
error="",
|
||||||
|
debug=default_debug__,
|
||||||
|
try_hard=0,
|
||||||
|
cjk_encoding="",
|
||||||
|
final_version="",
|
||||||
|
systemlyxdir="",
|
||||||
|
):
|
||||||
|
LyX_base.__init__(
|
||||||
|
self,
|
||||||
|
end_format,
|
||||||
|
input,
|
||||||
|
output,
|
||||||
|
error,
|
||||||
|
debug,
|
||||||
|
try_hard,
|
||||||
|
cjk_encoding,
|
||||||
|
final_version,
|
||||||
|
systemlyxdir,
|
||||||
|
)
|
||||||
self.read()
|
self.read()
|
||||||
|
|
||||||
|
|
||||||
# FIXME: header settings are completely outdated, don't use like this
|
# FIXME: header settings are completely outdated, don't use like this
|
||||||
#class NewFile(LyX_base):
|
# class NewFile(LyX_base):
|
||||||
# " This class is to create new LyX files."
|
# " This class is to create new LyX files."
|
||||||
# def set_header(self, **params):
|
# def set_header(self, **params):
|
||||||
# # set default values
|
# # set default values
|
||||||
@ -934,7 +980,7 @@ class File(LyX_base):
|
|||||||
|
|
||||||
# Part of an unfinished attempt to make lyx2lyx gave a more
|
# Part of an unfinished attempt to make lyx2lyx gave a more
|
||||||
# structured view of the document.
|
# structured view of the document.
|
||||||
#class Paragraph:
|
# class Paragraph:
|
||||||
# # unfinished implementation, it is missing the Text and Insets
|
# # unfinished implementation, it is missing the Text and Insets
|
||||||
# # representation.
|
# # representation.
|
||||||
# " This class represents the LyX paragraphs."
|
# " This class represents the LyX paragraphs."
|
||||||
|
@ -15,28 +15,29 @@
|
|||||||
# along with this program; if not, write to the Free Software
|
# along with this program; if not, write to the Free Software
|
||||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||||
|
|
||||||
""" This module parses lib/languages and prints it as a python
|
"""This module parses lib/languages and prints it as a python
|
||||||
dictionary, ready to use by other python modules"""
|
dictionary, ready to use by other python modules"""
|
||||||
|
|
||||||
import pprint
|
import pprint
|
||||||
|
|
||||||
|
|
||||||
def parse_line(line):
|
def parse_line(line):
|
||||||
" Parse line from languages and return it as a list. "
|
"Parse line from languages and return it as a list."
|
||||||
j = 0
|
j = 0
|
||||||
tmp = []
|
tmp = []
|
||||||
while j< len(line):
|
while j < len(line):
|
||||||
token = line[j:].split()[0]
|
token = line[j:].split()[0]
|
||||||
if not token:
|
if not token:
|
||||||
break
|
break
|
||||||
if token[0] != '"':
|
if token[0] != '"':
|
||||||
tmp.append(token)
|
tmp.append(token)
|
||||||
j += len(token) + 1
|
j += len(token) + 1
|
||||||
elif line[j+1:].find('"') != -1:
|
elif line[j + 1 :].find('"') != -1:
|
||||||
k = line.find('"', j + 1)
|
k = line.find('"', j + 1)
|
||||||
tmp.append(line[j+1:k])
|
tmp.append(line[j + 1 : k])
|
||||||
j = k + 1
|
j = k + 1
|
||||||
else:
|
else:
|
||||||
tmp.append(line[j+1:])
|
tmp.append(line[j + 1 :])
|
||||||
break
|
break
|
||||||
|
|
||||||
while j < len(line) and line[j].isspace():
|
while j < len(line) and line[j].isspace():
|
||||||
@ -45,17 +46,16 @@ def parse_line(line):
|
|||||||
return tmp
|
return tmp
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == "__main__":
|
||||||
lines = open("../languages", "rb")
|
lines = open("../languages", "rb")
|
||||||
lang = {}
|
lang = {}
|
||||||
for line in lines:
|
for line in lines:
|
||||||
if line[:1] != '#':
|
if line[:1] != "#":
|
||||||
tmp = parse_line(line[:-1])
|
tmp = parse_line(line[:-1])
|
||||||
lang[tmp[0]] = tmp[1:]
|
lang[tmp[0]] = tmp[1:]
|
||||||
|
|
||||||
|
print("# This file is generated by generate_incoding_info.py from lib/languages file.")
|
||||||
print ("# This file is generated by generate_incoding_info.py from lib/languages file.")
|
print("# Do not change this file directly.")
|
||||||
print ("# Do not change this file directly.")
|
print()
|
||||||
print ()
|
print("lang = ", end=" ")
|
||||||
print ("lang = ", end = " ")
|
|
||||||
pprint.pprint(lang)
|
pprint.pprint(lang)
|
||||||
|
@ -1,103 +1,88 @@
|
|||||||
# This file is generated by generate_incoding_info.py from lib/languages file.
|
# This file is generated by generate_incoding_info.py from lib/languages file.
|
||||||
# Do not change this file directly.
|
# Do not change this file directly.
|
||||||
|
|
||||||
lang = {'afrikaans': ['afrikaans', 'Afrikaans', 'false', 'iso8859-1', 'af_ZA', ''],
|
lang = {
|
||||||
'american': ['american', 'American', 'false', 'iso8859-1', 'en_US', ''],
|
"afrikaans": ["afrikaans", "Afrikaans", "false", "iso8859-1", "af_ZA", ""],
|
||||||
'arabic': ['arabic', 'Arabic', 'true', 'iso8859-6', 'ar_SA', ''],
|
"american": ["american", "American", "false", "iso8859-1", "en_US", ""],
|
||||||
'austrian': ['austrian', 'Austrian', 'false', 'iso8859-1', 'de_AT', ''],
|
"arabic": ["arabic", "Arabic", "true", "iso8859-6", "ar_SA", ""],
|
||||||
'bahasa': ['bahasa', 'Bahasa', 'false', 'iso8859-1', 'in_ID', ''],
|
"austrian": ["austrian", "Austrian", "false", "iso8859-1", "de_AT", ""],
|
||||||
'basque': ['basque', 'Basque', 'false', 'iso8859-1', 'eu_ES', ''],
|
"bahasa": ["bahasa", "Bahasa", "false", "iso8859-1", "in_ID", ""],
|
||||||
'belarusian': ['belarusian', 'Belarusian', 'false', 'cp1251', 'be_BY', ''],
|
"basque": ["basque", "Basque", "false", "iso8859-1", "eu_ES", ""],
|
||||||
'brazil': ['brazil',
|
"belarusian": ["belarusian", "Belarusian", "false", "cp1251", "be_BY", ""],
|
||||||
'Portuguese (Brazil)',
|
"brazil": ["brazil", "Portuguese (Brazil)", "false", "iso8859-1", "pt_BR", ""],
|
||||||
'false',
|
"breton": ["breton", "Breton", "false", "iso8859-1", "br_FR", ""],
|
||||||
'iso8859-1',
|
"british": ["british", "British", "false", "iso8859-1", "en_GB", ""],
|
||||||
'pt_BR',
|
"bulgarian": ["bulgarian", "Bulgarian", "false", "cp1251", "bg_BG", ""],
|
||||||
''],
|
"canadian": ["canadian", "Canadian", "false", "iso8859-1", "en_CA", ""],
|
||||||
'breton': ['breton', 'Breton', 'false', 'iso8859-1', 'br_FR', ''],
|
"canadien": ["canadien", "French Canadian", "false", "iso8859-1", "fr_CA", ""],
|
||||||
'british': ['british', 'British', 'false', 'iso8859-1', 'en_GB', ''],
|
"catalan": ["catalan", "Catalan", "false", "iso8859-1", "ca_ES", ""],
|
||||||
'bulgarian': ['bulgarian', 'Bulgarian', 'false', 'cp1251', 'bg_BG', ''],
|
"croatian": ["croatian", "Croatian", "false", "iso8859-2", "hr_HR", ""],
|
||||||
'canadian': ['canadian', 'Canadian', 'false', 'iso8859-1', 'en_CA', ''],
|
"czech": ["czech", "Czech", "false", "iso8859-2", "cs_CZ", ""],
|
||||||
'canadien': ['canadien',
|
"danish": ["danish", "Danish", "false", "iso8859-1", "da_DK", ""],
|
||||||
'French Canadian',
|
"default": ["default", "default", "false", "iso8859-1", "C", ""],
|
||||||
'false',
|
"dutch": ["dutch", "Dutch", "false", "iso8859-1", "nl_NL", ""],
|
||||||
'iso8859-1',
|
"english": ["english", "English", "false", "iso8859-1", "en_US", ""],
|
||||||
'fr_CA',
|
"esperanto": ["esperanto", "Esperanto", "false", "iso8859-3", "eo", ""],
|
||||||
''],
|
"estonian": ["estonian", "Estonian", "false", "iso8859-1", "et_EE", ""],
|
||||||
'catalan': ['catalan', 'Catalan', 'false', 'iso8859-1', 'ca_ES', ''],
|
"finnish": ["finnish", "Finnish", "false", "iso8859-1", "fi_FI", ""],
|
||||||
'croatian': ['croatian', 'Croatian', 'false', 'iso8859-2', 'hr_HR', ''],
|
"french": [
|
||||||
'czech': ['czech', 'Czech', 'false', 'iso8859-2', 'cs_CZ', ''],
|
"french",
|
||||||
'danish': ['danish', 'Danish', 'false', 'iso8859-1', 'da_DK', ''],
|
"French",
|
||||||
'default': ['default', 'default', 'false', 'iso8859-1', 'C', ''],
|
"false",
|
||||||
'dutch': ['dutch', 'Dutch', 'false', 'iso8859-1', 'nl_NL', ''],
|
"iso8859-1",
|
||||||
'english': ['english', 'English', 'false', 'iso8859-1', 'en_US', ''],
|
"fr_FR",
|
||||||
'esperanto': ['esperanto', 'Esperanto', 'false', 'iso8859-3', 'eo', ''],
|
"\\addto\\extrasfrench{\\providecommand{\\og}{\\leavevmode\\flqq~}\\providecommand{\\fg}{\\ifdim\\lastskip>\\z@\\unskip\\fi~\\frqq}}",
|
||||||
'estonian': ['estonian', 'Estonian', 'false', 'iso8859-1', 'et_EE', ''],
|
],
|
||||||
'finnish': ['finnish', 'Finnish', 'false', 'iso8859-1', 'fi_FI', ''],
|
"frenchb": [
|
||||||
'french': ['french',
|
"french",
|
||||||
'French',
|
"French",
|
||||||
'false',
|
"false",
|
||||||
'iso8859-1',
|
"iso8859-1",
|
||||||
'fr_FR',
|
"fr_FR",
|
||||||
'\\addto\\extrasfrench{\\providecommand{\\og}{\\leavevmode\\flqq~}\\providecommand{\\fg}{\\ifdim\\lastskip>\\z@\\unskip\\fi~\\frqq}}'],
|
"",
|
||||||
'frenchb': ['french', 'French', 'false', 'iso8859-1', 'fr_FR', ''], # for compatibility reasons
|
], # for compatibility reasons
|
||||||
'galician': ['galician', 'Galician', 'false', 'iso8859-1', 'gl_ES', ''],
|
"galician": ["galician", "Galician", "false", "iso8859-1", "gl_ES", ""],
|
||||||
'german': ['german', 'German', 'false', 'iso8859-1', 'de_DE', ''],
|
"german": ["german", "German", "false", "iso8859-1", "de_DE", ""],
|
||||||
'greek': ['greek', 'Greek', 'false', 'iso8859-7', 'el_GR', ''],
|
"greek": ["greek", "Greek", "false", "iso8859-7", "el_GR", ""],
|
||||||
'hebrew': ['hebrew', 'Hebrew', 'true', 'cp1255', 'he_IL', ''],
|
"hebrew": ["hebrew", "Hebrew", "true", "cp1255", "he_IL", ""],
|
||||||
'icelandic': ['icelandic', 'Icelandic', 'false', 'iso8859-1', 'is_IS', ''],
|
"icelandic": ["icelandic", "Icelandic", "false", "iso8859-1", "is_IS", ""],
|
||||||
'irish': ['irish', 'Irish', 'false', 'iso8859-1', 'ga_IE', ''],
|
"irish": ["irish", "Irish", "false", "iso8859-1", "ga_IE", ""],
|
||||||
'italian': ['italian', 'Italian', 'false', 'iso8859-1', 'it_IT', ''],
|
"italian": ["italian", "Italian", "false", "iso8859-1", "it_IT", ""],
|
||||||
'kazakh': ['kazakh', 'Kazakh', 'false', 'pt154', 'kk_KZ', ''],
|
"kazakh": ["kazakh", "Kazakh", "false", "pt154", "kk_KZ", ""],
|
||||||
'latvian': ['latvian', 'Latvian', 'false', 'iso8859-13', 'lv_LV', ''],
|
"latvian": ["latvian", "Latvian", "false", "iso8859-13", "lv_LV", ""],
|
||||||
'lithuanian': ['lithuanian',
|
"lithuanian": ["lithuanian", "Lithuanian", "false", "iso8859-13", "lt_LT", ""],
|
||||||
'Lithuanian',
|
"magyar": ["magyar", "Magyar", "false", "iso8859-2", "hu_HU", ""],
|
||||||
'false',
|
"naustrian": [
|
||||||
'iso8859-13',
|
"naustrian",
|
||||||
'lt_LT',
|
"Austrian (new spelling)",
|
||||||
''],
|
"false",
|
||||||
'magyar': ['magyar', 'Magyar', 'false', 'iso8859-2', 'hu_HU', ''],
|
"iso8859-1",
|
||||||
'naustrian': ['naustrian',
|
"de_AT",
|
||||||
'Austrian (new spelling)',
|
"",
|
||||||
'false',
|
],
|
||||||
'iso8859-1',
|
"ngerman": ["ngerman", "German (new spelling)", "false", "iso8859-1", "de_DE", ""],
|
||||||
'de_AT',
|
"norsk": ["norsk", "Norsk", "false", "iso8859-1", "no_NO", ""],
|
||||||
''],
|
"nynorsk": ["nynorsk", "Nynorsk", "false", "iso8859-1", "nn_NO", ""],
|
||||||
'ngerman': ['ngerman',
|
"polish": ["polish", "Polish", "false", "iso8859-2", "pl_PL", ""],
|
||||||
'German (new spelling)',
|
"portuges": ["portuges", "Portugese", "false", "iso8859-1", "pt_PT", ""],
|
||||||
'false',
|
"romanian": ["romanian", "Romanian", "false", "iso8859-2", "ro_RO", ""],
|
||||||
'iso8859-1',
|
"russian": ["russian", "Russian", "false", "koi8-r", "ru_RU", ""],
|
||||||
'de_DE',
|
"scottish": ["scottish", "Scottish", "false", "iso8859-1", "gd_GB", ""],
|
||||||
''],
|
"serbian": ["croatian", "Serbian", "false", "iso8859-5", "sr_HR", ""],
|
||||||
'norsk': ['norsk', 'Norsk', 'false', 'iso8859-1', 'no_NO', ''],
|
"serbocroatian": ["croatian", "Serbo-Croatian", "false", "iso8859-2", "sh_HR", ""],
|
||||||
'nynorsk': ['nynorsk', 'Nynorsk', 'false', 'iso8859-1', 'nn_NO', ''],
|
"slovak": ["slovak", "Slovak", "false", "iso8859-2", "sk_SK", ""],
|
||||||
'polish': ['polish', 'Polish', 'false', 'iso8859-2', 'pl_PL', ''],
|
"slovene": ["slovene", "Slovene", "false", "iso8859-2", "sl_SI", ""],
|
||||||
'portuges': ['portuges', 'Portugese', 'false', 'iso8859-1', 'pt_PT', ''],
|
"spanish": [
|
||||||
'romanian': ['romanian', 'Romanian', 'false', 'iso8859-2', 'ro_RO', ''],
|
"spanish",
|
||||||
'russian': ['russian', 'Russian', 'false', 'koi8-r', 'ru_RU', ''],
|
"Spanish",
|
||||||
'scottish': ['scottish', 'Scottish', 'false', 'iso8859-1', 'gd_GB', ''],
|
"false",
|
||||||
'serbian': ['croatian', 'Serbian', 'false', 'iso8859-5', 'sr_HR', ''],
|
"iso8859-1",
|
||||||
'serbocroatian': ['croatian',
|
"es_ES",
|
||||||
'Serbo-Croatian',
|
"\\deactivatetilden",
|
||||||
'false',
|
],
|
||||||
'iso8859-2',
|
"swedish": ["swedish", "Swedish", "false", "iso8859-1", "sv_SE", ""],
|
||||||
'sh_HR',
|
"thai": ["thai", "Thai", "false", "tis620-0", "th_TH", "\\usepackage{thswitch}"],
|
||||||
''],
|
"turkish": ["turkish", "Turkish", "false", "iso8859-9", "tr_TR", ""],
|
||||||
'slovak': ['slovak', 'Slovak', 'false', 'iso8859-2', 'sk_SK', ''],
|
"ukrainian": ["ukrainian", "Ukrainian", "false", "koi8-u", "uk_UA", ""],
|
||||||
'slovene': ['slovene', 'Slovene', 'false', 'iso8859-2', 'sl_SI', ''],
|
"welsh": ["welsh", "Welsh", "false", "iso8859-1", "cy_GB", ""],
|
||||||
'spanish': ['spanish',
|
}
|
||||||
'Spanish',
|
|
||||||
'false',
|
|
||||||
'iso8859-1',
|
|
||||||
'es_ES',
|
|
||||||
'\\deactivatetilden'],
|
|
||||||
'swedish': ['swedish', 'Swedish', 'false', 'iso8859-1', 'sv_SE', ''],
|
|
||||||
'thai': ['thai',
|
|
||||||
'Thai',
|
|
||||||
'false',
|
|
||||||
'tis620-0',
|
|
||||||
'th_TH',
|
|
||||||
'\\usepackage{thswitch}'],
|
|
||||||
'turkish': ['turkish', 'Turkish', 'false', 'iso8859-9', 'tr_TR', ''],
|
|
||||||
'ukrainian': ['ukrainian', 'Ukrainian', 'false', 'koi8-u', 'uk_UA', ''],
|
|
||||||
'welsh': ['welsh', 'Welsh', 'false', 'iso8859-1', 'cy_GB', '']}
|
|
||||||
|
@@ -15,7 +15,7 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""
This module offers several free functions to help with lyx2lyx'ing.
More documentation is below, but here is a quick guide to what
they do. Optional arguments are marked by brackets.
@@ -87,41 +87,48 @@ revert_language(document, lyxname, babelname="", polyglossianame=""):
  Reverts native language support to ERT
  If babelname or polyglossianame is empty, it is assumed
  this language package is not supported for the given language.
"""

import re
import sys
from parser_tools import (
    find_token,
    find_end_of_inset,
    get_containing_layout,
    get_containing_inset,
    get_value,
    get_bool_value,
)
from unicode_symbols import unicode_reps


# This will accept either a list of lines or a single line.
# It is bad practice to pass something with embedded newlines,
# though we will handle that.
def add_to_preamble(document, text):
    "Add text to the preamble if it is not already there."

    if not type(text) is list:
        # split on \n just in case
        # it'll give us the one element list we want
        # if there's no \n, too
        text = text.split("\n")

    i = 0
    prelen = len(document.preamble)
    while True:
        i = find_token(document.preamble, text[0], i)
        if i == -1:
            break
        # we need a perfect match
        matched = True
        for line in text:
            if i >= prelen or line != document.preamble[i]:
                matched = False
                break
            i += 1
        if matched:
            return

    document.preamble.extend(["% Added by lyx2lyx"])
    document.preamble.extend(text)
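A quick usage sketch for the reformatted add_to_preamble (the FakeDoc stub is hypothetical; real callers pass the lyx2lyx document object, which carries a preamble list):

class FakeDoc:
    # stand-in exposing only what add_to_preamble touches
    def __init__(self):
        self.preamble = []

doc = FakeDoc()
add_to_preamble(doc, "\\usepackage{textcomp}")
add_to_preamble(doc, "\\usepackage{textcomp}")  # exact duplicate, so a no-op
assert doc.preamble == ["% Added by lyx2lyx", "\\usepackage{textcomp}"]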
@@ -129,14 +136,14 @@ def add_to_preamble(document, text):

# Note that text can be either a list of lines or a single line.
# It should really be a list.
def insert_to_preamble(document, text, index=0):
    """Insert text to the preamble at a given line"""

    if not type(text) is list:
        # split on \n just in case
        # it'll give us the one element list we want
        # if there's no \n, too
        text = text.split("\n")

    text.insert(0, "% Added by lyx2lyx")
    document.preamble[index:index] = text
@@ -146,6 +153,7 @@ def insert_to_preamble(document, text, index = 0):
# Created from the reversed list to keep the first of alternative definitions.
licr_table = {ord(ch): cmd for cmd, ch in unicode_reps[::-1]}


def put_cmd_in_ert(cmd, is_open=False, as_paragraph=False):
    """
    Return ERT inset wrapping `cmd` as a list of strings.
@@ -156,15 +164,27 @@ def put_cmd_in_ert(cmd, is_open=False, as_paragraph=False):
    `as_paragraph` wraps the ERT inset in a Standard paragraph.
    """

    status = {False: "collapsed", True: "open"}
    ert_inset = [
        "\\begin_inset ERT",
        "status %s" % status[is_open],
        "",
        "\\begin_layout Plain Layout",
        "",
        # content here ([5:5])
        "\\end_layout",
        "",
        "\\end_inset",
    ]

    paragraph = [
        "\\begin_layout Standard",
        # content here ([1:1])
        "",
        "",
        "\\end_layout",
        "",
    ]
    # ensure cmd is a unicode instance and make it "LyX safe".
    if isinstance(cmd, list):
        cmd = "\n".join(cmd)
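For orientation, a sketch of calling the finished function (assuming, as the full definition suggests, that the default as_paragraph=False returns the bare inset list):

ert = put_cmd_in_ert("\\noindent")
# the command lands inside a collapsed ERT inset
assert ert[0] == "\\begin_inset ERT"
assert ert[1] == "status collapsed"
assert ert[-1] == "\\end_inset"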
@@ -178,8 +198,8 @@ def put_cmd_in_ert(cmd, is_open=False, as_paragraph=False):
    return paragraph


def get_ert(lines, i, verbatim=False):
    "Convert an ERT inset into LaTeX."
    if not lines[i].startswith("\\begin_inset ERT"):
        return ""
    j = find_end_of_inset(lines, i)
@@ -196,10 +216,10 @@ def get_ert(lines, i, verbatim = False):
                first = False
            else:
                ret = ret + "\n"
            while i + 1 < j and lines[i + 1] == "":
                i = i + 1
        elif lines[i] == "\\end_layout":
            while i + 1 < j and lines[i + 1] == "":
                i = i + 1
        elif lines[i] == "\\backslash":
            if verbatim:
@@ -213,7 +233,7 @@ def get_ert(lines, i, verbatim = False):


def lyx2latex(document, lines):
    "Convert some LyX stuff into corresponding LaTeX stuff, as best we can."

    content = ""
    ert_end = 0
@@ -221,118 +241,137 @@ def lyx2latex(document, lines):
    hspace = ""

    for curline in range(len(lines)):
        line = lines[curline]
        if line.startswith("\\begin_inset Note Note"):
            # We want to skip LyX notes, so remember where the inset ends
            note_end = find_end_of_inset(lines, curline + 1)
            continue
        elif note_end >= curline:
            # Skip LyX notes
            continue
        elif line.startswith("\\begin_inset ERT"):
            # We don't want to replace things inside ERT, so figure out
            # where the end of the inset is.
            ert_end = find_end_of_inset(lines, curline + 1)
            continue
        elif line.startswith("\\begin_inset Formula"):
            line = line[20:]
        elif line.startswith("\\begin_inset Quotes"):
            # For now, we do a very basic reversion. Someone who understands
            # quotes is welcome to fix it up.
            qtype = line[20:].strip()
            # lang = qtype[0]
            side = qtype[1]
            dbls = qtype[2]
            if side == "l":
                if dbls == "d":
                    line = "``"
                else:
                    line = "`"
            else:
                if dbls == "d":
                    line = "''"
                else:
                    line = "'"
        elif line.startswith("\\begin_inset Newline newline"):
            line = "\\\\ "
        elif line.startswith("\\noindent"):
            line = "\\noindent "  # we need the space behind the command
        elif line.startswith("\\begin_inset space"):
            line = line[18:].strip()
            if line.startswith("\\hspace"):
                # Account for both \hspace and \hspace*
                hspace = line[:-2]
                continue
            elif line == "\\space{}":
                line = "\\ "
            elif line == "\\thinspace{}":
                line = "\\,"
        elif hspace != "":
            # The LyX length is in line[8:], after the \length keyword
            length = latex_length(line[8:])[1]
            line = hspace + "{" + length + "}"
            hspace = ""
        elif (
            line.isspace()
            or line.startswith("\\begin_layout")
            or line.startswith("\\end_layout")
            or line.startswith("\\begin_inset")
            or line.startswith("\\end_inset")
            or line.startswith("\\lang")
            or line.strip() == "status collapsed"
            or line.strip() == "status open"
        ):
            # skip all that stuff
            continue

        # this needs to be added to the preamble because of cases like
        # \textmu, \textbackslash, etc.
        add_to_preamble(
            document,
            [
                "% added by lyx2lyx for converted index entries",
                "\\@ifundefined{textmu}",
                " {\\usepackage{textcomp}}{}",
            ],
        )
        # a lossless reversion is not possible
        # try at least to handle some common insets and settings
        if ert_end >= curline:
            line = line.replace(r"\backslash", "\\")
        else:
            # No need to add "{}" after single-nonletter macros
            line = line.replace("&", "\\&")
            line = line.replace("#", "\\#")
            line = line.replace("^", "\\textasciicircum{}")
            line = line.replace("%", "\\%")
            line = line.replace("_", "\\_")
            line = line.replace("$", "\\$")

        # Do the LyX text --> LaTeX conversion
        for rep in unicode_reps:
            line = line.replace(rep[1], rep[0])
        line = line.replace(r"\backslash", r"\textbackslash{}")
        line = line.replace(r"\series bold", r"\bfseries{}").replace(
            r"\series default", r"\mdseries{}"
        )
        line = line.replace(r"\shape italic", r"\itshape{}").replace(
            r"\shape smallcaps", r"\scshape{}"
        )
        line = line.replace(r"\shape slanted", r"\slshape{}").replace(
            r"\shape default", r"\upshape{}"
        )
        line = line.replace(r"\emph on", r"\em{}").replace(r"\emph default", r"\em{}")
        line = line.replace(r"\noun on", r"\scshape{}").replace(
            r"\noun default", r"\upshape{}"
        )
        line = line.replace(r"\bar under", r"\underbar{").replace(r"\bar default", r"}")
        line = line.replace(r"\family sans", r"\sffamily{}").replace(
            r"\family default", r"\normalfont{}"
        )
        line = line.replace(r"\family typewriter", r"\ttfamily{}").replace(
            r"\family roman", r"\rmfamily{}"
        )
        line = line.replace(r"\InsetSpace ", r"").replace(r"\SpecialChar ", r"")
        content += line
    return content


def lyx2verbatim(document, lines):
    "Convert some LyX stuff into corresponding verbatim stuff, as best we can."

    content = lyx2latex(document, lines)
    content = re.sub(r"\\(?!backslash)", r"\n\\backslash\n", content)

    return content


def latex_length(slen):
    """
    Convert lengths to their LaTeX representation. Returns (bool, length),
    where the bool tells us if it was a percentage, and the length is the
    LaTeX representation.
    """
    i = 0
    percent = False
    # the slen has the form
@@ -342,14 +381,15 @@ def latex_length(slen):
    # the + always precedes the -

    # Convert relative lengths to LaTeX units
    units = {
        "col%": "\\columnwidth",
        "text%": "\\textwidth",
        "page%": "\\paperwidth",
        "line%": "\\linewidth",
        "theight%": "\\textheight",
        "pheight%": "\\paperheight",
        "baselineskip%": "\\baselineskip",
    }
    for unit in list(units.keys()):
        i = slen.find(unit)
        if i == -1:
@@ -360,19 +400,19 @@ def latex_length(slen):
        latex_unit = units[unit]
        if plus == -1 and minus == -1:
            value = slen[:i]
            value = str(float(value) / 100)
            end = slen[i + len(unit) :]
            slen = value + latex_unit + end
        if plus > minus:
            value = slen[plus + 1 : i]
            value = str(float(value) / 100)
            begin = slen[: plus + 1]
            end = slen[i + len(unit) :]
            slen = begin + value + latex_unit + end
        if plus < minus:
            value = slen[minus + 1 : i]
            value = str(float(value) / 100)
            begin = slen[: minus + 1]
            slen = begin + value + latex_unit

    # replace + and -, but only if the - is not the first character
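Two conversions worked out by hand from the rules above (the returned flag reports whether a relative unit was found):

# a relative length becomes a fraction of the matching LaTeX length
assert latex_length("30col%") == (True, "0.3\\columnwidth")
# absolute lengths pass through unchanged
assert latex_length("2cm") == (False, "2cm")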
@@ -387,31 +427,33 @@ def latex_length(slen):


def length_in_bp(length):
    "Convert a length in LyX format to its value in bp units"

    em_width = 10.0 / 72.27  # assume 10pt font size
    text_width = 8.27 / 1.7  # assume A4 with default margins
    # scale factors are taken from Length::inInch()
    scales = {
        "bp": 1.0,
        "cc": (72.0 / (72.27 / (12.0 * 0.376 * 2.845))),
        "cm": (72.0 / 2.54),
        "dd": (72.0 / (72.27 / (0.376 * 2.845))),
        "em": (72.0 * em_width),
        "ex": (72.0 * em_width * 0.4305),
        "in": 72.0,
        "mm": (72.0 / 25.4),
        "mu": (72.0 * em_width / 18.0),
        "pc": (72.0 / (72.27 / 12.0)),
        "pt": (72.0 / (72.27)),
        "sp": (72.0 / (72.27 * 65536.0)),
        "text%": (72.0 * text_width / 100.0),
        "col%": (72.0 * text_width / 100.0),  # assume 1 column
        "page%": (72.0 * text_width * 1.7 / 100.0),
        "line%": (72.0 * text_width / 100.0),
        "theight%": (72.0 * text_width * 1.787 / 100.0),
        "pheight%": (72.0 * text_width * 2.2 / 100.0),
    }

    rx = re.compile(r"^\s*([^a-zA-Z%]+)([a-zA-Z%]+)\s*$")
    m = rx.match(length)
    if not m:
        document.warning("Invalid length value: " + length + ".")
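Assuming the function ends by multiplying the parsed value with the matching scale factor (that tail lies outside this hunk), the table gives for example:

assert length_in_bp("1in") == 72.0  # 72 bp per inch by definition
assert abs(length_in_bp("2cm") - 56.69) < 0.01  # 2 * 72 / 2.54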
@@ -425,132 +467,132 @@ def length_in_bp(length):


def revert_flex_inset(lines, name, LaTeXname):
    "Convert flex insets to TeX code"
    i = 0
    while True:
        i = find_token(lines, "\\begin_inset Flex " + name, i)
        if i == -1:
            return
        z = find_end_of_inset(lines, i)
        if z == -1:
            document.warning("Can't find end of Flex " + name + " inset.")
            i += 1
            continue
        # remove the \end_inset
        lines[z - 2 : z + 1] = put_cmd_in_ert("}")
        # we need to reset character layouts if necessary
        j = find_token(lines, "\\emph on", i, z)
        k = find_token(lines, "\\noun on", i, z)
        l = find_token(lines, "\\series", i, z)
        m = find_token(lines, "\\family", i, z)
        n = find_token(lines, "\\shape", i, z)
        o = find_token(lines, "\\color", i, z)
        p = find_token(lines, "\\size", i, z)
        q = find_token(lines, "\\bar under", i, z)
        r = find_token(lines, "\\uuline on", i, z)
        s = find_token(lines, "\\uwave on", i, z)
        t = find_token(lines, "\\strikeout on", i, z)
        if j != -1:
            lines.insert(z - 2, "\\emph default")
        if k != -1:
            lines.insert(z - 2, "\\noun default")
        if l != -1:
            lines.insert(z - 2, "\\series default")
        if m != -1:
            lines.insert(z - 2, "\\family default")
        if n != -1:
            lines.insert(z - 2, "\\shape default")
        if o != -1:
            lines.insert(z - 2, "\\color inherit")
        if p != -1:
            lines.insert(z - 2, "\\size default")
        if q != -1:
            lines.insert(z - 2, "\\bar default")
        if r != -1:
            lines.insert(z - 2, "\\uuline default")
        if s != -1:
            lines.insert(z - 2, "\\uwave default")
        if t != -1:
            lines.insert(z - 2, "\\strikeout default")
        lines[i : i + 4] = put_cmd_in_ert(LaTeXname + "{")
        i += 1


def revert_font_attrs(lines, name, LaTeXname):
    "Reverts font changes to TeX code"
    i = 0
    changed = False
    while True:
        i = find_token(lines, name + " on", i)
        if i == -1:
            break
        j = find_token(lines, name + " default", i)
        k = find_token(lines, name + " on", i + 1)
        # if there is no default set, the style ends with the layout
        # assure hereby that we found the correct layout end
        if j != -1 and (j < k or k == -1):
            lines[j : j + 1] = put_cmd_in_ert("}")
        else:
            j = find_token(lines, "\\end_layout", i)
            lines[j:j] = put_cmd_in_ert("}")
        lines[i : i + 1] = put_cmd_in_ert(LaTeXname + "{")
        changed = True
        i += 1

    # now delete all remaining lines that manipulate this attribute
    i = 0
    while True:
        i = find_token(lines, name, i)
        if i == -1:
            break
        del lines[i]

    return changed


def revert_layout_command(lines, name, LaTeXname):
    "Reverts a command from a layout to TeX code"
    i = 0
    while True:
        i = find_token(lines, "\\begin_layout " + name, i)
        if i == -1:
            return
        k = -1
        # find the next layout
        j = i + 1
        while k == -1:
            j = find_token(lines, "\\begin_layout", j)
            l = len(lines)
            # if nothing was found it was the last layout of the document
            if j == -1:
                lines[l - 4 : l - 4] = put_cmd_in_ert("}")
                k = 0
            # exclude plain layout because this can be TeX code or another inset
            elif lines[j] != "\\begin_layout Plain Layout":
                lines[j - 2 : j - 2] = put_cmd_in_ert("}")
                k = 0
            else:
                j += 1
        lines[i] = "\\begin_layout Standard"
        lines[i + 1 : i + 1] = put_cmd_in_ert(LaTeXname + "{")
        i += 1


def hex2ratio(s):
    "Converts an RRGGBB-type hexadecimal string to a float in [0.0,1.0]"
    try:
        val = int(s, 16)
    except:
        val = 0
    if val != 0:
        val += 1
    return str(val / 256.0)


def str2bool(s):
    "'true' goes to True, case-insensitively, and we strip whitespace."
    s = s.strip().lower()
    return s == "true"


def convert_info_insets(document, type, func):
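Worked examples for the two small helpers above; note the deliberate bump of nonzero values in hex2ratio so that "ff" maps to exactly 1.0:

assert hex2ratio("ff") == "1.0"  # 255 -> 256 -> 256/256.0
assert hex2ratio("00") == "0.0"
assert str2bool(" True ") is True
assert str2bool("false") is False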
@@ -587,17 +629,17 @@ def insert_document_option(document, option):


def remove_document_option(document, option):
    """Remove _option_ as a document option."""

    i = find_token(document.header, "\\options")
    options = get_value(document.header, "\\options", i)
    options = [op.strip() for op in options.split(",")]

    # Remove `option` from \options
    options = [op for op in options if op != option]

    if options:
        document.header[i] = "\\options " + ",".join(options)
    else:
        del document.header[i]
@@ -606,22 +648,25 @@ def is_document_option(document, option):
    "Find if _option_ is a document option"

    options = get_value(document.header, "\\options")
    options = [op.strip() for op in options.split(",")]
    return option in options


singlepar_insets = [
    s.strip()
    for s in "Argument, Caption Above, Caption Below, Caption Bicaption,"
    "Caption Centered, Caption FigCaption, Caption Standard, Caption Table,"
    "Flex Chemistry, Flex Fixme_Note, Flex Latin, Flex ListOfSlides,"
    "Flex Missing_Figure, Flex PDF-Annotation, Flex PDF-Comment-Setup,"
    "Flex Reflectbox, Flex S/R expression, Flex Sweave Input File,"
    "Flex Sweave Options, Flex Thanks_Reference, Flex URL, Foot InTitle,"
    "IPADeco, Index, Info, Phantom, Script".split(",")
]
# print(singlepar_insets)


def revert_language(document, lyxname, babelname="", polyglossianame=""):
    "Revert native language support"

    # Does the document use polyglossia?
    use_polyglossia = False
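A note on the comprehension above: the adjacent string literals concatenate into one long string before .split(",") runs, so the result is one stripped inset name per comma. For instance:

assert "Flex URL" in singlepar_insets
assert "Caption Standard" in singlepar_insets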
@@ -656,7 +701,7 @@ def revert_language(document, lyxname, babelname="", polyglossianame=""):
    # Now look for occurrences in the body
    i = 0
    while True:
        i = find_token(document.body, "\\lang", i + 1)
        if i == -1:
            break
        if document.body[i].startswith("\\lang %s" % lyxname):
@@ -669,12 +714,12 @@ def revert_language(document, lyxname, babelname="", polyglossianame=""):
            continue

        parent = get_containing_layout(document.body, i)
        i_e = parent[2]  # end line no,
        # print(i, texname, parent, document.body[i+1], file=sys.stderr)

        # Move leading space to the previous line:
        if document.body[i + 1].startswith(" "):
            document.body[i + 1] = document.body[i + 1][1:]
            document.body.insert(i, " ")
            continue
@@ -691,35 +736,42 @@ def revert_language(document, lyxname, babelname="", polyglossianame=""):
        # \end_layout

        # Ensure correct handling of list labels
        if parent[0] in ["Labeling", "Description"] and not " " in "\n".join(
            document.body[parent[3] : i]
        ):
            # line `i+1` is first line of a list item,
            # part before a space character is the label
            # TODO: insets or language change before first space character
            labelline = document.body[i + 1].split(" ", 1)
            if len(labelline) > 1:
                # Insert a space in the (original) document language
                # between label and remainder.
                # print("  Label:", labelline, file=sys.stderr)
                lines = [
                    labelline[0],
                    "\\lang %s" % orig_doc_language,
                    " ",
                    "\\lang %s" % (primary and "english" or lyxname),
                    labelline[1],
                ]
                document.body[i + 1 : i + 2] = lines
                i_e += 4

        # Find out where to end the language change.
        langswitch = i
        while True:
            langswitch = find_token(document.body, "\\lang", langswitch + 1, i_e)
            if langswitch == -1:
                break
            # print("  ", langswitch, document.body[langswitch], file=sys.stderr)
            # skip insets
            i_a = parent[3]  # paragraph start line
            container = get_containing_inset(document.body[i_a:i_e], langswitch - i_a)
            if (
                container
                and container[1] < langswitch - i_a
                and container[2] > langswitch - i_a
            ):
                # print("  inset", container, file=sys.stderr)
                continue
            i_e = langswitch
@@ -733,29 +785,29 @@ def revert_language(document, lyxname, babelname="", polyglossianame=""):
            singlepar = container[0] in singlepar_insets

        # Delete empty language switches:
        if not "".join(document.body[i + 1 : i_e]):
            del document.body[i:i_e]
            i -= 1
            continue

        if singlepar:
            if with_polyglossia:
                begin_cmd = "\\text%s{" % texname
            elif with_babel:
                begin_cmd = "\\foreignlanguage{%s}{" % texname
            end_cmd = "}"
        else:
            if with_polyglossia:
                begin_cmd = "\\begin{%s}" % texname
                end_cmd = "\\end{%s}" % texname
            elif with_babel:
                begin_cmd = "\\begin{otherlanguage}{%s}" % texname
                end_cmd = "\\end{otherlanguage}"

        if not primary or texname == "english":
            try:
                document.body[i_e:i_e] = put_cmd_in_ert(end_cmd)
                document.body[i + 1 : i + 1] = put_cmd_in_ert(begin_cmd)
            except UnboundLocalError:
                pass
        del document.body[i]
@@ -776,13 +828,14 @@ def revert_language(document, lyxname, babelname="", polyglossianame=""):
    if with_polyglossia:
        # Define language in the user preamble
        # (don't use \AtBeginDocument, this fails with some languages).
        add_to_preamble(
            document,
            ["\\usepackage{polyglossia}", "\\setotherlanguage{%s}" % polyglossianame],
        )
        if primary:
            # Changing the main language must be done in the document body.
            doc_lang_switch = "\\resetdefaultlanguage{%s}" % polyglossianame

    # Reset LaTeX main language if required and not already done
    if doc_lang_switch and doc_lang_switch[1:] not in document.body[8:20]:
        document.body[2:2] = put_cmd_in_ert(doc_lang_switch, is_open=True, as_paragraph=True)
@@ -15,13 +15,12 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Convert files to the file format generated by lyx 0.6"""

supported_versions = ["0.6.%d" % i for i in range(5)] + ["0.6"]
convert = [[200, []]]
revert = []


if __name__ == "__main__":
    pass
@@ -15,19 +15,19 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Convert files to the file format generated by lyx 0.8"""


def add_inputencoding(document):
    "Add the input encoding, latin1"
    document.header.append("\\inputencoding latin1")
    document.inputencoding = "latin1"


supported_versions = ["0.8.%d" % i for i in range(7)] + ["0.8"]
convert = [[210, [add_inputencoding]]]
revert = []


if __name__ == "__main__":
    pass
@@ -15,29 +15,30 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Convert files to the file format generated by lyx 0.10"""


def regularise_header(document):
    "Put each entry in header into a separate line."
    i = 0
    while i < len(document.header):
        line = document.header[i]
        if len(line.split("\\")) > 1:
            tmp = ["\\" + token.strip() for token in line.split("\\")][1:]
            document.header[i : i + 1] = tmp
            i += len(tmp)
        i += 1


def find_next_space(line, j):
    """Return position of next space or backslash, which one comes
    first, starting from position j, if none exists returns last
    position in line (+1)."""
    space_pos = line.find(" ", j)
    if space_pos == -1:
        space_pos = len(line)

    bksl_pos = line.find("\\", j)
    if bksl_pos == -1:
        bksl_pos = len(line)
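A small check of find_next_space (assuming, as its callers imply, that the function ends by returning the smaller of the two positions):

line = "\\layout Standard\\align left"
assert find_next_space(line, 1) == 7   # the space after "\layout"
assert find_next_space(line, 8) == 16  # the backslash starting "\align"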
@@ -45,19 +46,42 @@ def find_next_space(line, j):


def regularise_body(document):
    """Place tokens starting with a backslash into a separate line."""

    getline_tokens = [
        "added_space_bottom",
        "added_space_top",
        "align",
        "layout",
        "fill_bottom",
        "fill_top",
        "labelwidthstring",
        "pagebreak_top",
        "pagebreak_bottom",
        "noindent",
    ]

    noargs_tokens = [
        "backslash",
        "begin_deeper",
        "end_deeper",
        "end_float",
        "end_inset",
        "hfill",
        "newline",
        "protected_separator",
    ]

    onearg_tokens = [
        "bar",
        "begin_float",
        "family",
        "latex",
        "shape",
        "size",
        "series",
        "cursor",
    ]

    i = 0
    while i < len(document.body):
@@ -65,23 +89,23 @@ def regularise_body(document):
        j = 0
        new_block = []
        while j < len(line):
            k = line.find("\\", j)

            if k == -1:
                new_block += [line[j:]]
                break

            if k != j:
                # document.warning("j=%d\tk=%d\t#%s#%s#" % (j,k,line,line[j: k]))
                new_block += [line[j:k]]
                j = k

            k = find_next_space(line, j + 1)

            token = line[j + 1 : k]
            # These tokens take the rest of the line
            if token in getline_tokens:
                # document.warning("getline_token:%s\tj=%d\t\t#%s#%s#" % (token,j,line,line[j:]))
                new_block += [line[j:]]
                break
@@ -101,15 +125,14 @@ def regularise_body(document):
            # Special treatment for insets
            if token in ["begin_inset"]:
                l = find_next_space(line, k + 1)
                inset = line[k + 1 : l]

                if inset == "Latex":
                    new_block += [line[j:l]]
                    j = l
                    continue

                if inset in ["LatexCommand", "LatexDel", "Label", "Figure", "Formula"]:
                    new_block += [line[j:]]
                    break
@@ -120,19 +143,19 @@ def regularise_body(document):
                    continue

                document.warning("unknown inset %s" % inset)
                assert False

            # We are inside a latex inset, pass the text verbatim
            new_block += [line[j:]]
            break

        document.body[i : i + 1] = new_block
        i += len(new_block)


supported_versions = ["0.10.%d" % i for i in range(8)] + ["0.10"]
convert = [[210, [regularise_header, regularise_body]]]
revert = []


if __name__ == "__main__":
@@ -15,29 +15,29 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Convert files to the file format generated by lyx 0.12"""

import re
from parser_tools import find_token, find_re, check_token


def space_before_layout(document):
    "Remove empty line before \\layout."
    lines = document.body
    i = 2  # skip first layout
    while True:
        i = find_token(lines, "\\layout", i)
        if i == -1:
            break

        prot_space = lines[i - 2].find("\\protected_separator")
        if lines[i - 1] == "" and prot_space == -1:
            del lines[i - 1]
        i = i + 1


def formula_inset_space_eat(document):
    "Remove space after inset formula."
    lines = document.body
    i = 0
    while True:
@@ -45,13 +45,13 @@ def formula_inset_space_eat(document):
        if i == -1:
            break

        if len(lines[i]) > 22 and lines[i][21] == " ":
            lines[i] = lines[i][:20] + lines[i][21:]
        i = i + 1


def update_tabular(document):
    "Update from tabular format 1 or 2 to 4."
    lines = document.body
    lyxtable_re = re.compile(r".*\\LyXTable$")
    i = 0
@ -62,201 +62,199 @@ def update_tabular(document):
|
|||||||
i = i + 1
|
i = i + 1
|
||||||
format = lines[i][8:]
|
format = lines[i][8:]
|
||||||
|
|
||||||
lines[i] = 'multicol4'
|
lines[i] = "multicol4"
|
||||||
i = i + 1
|
i = i + 1
|
||||||
rows = int(lines[i].split()[0])
|
rows = int(lines[i].split()[0])
|
||||||
columns = int(lines[i].split()[1])
|
columns = int(lines[i].split()[1])
|
||||||
|
|
||||||
lines[i] = lines[i] + ' 0 0 -1 -1 -1 -1'
|
lines[i] = lines[i] + " 0 0 -1 -1 -1 -1"
|
||||||
i = i + 1
|
i = i + 1
|
||||||
|
|
||||||
for j in range(rows):
|
for j in range(rows):
|
||||||
lines[i] = lines[i] + ' 0 0'
|
lines[i] = lines[i] + " 0 0"
|
||||||
i = i + 1
|
i = i + 1
|
||||||
|
|
||||||
for j in range(columns):
|
for j in range(columns):
|
||||||
lines[i] = lines[i] + ' '
|
lines[i] = lines[i] + " "
|
||||||
i = i + 1
|
i = i + 1
|
||||||
|
|
||||||
while lines[i].strip():
|
while lines[i].strip():
|
||||||
if not format:
|
if not format:
|
||||||
lines[i] = lines[i] + ' 1 1'
|
lines[i] = lines[i] + " 1 1"
|
||||||
lines[i] = lines[i] + ' 0 0 0'
|
lines[i] = lines[i] + " 0 0 0"
|
||||||
i = i + 1
|
i = i + 1
|
||||||
|
|
||||||
lines[i] = lines[i].strip()
|
lines[i] = lines[i].strip()
|
||||||
|
|
||||||
|
|
||||||
def final_dot(document):
|
def final_dot(document):
|
||||||
" Merge lines if the dot is the final character."
|
"Merge lines if the dot is the final character."
|
||||||
lines = document.body
|
lines = document.body
|
||||||
i = 0
|
i = 0
|
||||||
while i < len(lines):
|
while i < len(lines):
|
||||||
|
if (
|
||||||
if lines[i][-1:] == '.' and lines[i+1][:1] != '\\' and \
|
lines[i][-1:] == "."
|
||||||
lines[i+1][:1] != ' ' and len(lines[i]) + len(lines[i+1])<= 72 \
|
and lines[i + 1][:1] != "\\"
|
||||||
and lines[i+1] != '':
|
and lines[i + 1][:1] != " "
|
||||||
|
and len(lines[i]) + len(lines[i + 1]) <= 72
|
||||||
lines[i] = lines[i] + lines[i+1]
|
and lines[i + 1] != ""
|
||||||
del lines[i+1]
|
):
|
||||||
|
lines[i] = lines[i] + lines[i + 1]
|
||||||
|
del lines[i + 1]
|
||||||
else:
|
else:
|
||||||
i = i + 1
|
i = i + 1
|
||||||
|
|
||||||
|
|

def update_inset_label(document):
    "Update inset Label."
    lines = document.body
    i = 0
    while True:
        i = find_token(lines, "\\begin_inset Label", i)
        if i == -1:
            return
        lines[i] = "\\begin_inset LatexCommand \\label{" + lines[i][19:] + "}"
        i = i + 1


def update_latexdel(document):
    "Update inset LatexDel."
    lines = document.body
    i = 0
    while True:
        i = find_token(lines, "\\begin_inset LatexDel", i)
        if i == -1:
            return
        lines[i] = lines[i].replace("\\begin_inset LatexDel", "\\begin_inset LatexCommand")
        i = i + 1


def update_vfill(document):
    "Update fill_top and fill_bottom."
    lines = document.body
    for i in range(len(lines)):
        lines[i] = lines[i].replace("\\fill_top", "\\added_space_top vfill")
        lines[i] = lines[i].replace("\\fill_bottom", "\\added_space_bottom vfill")


def update_space_units(document):
    "Update space units."
    lines = document.body
    added_space_bottom = re.compile(r"\\added_space_bottom ([^ ]*)")
    added_space_top = re.compile(r"\\added_space_top ([^ ]*)")
    for i in range(len(lines)):
        result = added_space_bottom.search(lines[i])
        if result:
            old = "\\added_space_bottom " + result.group(1)
            new = "\\added_space_bottom " + str(float(result.group(1))) + "cm"
            lines[i] = lines[i].replace(old, new)

        result = added_space_top.search(lines[i])
        if result:
            old = "\\added_space_top " + result.group(1)
            new = "\\added_space_top " + str(float(result.group(1))) + "cm"
            lines[i] = lines[i].replace(old, new)
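
# Illustrative sketch, not part of the original file: update_space_units
# turns a bare number into an explicit cm length.
demo_space_re = re.compile(r"\\added_space_bottom ([^ ]*)")
demo_m = demo_space_re.search("\\added_space_bottom 0.5")
assert str(float(demo_m.group(1))) + "cm" == "0.5cm"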

def remove_cursor(document):
    "Remove cursor, it is not saved on the file anymore."
    lines = document.body
    i = 0
    cursor_re = re.compile(r".*(\\cursor \d*)")
    while True:
        i = find_re(lines, cursor_re, i)
        if i == -1:
            break
        cursor = cursor_re.search(lines[i]).group(1)
        lines[i] = lines[i].replace(cursor, "")
        i = i + 1


def remove_empty_insets(document):
    "Remove empty insets."
    lines = document.body
    i = 0
    while True:
        i = find_token(lines, "\\begin_inset ", i)
        if i == -1:
            break
        if lines[i] == "\\begin_inset " and lines[i + 1] == "\\end_inset ":
            del lines[i]
            del lines[i]
        i = i + 1


def remove_formula_latex(document):
    "Remove formula latex."
    lines = document.body
    i = 0
    while True:
        i = find_token(lines, "\\latex formula_latex ", i)
        if i == -1:
            break
        del lines[i]

        i = find_token(lines, "\\latex default", i)
        if i == -1:
            break
        del lines[i]


def add_end_document(document):
    "Add \\the_end to the end of the document."
    lines = document.body
    i = find_token(lines, "\\the_end", 0)
    if i == -1:
        lines.append("\\the_end")


def header_update(document):
    "Update document header."
    lines = document.header
    i = 0
    l = len(lines)
    while i < l:
        if lines[i][-1:] == " ":
            lines[i] = lines[i][:-1]

        if check_token(lines[i], "\\epsfig"):
            lines[i] = lines[i].replace("\\epsfig", "\\graphics")
            i = i + 1
            continue

        if check_token(lines[i], "\\papersize"):
            size = lines[i].split()[1]
            new_size = size
            paperpackage = ""

            if size == "usletter":
                new_size = "letterpaper"
            if size == "a4wide":
                new_size = "Default"
                paperpackage = "widemarginsa4"

            lines[i] = "\\papersize " + new_size
            i = i + 1
            if paperpackage:
                lines.insert(i, "\\paperpackage " + paperpackage)
                i = i + 1

            lines.insert(i, "\\use_geometry 0")
            lines.insert(i + 1, "\\use_amsmath 0")
            i = i + 2
            continue

        if check_token(lines[i], "\\baselinestretch"):
            size = lines[i].split()[1]
            if size == "1.00":
                name = "single"
            elif size == "1.50":
                name = "onehalf"
            elif size == "2.00":
                name = "double"
            else:
                name = "other " + size
            lines[i] = "\\spacing %s " % name
            i = i + 1
            continue

@ -264,45 +262,45 @@ def header_update(document):


def update_latexaccents(document):
    "Update latex accent insets."
    body = document.body
    i = 1
    while True:
        i = find_token(body, "\\i ", i)
        if i == -1:
            return

        contents = body[i][2:].strip()

        if contents.find("{") != -1 and contents.find("}") != -1:
            i = i + 1
            continue

        if len(contents) == 2:
            contents = contents + "{}"
        elif len(contents) == 3:
            contents = contents[:2] + "{" + contents[2] + "}"
        elif len(contents) == 4:
            if contents[2] == " ":
                contents = contents[:2] + "{" + contents[3] + "}"
            elif contents[2:4] == "\\i" or contents[2:4] == "\\j":
                contents = contents[:2] + "{" + contents[2:] + "}"

        body[i] = "\\i " + contents
        i = i + 1


def obsolete_latex_title(document):
    "Replace layout Latex_Title with Title."
    body = document.body
    i = 0
    while True:
        i = find_token(body, "\\layout", i)
        if i == -1:
            return

        if body[i].lower().find("latex_title") != -1:
            body[i] = "\\layout Title"

        i = i + 1


@ -313,27 +311,43 @@ def remove_inset_latex(document):

    i = 0
    while True:
        i = find_token(body, "\\begin_inset Latex", i)
        if i == -1:
            return

        body[i] = body[i].replace("\\begin_inset Latex", "\\layout LaTeX")
        i = find_token(body, "\\end_inset", i)
        if i == -1:
            # this should not happen
            return
        del body[i]


supported_versions = ["0.12.0", "0.12.1", "0.12"]
convert = [
    [
        215,
        [
            header_update,
            add_end_document,
            remove_cursor,
            final_dot,
            update_inset_label,
            update_latexdel,
            update_space_units,
            space_before_layout,
            formula_inset_space_eat,
            update_tabular,
            update_vfill,
            remove_empty_insets,
            remove_formula_latex,
            update_latexaccents,
            obsolete_latex_title,
            remove_inset_latex,
        ],
    ]
]
revert = []
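
# Illustrative sketch of how one [format, functions] entry from the convert
# table above would be applied; the real driver lives outside this diff, so
# the helper name apply_step is an assumption, not LyX API.
def apply_step(document, step):
    target_format, functions = step
    for function in functions:
        function(document)
    document.format = target_format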

if __name__ == "__main__":

@ -15,29 +15,30 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Convert files to the file format generated by lyx 1.0"""

import re
from parser_tools import find_token, find_re


def obsolete_latex_title(document):
    "Replace LatexTitle layout with Title."

    body = document.body
    i = 0
    while True:
        i = find_token(body, "\\layout", i)
        if i == -1:
            return

        if body[i].lower().find("latex title") != -1:
            body[i] = "\\layout Title"

        i = i + 1


def update_tabular(document):
    "Update from tabular format 3 to 4 if necessary."

    lines = document.body
    lyxtable_re = re.compile(r".*\\LyXTable$")
@ -49,27 +50,27 @@ def update_tabular(document):
        i = i + 1
        format = lines[i][8:]

        if format != "3":
            continue

        lines[i] = "multicol4"
        i = i + 1
        rows = int(lines[i].split()[0])
        columns = int(lines[i].split()[1])

        lines[i] = lines[i] + " 0 0 -1 -1 -1 -1"
        i = i + 1

        for j in range(rows):
            lines[i] = lines[i] + " 0 0"
            i = i + 1

        for j in range(columns):
            lines[i] = lines[i] + " "
            i = i + 1

        while lines[i].strip():
            lines[i] = lines[i] + " 0 0 0"
            i = i + 1

        lines[i] = lines[i].strip()
@ -77,9 +78,8 @@ def update_tabular(document):

supported_versions = ["1.0.%d" % i for i in range(5)] + ["1.0"]
convert = [[215, [obsolete_latex_title, update_tabular]]]
revert = []


if __name__ == "__main__":
    pass

@ -15,13 +15,12 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Convert files to the file format generated by lyx 1.1 series, until 1.1.4"""

supported_versions = ["1.1.%d" % i for i in range(5)] + ["1.1"]
convert = [[215, []]]
revert = []


if __name__ == "__main__":
    pass

@ -15,7 +15,7 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Convert files to the file format generated by lyx 1.1.5"""

import re
from parser_tools import find_token, find_token_backwards, find_re
@ -23,8 +23,9 @@ from parser_tools import find_token, find_token_backwards, find_re
####################################################################
# Private helper functions


def get_layout(line, default_layout):
    "Get the line layout, beware of the empty layout."
    tokens = line.split()
    if len(tokens) > 1:
        return tokens[1]
@ -33,18 +34,19 @@ def get_layout(line, default_layout):

####################################################################

math_env = ["\\[", "\\begin{eqnarray*}", "\\begin{eqnarray}", "\\begin{equation}"]


def replace_protected_separator(document):
    "Replace protected separator."
    lines = document.body
    i = 0
    while True:
        i = find_token(lines, "\\protected_separator", i)
        if i == -1:
            break
        j = find_token_backwards(lines, "\\layout", i)
        # if j == -1: print error
        layout = get_layout(lines[j], document.default_layout)

        if layout == "LyX-Code":
@ -53,41 +55,42 @@ def replace_protected_separator(document):
                result = result + " "
                del lines[i]

            lines[i - 1] = lines[i - 1] + result + lines[i]
        else:
            lines[i - 1] = lines[i - 1] + "\\SpecialChar ~"

        del lines[i]


def merge_formula_inset(document):
    "Merge formula insets."
    lines = document.body
    i = 0
    while True:
        i = find_token(lines, "\\begin_inset Formula", i)
        if i == -1:
            break
        if lines[i + 1] in math_env:
            lines[i] = lines[i] + lines[i + 1]
            del lines[i + 1]
        i = i + 1
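
# Illustrative sketch, not part of the original file: merge_formula_inset
# folds a display-math opener into the Formula inset line above it; the
# sample body lines are made up.
demo_body = ["\\begin_inset Formula", "\\[", "x = y", "\\]"]
if demo_body[1] in math_env:
    demo_body[0] = demo_body[0] + demo_body[1]
    del demo_body[1]
assert demo_body[0] == "\\begin_inset Formula\\["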

def update_tabular(document):
    "Update from tabular format 4 to 5 if necessary."
    lines = document.body
    lyxtable_re = re.compile(r".*\\LyXTable$")
    i = 0
    while True:
        i = find_re(lines, lyxtable_re, i)
        if i == -1:
            break
        i = i + 1
        format = lines[i][8]
        if format != "4":
            continue

        lines[i] = "multicol5"
        i = i + 1
        rows = int(lines[i].split()[0])
        columns = int(lines[i].split()[1])
@ -107,53 +110,51 @@ def update_tabular(document):


def update_toc(document):
    "Update table of contents."
    lines = document.body
    i = 0
    while True:
        i = find_token(lines, "\\begin_inset LatexCommand \\tableofcontents", i)
        if i == -1:
            break
        lines[i] = lines[i] + "{}"
        i = i + 1


def remove_cursor(document):
    "Remove cursor."
    lines = document.body
    i = find_token(lines, "\\cursor", 0)
    if i != -1:
        del lines[i]


def remove_vcid(document):
    "Remove \\lyxvcid and \\lyxrcsid."
    lines = document.header
    i = find_token(lines, "\\lyxvcid", 0)
    if i != -1:
        del lines[i]
    i = find_token(lines, "\\lyxrcsid", 0)
    if i != -1:
        del lines[i]


def first_layout(document):
    "Fix first layout, if empty use the default layout."
    lines = document.body
    while lines[0] == "":
        del lines[0]
    if lines[0][:7] != "\\layout":
        lines[:0] = ["\\layout %s" % document.default_layout, ""]


def remove_space_in_units(document):
    "Remove space in units."
    lines = document.header
    margins = ["\\topmargin", "\\rightmargin", "\\leftmargin", "\\bottommargin"]

    unit_rexp = re.compile(r"[^ ]* (.*) (.*)")

    for margin in margins:
        i = 0
@ -169,7 +170,7 @@ def remove_space_in_units(document):
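
# Illustrative sketch, not part of the original file: unit_rexp splits a
# margin value from its unit so the space between them can be removed.
demo_unit_rexp = re.compile(r"[^ ]* (.*) (.*)")
assert demo_unit_rexp.match("\\topmargin 2 cm").groups() == ("2", "cm")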

def latexdel_getargs(document, i):
    "Get arguments from latexdel insets."
    lines = document.body

    # play safe, clean empty lines
@ -178,16 +179,16 @@ def latexdel_getargs(document, i):
            break
        del lines[i]

    j = find_token(lines, "\\end_inset", i)

    if i == j:
        del lines[i]
    else:
        document.warning("Unexpected end of inset.")
    j = find_token(lines, "\\begin_inset LatexDel }{", i)

    ref = " ".join(lines[i:j])
    del lines[i : j + 1]

    # play safe, clean empty lines
    while True:
@ -195,24 +196,24 @@ def latexdel_getargs(document, i):
            break
        del lines[i]

    j = find_token(lines, "\\end_inset", i - 1)
    if i == j:
        del lines[i]
    else:
        document.warning("Unexpected end of inset.")
    j = find_token(lines, "\\begin_inset LatexDel }", i)
    label = " ".join(lines[i:j])
    del lines[i : j + 1]

    return ref, label


def update_ref(document):
    "Update reference inset."
    lines = document.body
    i = 0
    while True:
        i = find_token(lines, "\\begin_inset LatexCommand", i)
        if i == -1:
            return

@ -225,7 +226,7 @@ def update_ref(document):


def update_latexdel(document):
    "Remove latexdel insets."
    lines = document.body
    i = 0
    latexdel_re = re.compile(r".*\\begin_inset LatexDel")
@ -233,11 +234,10 @@ def update_latexdel(document):
        i = find_re(lines, latexdel_re, i)
        if i == -1:
            return
        lines[i] = lines[i].replace("\\begin_inset LatexDel", "\\begin_inset LatexCommand")

        j = lines[i].find("\\begin_inset")
        lines.insert(i + 1, lines[i][j:])
        lines[i] = lines[i][:j].strip()
        i = i + 1

@ -245,19 +245,31 @@ def update_latexdel(document):
            i = i + 1

            ref, label = latexdel_getargs(document, i)
            lines[i - 1] = f"{lines[i-1][:-1]}[{label}]{{{ref}}}"

        i = i + 1
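
# Illustrative sketch, not part of the original file: it mirrors the
# f-string in update_latexdel above; the shape of the input line is an
# assumption, since the surrounding hunks elide it.
demo_label, demo_ref = "the label", "sec:intro"
demo_line = "\\begin_inset LatexCommand \\ref{"
demo_line = f"{demo_line[:-1]}[{demo_label}]{{{demo_ref}}}"
assert demo_line == "\\begin_inset LatexCommand \\ref[the label]{sec:intro}"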

supported_versions = ["1.1.5", "1.1.5fix1", "1.1.5fix2", "1.1"]
convert = [
    [
        216,
        [
            first_layout,
            remove_vcid,
            remove_cursor,
            update_toc,
            replace_protected_separator,
            merge_formula_inset,
            update_tabular,
            remove_space_in_units,
            update_ref,
            update_latexdel,
        ],
    ]
]

revert = []

if __name__ == "__main__":
    pass

@ -15,29 +15,38 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Convert files to the file format generated by lyx 1.1.6, until fix2"""

import re
from parser_tools import find_re, find_tokens, find_token, check_token

lyxtable_re = re.compile(r".*\\LyXTable$")


def update_tabular(document):
    "Update tabular to version 1 (xml like syntax)."
    lines = document.body
    i = 0
    while True:
        i = find_re(lines, lyxtable_re, i)
        if i == -1:
            break
        prop_dict = {
            "family": "default",
            "series": "default",
            "shape": "default",
            "size": "default",
            "emph": "default",
            "bar": "default",
            "noun": "default",
            "latex": "default",
            "color": "default",
        }

        # remove \LyXTable
        lines[i] = lines[i][:-9]
        i = i + 1
        lines.insert(i, "")
        i = i + 1
        lines[i] = "\\begin_inset Tabular"
        i = i + 1
@ -46,16 +55,19 @@ def update_tabular(document):
        columns = int(head[1])

        tabular_line = i
        i = i + 1
        lines.insert(
            i,
            f'<Features rotate="{head[2]}" islongtable="{head[3]}" endhead="{head[4]}" endfirsthead="{head[5]}" endfoot="{head[6]}" endlastfoot="{head[7]}">',
        )

        i = i + 1

        row_info = []
        cont_row = []
        for j in range(rows):
            row_info.append(lines[i].split())
            if lines[i].split()[2] == "1":
                cont_row.append(j)
            del lines[i]

@ -71,14 +83,16 @@ def update_tabular(document):
        cell_re = re.compile(r'(\d) (\d) (\d) (\d) (\d) (\d) (\d) (".*") (".*")')
        for j in range(rows):
            for k in range(columns):
                # add column location to read properties
                cell_info.append(cell_re.match(lines[i]).groups())
                cell_col.append(k)
                if lines[i][0] != "2":
                    ncells = ncells + 1
                del lines[i]

        lines[tabular_line] = (
            f'<LyXTabular version="1" rows="{rows-len(cont_row)}" columns="{columns}">'
        )
        del lines[i]
        if not lines[i]:
            del lines[i]
@ -95,15 +109,19 @@ def update_tabular(document):

        for j in range(rows):
            for k in range(columns):
                m = j * columns + k
                if cell_info[m][0] == "2":
                    continue

                if l == ncells - 1:
                    # the end variable refers to cell end, not to document end.
                    end = find_tokens(
                        lines,
                        ["\\layout", "\\the_end", "\\end_deeper", "\\end_float"],
                        i,
                    )
                else:
                    end = find_token(lines, "\\newline", i)

                if end == -1:
                    document.error("Malformed LyX file.")
@ -112,9 +130,9 @@ def update_tabular(document):
                while end > 0:
                    cell_content[j][k].append(lines[i])
                    del lines[i]
                    end = end - 1

                if lines[i].find("\\newline") != -1:
                    del lines[i]
                l = l + 1

@ -124,83 +142,120 @@ def update_tabular(document):
        for j in range(rows):
            if j in cont_row:
                continue
            tmp.append(
                f'<Row topline="{row_info[j][0]}" bottomline="{row_info[j][1]}" newpage="{row_info[j][3]}">'
            )

            for k in range(columns):
                if j:
                    tmp.append("<Column>")
                else:
                    tmp.append(
                        f'<Column alignment="{column_info[k][0]}" valignment="0" leftline="{column_info[k][1]}" rightline="{column_info[k][2]}" width={column_info[k][3]} special={column_info[k][4]}>'
                    )
                m = j * columns + k

                leftline = int(column_info[k][1])
                if cell_info[m][0] == "1":
                    n = m + 1
                    while n < rows * columns - 1 and cell_info[n][0] == "2":
                        n = n + 1
                    rightline = int(column_info[cell_col[n - 1]][2])
                else:
                    # not a multicolumn main cell
                    rightline = int(column_info[k][2])

                tmp.append(
                    '<Cell multicolumn="%s" alignment="%s" valignment="0" topline="%s" bottomline="%s" leftline="%d" rightline="%d" rotate="%s" usebox="%s" width=%s special=%s>'
                    % (
                        cell_info[m][0],
                        cell_info[m][1],
                        cell_info[m][2],
                        cell_info[m][3],
                        leftline,
                        rightline,
                        cell_info[m][5],
                        cell_info[m][6],
                        cell_info[m][7],
                        cell_info[m][8],
                    )
                )
                tmp.append("\\begin_inset Text")
                tmp.append("")
                tmp.append("\\layout %s" % document.default_layout)
                tmp.append("")

                if cell_info[m][0] != "2":
                    paragraph = []
                    if cell_info[m][4] == "1":
                        l = j
                        paragraph = paragraph + cell_content[j][k]
                        while cell_info[m][4] == "1":
                            m = m + columns
                            l = l + 1
                            if l >= rows:
                                break
                            paragraph = paragraph + cell_content[l][k]
                    else:
                        paragraph = cell_content[j][k]
                    tmp = tmp + set_paragraph_properties(paragraph, prop_dict)

                tmp.append("\\end_inset ")
                tmp.append("</Cell>")
                tmp.append("</Column>")
            tmp.append("</Row>")

        tmp.append("</LyXTabular>")
        tmp.append("")
        tmp.append("\\end_inset ")
        tmp.append("")
        tmp.append("")
        lines[i:i] = tmp

        i = i + len(tmp)
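
# Illustrative sketch, not part of the original file: the Features line
# built by update_tabular for a 2x3 table with no long-table settings.
demo_head = ["2", "3", "0", "0", "-1", "-1", "-1", "-1"]
demo_features = f'<Features rotate="{demo_head[2]}" islongtable="{demo_head[3]}" endhead="{demo_head[4]}" endfirsthead="{demo_head[5]}" endfoot="{demo_head[6]}" endlastfoot="{demo_head[7]}">'
assert demo_features == '<Features rotate="0" islongtable="0" endhead="-1" endfirsthead="-1" endfoot="-1" endlastfoot="-1">'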

prop_exp = re.compile(r"\\(\S*)\s*(\S*)")


def set_paragraph_properties(lines, prop_dict):
    "Set paragraph properties."
    # we need to preserve the order of options
    properties = [
        "family",
        "series",
        "shape",
        "size",
        "emph",
        "bar",
        "noun",
        "latex",
        "color",
    ]
    prop_value = {
        "family": "default",
        "series": "medium",
        "shape": "up",
        "size": "normal",
        "emph": "off",
        "bar": "no",
        "noun": "off",
        "latex": "no_latex",
        "color": "none",
    }

    start = 0
    end = 0
    i = 0
    n = len(lines)

    # skip empty lines
    while i < n and lines[i] == "":
        i = i + 1
    start = i

    # catch open char properties
    while i < n and lines[i][:1] == "\\":
        result = prop_exp.match(lines[i])
        # sys.stderr.write(lines[i]+"\n")
        prop = result.group(1)
@ -214,12 +269,12 @@ def set_paragraph_properties(lines, prop_dict):
    aux = []
    insert = 0
    for prop in properties:
        if prop_dict[prop] != "default":
            insert = 1
            if prop == "color":
                aux.append(f"\\{prop} {prop_dict[prop]}")
            elif prop != "family" or prop_dict[prop] != "roman":
                aux.append(f"\\{prop} {prop_dict[prop]} ")

    # remove final char properties
    n = len(lines)
@ -231,7 +286,7 @@ def set_paragraph_properties(lines, prop_dict):
            del lines[n]
            continue

        if lines[n][:1] == "\\":
            result = prop_exp.match(lines[n])
            prop = result.group(1)
            if prop in properties:
@ -240,14 +295,14 @@ def set_paragraph_properties(lines, prop_dict):
                del lines[n]
                continue

        if check_token(lines[n], "\\end_inset"):
            # ensure proper newlines after inset end
            lines.append("")
            lines.append("")
            break

    for line in lines[end:]:
        if line[:1] == "\\":
            result = prop_exp.match(line)
            prop = result.group(1)
            if prop in properties and prop not in changed_prop:
@ -257,30 +312,30 @@ def set_paragraph_properties(lines, prop_dict):
        return []

    result = lines[:start] + aux[:] + lines[end:]
    if insert and result[0] != "":
        return [""] + result[:]

    return result[:]


def update_language(document):
    """Update document language, if language is default convert it to
    english."""
    header = document.header
    i = find_token(header, "\\language", 0)
    if i == -1:
        # no language, should emit a warning
        header.append("\\language english")
        return
    # This is the lyx behaviour: defaults to english
    if header[i].split()[1] == "default":
        header[i] = "\\language english"
        return
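
# Illustrative sketch, not part of the original file: the "default"
# language is rewritten to explicit english; everything else is kept.
demo_header = ["\\language default"]
if demo_header[0].split()[1] == "default":
    demo_header[0] = "\\language english"
assert demo_header == ["\\language english"]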

supported_versions = ["1.1.6", "1.1.6fix1", "1.1.6fix2", "1.1"]
convert = [[217, [update_tabular, update_language]]]
revert = []


if __name__ == "__main__":

@ -15,13 +15,14 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Convert files to the file format generated by lyx 1.1.6, fix3 and fix4"""

import re
from parser_tools import find_token, find_re


def bool_table(item):
    "Convert 0, 1 to false, true."
    if item == "0":
        return "false"
    # should emit a warning if item != "1"
@ -33,27 +34,28 @@ align_table = {"0": "top", "2": "left", "4": "right", "8": "center"}
use_table = {"0": "none", "1": "parbox"}
table_meta_re = re.compile(r'<LyXTabular version="?1"? rows="?(\d*)"? columns="?(\d*)"?>')


def update_tabular(document):
    "Update tabular format to version 2 (xml like syntax)."
    regexp = re.compile(r"^\\begin_inset\s+Tabular")
    lines = document.body
    i = 0
    while True:
        i = find_re(lines, regexp, i)
        if i == -1:
            break

        i = i + 1

        # scan table header meta-info
        res = table_meta_re.match(lines[i])
        if res:
            val = res.groups()
            lines[i] = '<lyxtabular version="2" rows="%s" columns="%s">' % val

        j = find_token(lines, "</LyXTabular>", i) + 1
        if j == 0:
            document.warning("Error: Bad lyx format i=%d j=%d" % (i, j))
            break

        new_table = table_update(lines[i:j])
@ -61,50 +63,63 @@ def update_tabular(document):
        i = i + len(new_table)


col_re = re.compile(
    r'<column alignment="?(\d)"? valignment="?(\d)"? leftline="?(\d)"? rightline="?(\d)"? width="(.*)" special="(.*)">'
)
cell_re = re.compile(
    r'<cell multicolumn="?(\d)"? alignment="?(\d)"? valignment="?(\d)"? topline="?(\d)"? bottomline="?(\d)"? leftline="?(\d)"? rightline="?(\d)"? rotate="?(\d)"? usebox="?(\d)"? width="(.*)" special="(.*)">'
)
features_re = re.compile(
    r'<features rotate="?(\d)"? islongtable="?(\d)"? endhead="?(-?\d)"? endfirsthead="?(-?\d)"? endfoot="?(-?\d)"? endlastfoot="?(-?\d)"?>'
)
row_re = re.compile(r'<row topline="?(\d)"? bottomline="?(\d)"? newpage="?(\d)"?>')


def table_update(lines):
    "Update table's internal content to format 2."
    lines[1] = lines[1].replace("<Features", "<features")
    res = features_re.match(lines[1])
    if res:
        val = res.groups()
        lines[1] = (
            f'<features rotate="{bool_table(val[0])}" islongtable="{bool_table(val[1])}" endhead="{val[2]}" endfirsthead="{val[3]}" endfoot="{val[4]}" endlastfoot="{val[5]}">'
        )

    if lines[2] == "":
        del lines[2]
    i = 2
    col_info = []
    while i < len(lines):
        lines[i] = lines[i].replace("<Cell", "<cell")
        lines[i] = lines[i].replace("</Cell", "</cell")
        lines[i] = lines[i].replace("<Row", "<row")
        lines[i] = lines[i].replace("</Row", "</row")
        lines[i] = lines[i].replace("<Column", "<column")
        lines[i] = lines[i].replace("</Column", "</column")
        lines[i] = lines[i].replace("</LyXTabular", "</lyxtabular")
        k = lines[i].find("<column ")
        if k != -1:
            col_info.append(lines[i])
            del lines[i]
            continue

        if lines[i] == "</column>" or lines[i] == "<column>":
            del lines[i]
            continue

        res = cell_re.match(lines[i])
        if res:
            val = res.groups()
            lines[i] = (
                f'<cell multicolumn="{val[0]}" alignment="{align_table[val[1]]}" valignment="{align_vertical[val[2]]}" topline="{bool_table(val[3])}" bottomline="{bool_table(val[4])}" leftline="{bool_table(val[5])}" rightline="{bool_table(val[6])}" rotate="{bool_table(val[7])}" usebox="{use_table[val[8]]}" width="{val[9]}" special="{val[10]}">'
            )

        res = row_re.match(lines[i])
        if res:
            val = res.groups()
            lines[i] = (
                f'<row topline="{bool_table(val[0])}" bottomline="{bool_table(val[1])}" newpage="{bool_table(val[2])}">'
            )

        i = i + 1

@ -113,15 +128,24 @@ def table_update(lines):
        res = col_re.match(col_info[i])
        if res:
            val = res.groups()
            col_info[i] = (
                '<column alignment="%s" valignment="%s" leftline="%s" rightline="%s" width="%s" special="%s">'
                % (
                    align_table[val[0]],
                    align_vertical[val[1]],
                    bool_table(val[2]),
                    bool_table(val[3]),
                    val[4],
                    val[5],
                )
            )

    return lines[:2] + col_info + lines[2:]
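
# Illustrative sketch, not part of the original file: row_re accepts the
# old unquoted attributes, and bool_table then maps them to false/true.
demo_val = row_re.match('<row topline=1 bottomline=0 newpage=0>').groups()
assert demo_val == ("1", "0", "0")
assert bool_table("0") == "false"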

supported_versions = ["1.1.6fix3", "1.1.6fix4", "1.1"]
convert = [[218, [update_tabular]]]
revert = []


if __name__ == "__main__":

@ -16,21 +16,30 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Convert files to the file format generated by lyx 1.2"""

import re

from parser_tools import (
    find_token,
    find_token_backwards,
    find_tokens,
    find_tokens_backwards,
    find_beginning_of,
    find_end_of,
    find_re,
    is_nonempty_line,
    find_nonempty_line,
    get_value,
    check_token,
)

####################################################################
# Private helper functions


def get_layout(line, default_layout):
    "Get layout, if empty return the default layout."
    tokens = line.split()
    if len(tokens) > 1:
        return tokens[1]
@ -38,12 +47,13 @@ def get_layout(line, default_layout):


def get_paragraph(lines, i, format):
    "Finds the paragraph that contains line i."
    begin_layout = "\\layout"

    while i != -1:
        i = find_tokens_backwards(lines, ["\\end_inset", begin_layout], i)
        if i == -1:
            return -1
        if check_token(lines[i], begin_layout):
            return i
        i = find_beginning_of_inset(lines, i)
@ -51,7 +61,7 @@ def get_paragraph(lines, i, format):


def get_next_paragraph(lines, i, format):
    "Finds the paragraph after the paragraph that contains line i."
    tokens = ["\\begin_inset", "\\layout", "\\end_float", "\\the_end"]

    while i != -1:
@ -63,78 +73,79 @@ def get_next_paragraph(lines, i, format):


def find_beginning_of_inset(lines, i):
    "Find beginning of inset, where lines[i] is included."
    return find_beginning_of(lines, i, "\\begin_inset", "\\end_inset")


def find_end_of_inset(lines, i):
    r"Finds the matching \end_inset"
    return find_end_of(lines, i, "\\begin_inset", "\\end_inset")


def find_end_of_tabular(lines, i):
    "Finds the matching end of tabular."
    return find_end_of(lines, i, "<lyxtabular", "</lyxtabular")


def get_tabular_lines(lines, i):
    "Returns a lists of tabular lines."
    result = []
    i = i + 1
    j = find_end_of_tabular(lines, i)
    if j == -1:
        return []

    while i <= j:
        if check_token(lines[i], "\\begin_inset"):
            i = find_end_of_inset(lines, i) + 1
        else:
            result.append(i)
            i = i + 1
    return result


# End of helper functions
####################################################################


floats = {
    "footnote": ["\\begin_inset Foot", "collapsed true"],
    "margin": ["\\begin_inset Marginal", "collapsed true"],
    "fig": ["\\begin_inset Float figure", "wide false", "collapsed false"],
    "tab": ["\\begin_inset Float table", "wide false", "collapsed false"],
    "alg": ["\\begin_inset Float algorithm", "wide false", "collapsed false"],
    "wide-fig": ["\\begin_inset Float figure", "wide true", "collapsed false"],
    "wide-tab": ["\\begin_inset Float table", "wide true", "collapsed false"],
}

font_tokens = [
    "\\family",
    "\\series",
    "\\shape",
    "\\size",
    "\\emph",
    "\\bar",
    "\\noun",
    "\\color",
    "\\lang",
    "\\latex",
]

pextra_type3_rexp = re.compile(r".*\\pextra_type\s+3")
pextra_rexp = re.compile(
    r"\\pextra_type\s+(\S+)"
    + r"(\s+\\pextra_alignment\s+(\S+))?"
    + r"(\s+\\pextra_hfill\s+(\S+))?"
    + r"(\s+\\pextra_start_minipage\s+(\S+))?"
    + r"(\s+(\\pextra_widthp?)\s+(\S*))?"
)
||||||
def get_width(mo):
|
def get_width(mo):
|
||||||
" Get width from a regular expression. "
|
"Get width from a regular expression."
|
||||||
if mo.group(10):
|
if mo.group(10):
|
||||||
if mo.group(9) == "\\pextra_widthp":
|
if mo.group(9) == "\\pextra_widthp":
|
||||||
return mo.group(10)+"col%"
|
return mo.group(10) + "col%"
|
||||||
else:
|
else:
|
||||||
return mo.group(10)
|
return mo.group(10)
|
||||||
else:
|
else:
|
||||||
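For readers skimming the hunk above, here is a minimal sketch (not part of the commit, with a made-up input line) of how the reformatted `floats` table is consumed by `remove_oldfloat()` below: the old float type is looked up to produce the prologue of the replacement inset.

# Hypothetical illustration, not from the commit.
floats = {
    "footnote": ["\\begin_inset Foot", "collapsed true"],
    "fig": ["\\begin_inset Float figure", "wide false", "collapsed false"],
}

old_line = "\\begin_float fig"
floattype = old_line.split()[1]  # -> "fig"
# remove_oldfloat() falls back to "fig" for unknown types.
new = floats.get(floattype, floats["fig"]) + [""]
print(new)
# ['\\begin_inset Float figure', 'wide false', 'collapsed false', '']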
@@ -142,7 +153,7 @@ def get_width(mo):
 
 
 def remove_oldfloat(document):
-    r" Change \begin_float .. \end_float into \begin_inset Float .. \end_inset"
+    r"Change \begin_float .. \end_float into \begin_inset Float .. \end_inset"
     lines = document.body
     i = 0
     while True:
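As a quick sanity check on the rewrapped regular expression, the sketch below (not part of the commit; the sample line is made up) confirms that implicit string concatenation inside parentheses captures the same groups as the old backslash-continued version, and shows what `get_width()` returns for a percent width.

import re

# Hypothetical check, not from the commit.
pextra_rexp = re.compile(
    r"\\pextra_type\s+(\S+)"
    + r"(\s+\\pextra_alignment\s+(\S+))?"
    + r"(\s+\\pextra_hfill\s+(\S+))?"
    + r"(\s+\\pextra_start_minipage\s+(\S+))?"
    + r"(\s+(\\pextra_widthp?)\s+(\S*))?"
)

mo = pextra_rexp.search("\\pextra_type 3 \\pextra_widthp 45")
assert mo.group(1) == "3"
assert mo.group(9) == "\\pextra_widthp"
assert mo.group(10) + "col%" == "45col%"  # what get_width() returns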
@@ -150,7 +161,7 @@ def remove_oldfloat(document):
         if i == -1:
             break
         # There are no nested floats, so finding the end of the float is simple
-        j = find_token(lines, "\end_float", i+1)
+        j = find_token(lines, "\end_float", i + 1)
 
         floattype = lines[i].split()[1]
         if floattype not in floats:
@@ -158,14 +169,14 @@ def remove_oldfloat(document):
             floattype = "fig"
 
         # skip \end_deeper tokens
-        i2 = i+1
+        i2 = i + 1
         while check_token(lines[i2], "\end_deeper"):
-            i2 = i2+1
-        if i2 > i+1:
+            i2 = i2 + 1
+        if i2 > i + 1:
             j2 = get_next_paragraph(lines, j + 1, document.format + 1)
-            lines[j2:j2] = ["\end_deeper "]*(i2-(i+1))
+            lines[j2:j2] = ["\end_deeper "] * (i2 - (i + 1))
 
-        new = floats[floattype]+[""]
+        new = floats[floattype] + [""]
 
         # Check if the float is floatingfigure
         k = find_re(lines, pextra_type3_rexp, i, j)
@@ -173,12 +184,14 @@ def remove_oldfloat(document):
             mo = pextra_rexp.search(lines[k])
             width = get_width(mo)
             lines[k] = re.sub(pextra_rexp, "", lines[k])
-            new = ["\begin_inset Wrap figure",
-                   'width "%s"' % width,
-                   "collapsed false",
-                   ""]
+            new = [
+                "\begin_inset Wrap figure",
+                'width "%s"' % width,
+                "collapsed false",
+                "",
+            ]
 
-        new = new+lines[i2:j]+["\end_inset ", ""]
+        new = new + lines[i2:j] + ["\end_inset ", ""]
 
         # After a float, all font attributes are reseted.
         # We need to output '\foo default' for every attribute foo
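The splice in the hunk above is easiest to see with toy data. The sketch below is hypothetical (not from the commit): the wrap-figure prologue and a made-up float body are concatenated, then closed with an `\end_inset` pair, exactly as `new = new + lines[i2:j] + ...` does.

# Hypothetical, simplified assembly, not from the commit.
new = [
    "\\begin_inset Wrap figure",
    'width "45col%"',
    "collapsed false",
    "",
]
body = ["\\layout Standard", "", "Some caption text"]  # made-up float body
new = new + body + ["\\end_inset ", ""]
assert new[0] == "\\begin_inset Wrap figure"
assert new[-2] == "\\end_inset "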
@@ -197,20 +210,21 @@ def remove_oldfloat(document):
                     flag = 1
                     new.append("")
                 if token == "\lang":
-                    new.append(token+" "+ document.language)
+                    new.append(token + " " + document.language)
                 else:
-                    new.append(token+" default ")
+                    new.append(token + " default ")
 
-        lines[i:j+1] = new
-        i = i+1
+        lines[i : j + 1] = new
+        i = i + 1
 
 
 pextra_type2_rexp = re.compile(r".*\pextra_type\s+[12]")
 pextra_type2_rexp2 = re.compile(r".*(\layout|\pextra_type\s+2)")
 pextra_widthp = re.compile(r"\pextra_widthp")
 
 
 def remove_pextra(document):
-    " Remove pextra token."
+    "Remove pextra token."
     lines = document.body
     i = 0
     flag = 0
@@ -221,17 +235,17 @@ def remove_pextra(document):
 
         # Sometimes the \pextra_widthp argument comes in it own
         # line. If that happens insert it back in this line.
-        if pextra_widthp.search(lines[i+1]):
-            lines[i] = lines[i] + ' ' + lines[i+1]
-            del lines[i+1]
+        if pextra_widthp.search(lines[i + 1]):
+            lines[i] = lines[i] + " " + lines[i + 1]
+            del lines[i + 1]
 
         mo = pextra_rexp.search(lines[i])
         width = get_width(mo)
 
         if mo.group(1) == "1":
             # handle \pextra_type 1 (indented paragraph)
-            lines[i] = re.sub(pextra_rexp, "\leftindent "+width+" ", lines[i])
-            i = i+1
+            lines[i] = re.sub(pextra_rexp, "\leftindent " + width + " ", lines[i])
+            i = i + 1
             continue
 
         # handle \pextra_type 2 (minipage)
@@ -239,30 +253,31 @@ def remove_pextra(document):
         hfill = mo.group(5)
         lines[i] = re.sub(pextra_rexp, "", lines[i])
 
-        start = ["\begin_inset Minipage",
-                 "position " + position,
-                 "inner_position 0",
-                 'height "0pt"',
-                 'width "%s"' % width,
-                 "collapsed false"
-                 ]
+        start = [
+            "\begin_inset Minipage",
+            "position " + position,
+            "inner_position 0",
+            'height "0pt"',
+            'width "%s"' % width,
+            "collapsed false",
+        ]
         if flag:
             flag = 0
             if hfill:
-                start = ["",r"\hfill",""]+start
+                start = ["", r"\hfill", ""] + start
         else:
-            start = ['\layout %s' % document.default_layout,''] + start
+            start = ["\layout %s" % document.default_layout, ""] + start
 
-        j0 = find_token_backwards(lines,"\layout", i-1)
+        j0 = find_token_backwards(lines, "\layout", i - 1)
         j = get_next_paragraph(lines, i, document.format + 1)
 
         count = 0
         while True:
             # collect more paragraphs to the minipage
-            count = count+1
+            count = count + 1
             if j == -1 or not check_token(lines[j], "\layout"):
                 break
-            i = find_re(lines, pextra_type2_rexp2, j+1)
+            i = find_re(lines, pextra_type2_rexp2, j + 1)
             if i == -1:
                 break
             mo = pextra_rexp.search(lines[i])
@@ -272,60 +287,72 @@ def remove_pextra(document):
                 flag = 1
                 break
             lines[i] = re.sub(pextra_rexp, "", lines[i])
-            j = find_tokens(lines, ["\layout", "\end_float"], i+1)
+            j = find_tokens(lines, ["\layout", "\end_float"], i + 1)
 
         mid = lines[j0:j]
         end = ["\end_inset "]
 
-        lines[j0:j] = start+mid+end
-        i = i+1
+        lines[j0:j] = start + mid + end
+        i = i + 1
 
 
 def is_empty(lines):
-    " Are all the lines empty?"
+    "Are all the lines empty?"
     return list(filter(is_nonempty_line, lines)) == []
 
 
 move_rexp = re.compile(r"\(family|series|shape|size|emph|numeric|bar|noun|end_deeper)")
 ert_rexp = re.compile(r"\begin_inset|\hfill|.*\SpecialChar")
 spchar_rexp = re.compile(r"(.*)(\SpecialChar.*)")
 
 
 def remove_oldert(document):
-    " Remove old ERT inset."
-    ert_begin = ["\begin_inset ERT",
-                 "status Collapsed",
-                 "",
-                 '\layout %s' % document.default_layout,
-                 ""]
+    "Remove old ERT inset."
+    ert_begin = [
+        "\begin_inset ERT",
+        "status Collapsed",
+        "",
+        "\layout %s" % document.default_layout,
+        "",
+    ]
     lines = document.body
     i = 0
     while True:
         i = find_tokens(lines, ["\latex latex", "\layout LaTeX"], i)
         if i == -1:
             break
-        j = i+1
+        j = i + 1
         while True:
             # \end_inset is for ert inside a tabular cell. The other tokens
             # are obvious.
-            j = find_tokens(lines, ["\latex default", "\layout", "\begin_inset", "\end_inset", "\end_float", "\the_end"],
-                            j)
+            j = find_tokens(
+                lines,
+                [
+                    "\latex default",
+                    "\layout",
+                    "\begin_inset",
+                    "\end_inset",
+                    "\end_float",
+                    "\the_end",
+                ],
+                j,
+            )
             if check_token(lines[j], "\begin_inset"):
-                j = find_end_of_inset(lines, j)+1
+                j = find_end_of_inset(lines, j) + 1
             else:
                 break
 
         if check_token(lines[j], "\layout"):
-            while j-1 >= 0 and check_token(lines[j-1], "\begin_deeper"):
-                j = j-1
+            while j - 1 >= 0 and check_token(lines[j - 1], "\begin_deeper"):
+                j = j - 1
 
         # We need to remove insets, special chars & font commands from ERT text
         new = []
         new2 = []
         if check_token(lines[i], "\layout LaTeX"):
-            new = [r'\layout %s' % document.default_layout, "", ""]
+            new = [r"\layout %s" % document.default_layout, "", ""]
 
-        k = i+1
+        k = i + 1
         while True:
             k2 = find_re(lines, ert_rexp, k, j)
             inset = hfill = specialchar = 0
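A small hypothetical sketch (not from the commit) of what the reformatted `ert_begin` prologue builds: raw LaTeX is wrapped in an ERT inset, with "Standard" standing in for `document.default_layout`.

# Hypothetical sketch, not from the commit.
default_layout = "Standard"
ert_begin = [
    "\\begin_inset ERT",
    "status Collapsed",
    "",
    "\\layout %s" % default_layout,
    "",
]
latex_text = ["\\tableofcontents"]  # made-up ERT content
inset = ert_begin + latex_text + ["\\end_inset ", ""]
print("\n".join(inset))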
@@ -336,13 +363,13 @@ def remove_oldert(document):
             elif check_token(lines[k2], "\hfill"):
                 hfill = 1
                 del lines[k2]
-                j = j-1
+                j = j - 1
             else:
                 specialchar = 1
                 mo = spchar_rexp.match(lines[k2])
                 lines[k2] = mo.group(1)
                 specialchar_str = mo.group(2)
-                k2 = k2+1
+                k2 = k2 + 1
 
             tmp = []
             for line in lines[k:k2]:
@@ -361,19 +388,21 @@ def remove_oldert(document):
                 if new == []:
                     # This is not necessary, but we want the output to be
                     # as similar as posible to the lyx format
-                    lines[i-1] = lines[i-1]+" "
+                    lines[i - 1] = lines[i - 1] + " "
                 else:
-                    new = new+[" "]
+                    new = new + [" "]
             else:
-                new = new+ert_begin+tmp+["\end_inset ", ""]
+                new = new + ert_begin + tmp + ["\end_inset ", ""]
 
             if inset:
                 k3 = find_end_of_inset(lines, k2)
-                new = new+[""]+lines[k2:k3+1]+[""] # Put an empty line after \end_inset
-                k = k3+1
+                new = (
+                    new + [""] + lines[k2 : k3 + 1] + [""]
+                )  # Put an empty line after \end_inset
+                k = k3 + 1
                 # Skip the empty line after \end_inset
                 if not is_nonempty_line(lines[k]):
-                    k = k+1
+                    k = k + 1
                     new.append("")
             elif hfill:
                 new = new + ["\hfill", ""]
@@ -382,19 +411,19 @@ def remove_oldert(document):
                 if new == []:
                     # This is not necessary, but we want the output to be
                     # as similar as posible to the lyx format
-                    lines[i-1] = lines[i-1]+specialchar_str
+                    lines[i - 1] = lines[i - 1] + specialchar_str
                     new = [""]
                 else:
-                    new = new+[specialchar_str, ""]
+                    new = new + [specialchar_str, ""]
                 k = k2
             else:
                 break
 
-        new = new+new2
+        new = new + new2
         if not check_token(lines[j], "\latex "):
-            new = new+[""]+[lines[j]]
-        lines[i:j+1] = new
-        i = i+1
+            new = new + [""] + [lines[j]]
+        lines[i : j + 1] = new
+        i = i + 1
 
     # Delete remaining "\latex xxx" tokens
     i = 0
@@ -406,7 +435,7 @@ def remove_oldert(document):
 
 
 def remove_oldertinset(document):
-    " ERT insert are hidden feature of lyx 1.1.6. This might be removed in the future."
+    "ERT insert are hidden feature of lyx 1.1.6. This might be removed in the future."
     lines = document.body
     i = 0
     while True:
@@ -414,34 +443,34 @@ def remove_oldertinset(document):
         if i == -1:
             break
         j = find_end_of_inset(lines, i)
-        k = find_token(lines, "\layout", i+1)
+        k = find_token(lines, "\layout", i + 1)
         l = get_paragraph(lines, i, document.format + 1)
         if lines[k] == lines[l]:  # same layout
-            k = k+1
+            k = k + 1
         new = lines[k:j]
-        lines[i:j+1] = new
-        i = i+1
+        lines[i : j + 1] = new
+        i = i + 1
 
 
 def is_ert_paragraph(document, i):
-    " Is this a ert paragraph? "
+    "Is this a ert paragraph?"
     lines = document.body
     if not check_token(lines[i], "\layout"):
         return 0
     if not document.is_default_layout(get_layout(lines[i], document.default_layout)):
         return 0
 
-    i = find_nonempty_line(lines, i+1)
+    i = find_nonempty_line(lines, i + 1)
     if not check_token(lines[i], "\begin_inset ERT"):
         return 0
 
     j = find_end_of_inset(lines, i)
-    k = find_nonempty_line(lines, j+1)
+    k = find_nonempty_line(lines, j + 1)
     return check_token(lines[k], "\layout")
 
 
 def combine_ert(document):
-    " Combine ERT paragraphs."
+    "Combine ERT paragraphs."
     lines = document.body
     i = 0
     while True:
@@ -452,41 +481,41 @@ def combine_ert(document):
         count = 0
         text = []
         while is_ert_paragraph(document, j):
-            count = count+1
-            i2 = find_token(lines, "\layout", j+1)
-            k = find_token(lines, "\end_inset", i2+1)
-            text = text+lines[i2:k]
-            j = find_token(lines, "\layout", k+1)
+            count = count + 1
+            i2 = find_token(lines, "\layout", j + 1)
+            k = find_token(lines, "\end_inset", i2 + 1)
+            text = text + lines[i2:k]
+            j = find_token(lines, "\layout", k + 1)
             if j == -1:
                 break
 
         if count >= 2:
-            j = find_token(lines, "\layout", i+1)
+            j = find_token(lines, "\layout", i + 1)
             lines[j:k] = text
 
-        i = i+1
+        i = i + 1
 
 
 oldunits = ["pt", "cm", "in", "text%", "col%"]
 
 
 def get_length(lines, name, start, end):
-    " Get lenght."
+    "Get lenght."
     i = find_token(lines, name, start, end)
     if i == -1:
         return ""
     x = lines[i].split()
-    return x[2]+oldunits[int(x[1])]
+    return x[2] + oldunits[int(x[1])]
 
 
 def write_attribute(x, token, value):
-    " Write attribute."
+    "Write attribute."
     if value != "":
-        x.append("\t"+token+" "+value)
+        x.append("\t" + token + " " + value)
 
 
 def remove_figinset(document):
-    " Remove figinset."
+    "Remove figinset."
     lines = document.body
     i = 0
     while True:
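The old length encoding that `get_length()` decodes is "<name> <unit-index> <value>". A hypothetical example (not from the commit; the inset line is made up):

# Hypothetical example, not from the commit.
oldunits = ["pt", "cm", "in", "text%", "col%"]

line = "\twidth 2 3.5"  # made-up inset line: unit index 2 -> "in"
x = line.split()
assert x[2] + oldunits[int(x[1])] == "3.5in"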
@@ -495,26 +524,26 @@ def remove_figinset(document):
             break
         j = find_end_of_inset(lines, i)
 
-        if ( len(lines[i].split()) > 2 ):
-            lyxwidth = lines[i].split()[3]+"pt"
-            lyxheight = lines[i].split()[4]+"pt"
+        if len(lines[i].split()) > 2:
+            lyxwidth = lines[i].split()[3] + "pt"
+            lyxheight = lines[i].split()[4] + "pt"
         else:
             lyxwidth = ""
             lyxheight = ""
 
-        filename = get_value(lines, "file", i+1, j)
+        filename = get_value(lines, "file", i + 1, j)
 
-        width = get_length(lines, "width", i+1, j)
+        width = get_length(lines, "width", i + 1, j)
         # what does width=5 mean ?
-        height = get_length(lines, "height", i+1, j)
-        rotateAngle = get_value(lines, "angle", i+1, j)
+        height = get_length(lines, "height", i + 1, j)
+        rotateAngle = get_value(lines, "angle", i + 1, j)
         if width == "" and height == "":
             size_type = "0"
         else:
             size_type = "1"
 
-        flags = get_value(lines, "flags", i+1, j)
-        x = int(flags)%4
+        flags = get_value(lines, "flags", i + 1, j)
+        x = int(flags) % 4
         if x == 1:
             display = "monochrome"
         elif x == 2:
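A hypothetical check (not from the commit) of the display-mode selection above: the two low bits of the old figinset "flags" field pick the mode. Only the branches visible in these hunks are reproduced; the `x == 2` branch body is elided between hunks, so it is not guessed at here.

# Hypothetical check, not from the commit.
flags = "5"
x = int(flags) % 4
display = "monochrome" if x == 1 else "color"
assert (x, display) == (1, "monochrome")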
@@ -523,13 +552,13 @@ def remove_figinset(document):
             display = "color"
 
         subcaptionText = ""
-        subcaptionLine = find_token(lines, "subcaption", i+1, j)
+        subcaptionLine = find_token(lines, "subcaption", i + 1, j)
         if subcaptionLine != -1:
             subcaptionText = lines[subcaptionLine][11:]
             if subcaptionText != "":
-                subcaptionText = '"'+subcaptionText+'"'
+                subcaptionText = '"' + subcaptionText + '"'
 
-        k = find_token(lines, "subfigure", i+1,j)
+        k = find_token(lines, "subfigure", i + 1, j)
         if k == -1:
             subcaption = 0
         else:
@@ -552,15 +581,16 @@ def remove_figinset(document):
         write_attribute(new, "lyxwidth", lyxwidth)
         write_attribute(new, "lyxheight", lyxheight)
         new = new + ["\end_inset"]
-        lines[i:j+1] = new
+        lines[i : j + 1] = new
 
 
 attr_re = re.compile(r' \w*="(false|0|)"')
-line_re = re.compile(r'<(features|column|row|cell)')
+line_re = re.compile(r"<(features|column|row|cell)")
 
 
 def update_tabular(document):
-    " Convert tabular format 2 to 3."
-    regexp = re.compile(r'^\begin_inset\s+Tabular')
+    "Convert tabular format 2 to 3."
+    regexp = re.compile(r"^\begin_inset\s+Tabular")
     lines = document.body
     i = 0
     while True:
@@ -577,7 +607,7 @@ def update_tabular(document):
             if line_re.match(lines[k]):
                 lines[k] = re.sub(attr_re, "", lines[k])
 
-        i = i+1
+        i = i + 1
 
 
 ##
@@ -597,17 +627,19 @@ def update_tabular(document):
 false = 0
 true = 1
 
+
 class row:
-    " Simple data structure to deal with long table info."
+    "Simple data structure to deal with long table info."
+
     def __init__(self):
         self.endhead = false  # header row
         self.endfirsthead = false  # first header row
         self.endfoot = false  # footer row
         self.endlastfoot = false  # last footer row
 
 
 def haveLTFoot(row_info):
-    " Does row has LTFoot?"
+    "Does row has LTFoot?"
     for row_ in row_info:
         if row_.endfoot:
             return true
@@ -615,11 +647,11 @@ def haveLTFoot(row_info):
 
 
 def setHeaderFooterRows(hr, fhr, fr, lfr, rows_, row_info):
-    " Set Header/Footer rows."
+    "Set Header/Footer rows."
     endfirsthead_empty = false
     endlastfoot_empty = false
     # set header info
-    while (hr > 0):
+    while hr > 0:
         hr = hr - 1
         row_info[hr].endhead = true
 
@@ -671,8 +703,17 @@ def setHeaderFooterRows(hr, fhr, fr, lfr, rows_, row_info):
             lfr = lfr - 1
             row_info[lfr].endlastfoot = true
             row_info[lfr].endfoot = false
-    elif not row_info[fr - 1].endhead and not row_info[fr - 1].endfirsthead and not row_info[fr - 1].endfoot:
-        while lfr > 0 and not row_info[lfr - 1].endhead and not row_info[lfr - 1].endfirsthead and not row_info[lfr - 1].endfoot:
+    elif (
+        not row_info[fr - 1].endhead
+        and not row_info[fr - 1].endfirsthead
+        and not row_info[fr - 1].endfoot
+    ):
+        while (
+            lfr > 0
+            and not row_info[lfr - 1].endhead
+            and not row_info[lfr - 1].endfirsthead
+            and not row_info[lfr - 1].endfoot
+        ):
             lfr = lfr - 1
             row_info[lfr].endlastfoot = true
     elif haveLTFoot(row_info):
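A hypothetical miniature (not from the commit) of the row bookkeeping this function performs: the first `hr` rows of a longtable are flagged as header rows, just like the `while hr > 0` loop in the hunk further above.

# Hypothetical miniature, not from the commit.
class Row:
    def __init__(self):
        self.endhead = 0

row_info = [Row() for _ in range(4)]
hr = 2
while hr > 0:
    hr = hr - 1
    row_info[hr].endhead = 1

assert [r.endhead for r in row_info] == [1, 1, 0, 0]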
@@ -682,18 +723,24 @@ def setHeaderFooterRows(hr, fhr, fr, lfr, rows_, row_info):
 
 
 def insert_attribute(lines, i, attribute):
-    " Insert attribute in lines[i]."
-    last = lines[i].find('>')
-    lines[i] = lines[i][:last] + ' ' + attribute + lines[i][last:]
+    "Insert attribute in lines[i]."
+    last = lines[i].find(">")
+    lines[i] = lines[i][:last] + " " + attribute + lines[i][last:]
 
 
 rows_re = re.compile(r'rows="(\d*)"')
 longtable_re = re.compile(r'islongtable="(\w)"')
-ltvalues_re = re.compile(r'endhead="(-?\d*)" endfirsthead="(-?\d*)" endfoot="(-?\d*)" endlastfoot="(-?\d*)"')
-lt_features_re = re.compile(r'(endhead="-?\d*" endfirsthead="-?\d*" endfoot="-?\d*" endlastfoot="-?\d*")')
+ltvalues_re = re.compile(
+    r'endhead="(-?\d*)" endfirsthead="(-?\d*)" endfoot="(-?\d*)" endlastfoot="(-?\d*)"'
+)
+lt_features_re = re.compile(
+    r'(endhead="-?\d*" endfirsthead="-?\d*" endfoot="-?\d*" endlastfoot="-?\d*")'
+)
 
 
 def update_longtables(document):
-    " Update longtables to new format."
-    regexp = re.compile(r'^\begin_inset\s+Tabular')
+    "Update longtables to new format."
+    regexp = re.compile(r"^\begin_inset\s+Tabular")
     body = document.body
     i = 0
     while True:
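A hypothetical parse (not from the commit) of the old longtable header attributes that the rewrapped `ltvalues_re` captures; the sample line is made up.

import re

# Hypothetical parse, not from the commit.
ltvalues_re = re.compile(
    r'endhead="(-?\d*)" endfirsthead="(-?\d*)" endfoot="(-?\d*)" endlastfoot="(-?\d*)"'
)

sample = '<features islongtable="1" endhead="1" endfirsthead="-1" endfoot="3" endlastfoot="-1">'
res = ltvalues_re.search(sample)
assert res.group(1, 2, 3, 4) == ("1", "-1", "3", "-1")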
@@ -709,7 +756,7 @@ def update_longtables(document):
         rows = int(rows_re.search(body[i]).group(1))
 
         i = i + 1
-        i = find_token(body, '<features', i)
+        i = find_token(body, "<features", i)
         if i == -1:
             break
 
@@ -732,7 +779,9 @@ def update_longtables(document):
         if not res:
             continue
 
-        endfirsthead_empty, endlastfoot_empty = setHeaderFooterRows(res.group(1), res.group(2), res.group(3), res.group(4), rows, row_info)
+        endfirsthead_empty, endlastfoot_empty = setHeaderFooterRows(
+            res.group(1), res.group(2), res.group(3), res.group(4), rows, row_info
+        )
 
         if endfirsthead_empty:
             insert_attribute(body, i, 'firstHeadEmpty="true"')
@@ -742,10 +791,10 @@ def update_longtables(document):
 
         i = i + 1
         for j in range(rows):
-            i = find_token(body, '<row', i)
+            i = find_token(body, "<row", i)
 
             row_info[i].endfoot = false  # footer row
             row_info[i].endlastfoot = false  # last footer row
             if row_info[j].endhead:
                 insert_attribute(body, i, 'endhead="true"')
 
@@ -762,7 +811,7 @@ def update_longtables(document):
 
 
 def fix_oldfloatinset(document):
-    " Figure insert are hidden feature of lyx 1.1.6. This might be removed in the future."
+    "Figure insert are hidden feature of lyx 1.1.6. This might be removed in the future."
     lines = document.body
     i = 0
     while True:
@@ -772,11 +821,11 @@ def fix_oldfloatinset(document):
         j = find_token(lines, "collapsed", i)
         if j != -1:
             lines[j:j] = ["wide false"]
-        i = i+1
+        i = i + 1
 
 
 def change_listof(document):
-    " Change listof insets."
+    "Change listof insets."
     lines = document.body
     i = 0
     while True:
@@ -784,12 +833,12 @@ def change_listof(document):
         if i == -1:
             break
         type = re.search(r"listof(\w*)", lines[i]).group(1)[:-1]
-        lines[i] = "\begin_inset FloatList "+type
-        i = i+1
+        lines[i] = "\begin_inset FloatList " + type
+        i = i + 1
 
 
 def change_infoinset(document):
-    " Change info inset."
+    "Change info inset."
     lines = document.body
     i = 0
     while True:
@@ -802,36 +851,50 @@ def change_infoinset(document):
         if j == -1:
             break
 
-        note_lines = lines[i+1:j]
+        note_lines = lines[i + 1 : j]
         if len(txt) > 0:
-            note_lines = [txt]+note_lines
+            note_lines = [txt] + note_lines
 
         for line in note_lines:
-            new = new + [r'\layout %s' % document.default_layout, ""]
-            tmp = line.split('\\')
+            new = new + [r"\layout %s" % document.default_layout, ""]
+            tmp = line.split("\\")
             new = new + [tmp[0]]
             for x in tmp[1:]:
                 new = new + ["\backslash ", x]
         lines[i:j] = new
-        i = i+5
+        i = i + 5
 
 
 def change_header(document):
-    " Update header."
+    "Update header."
     lines = document.header
     i = find_token(lines, "\use_amsmath", 0)
     if i == -1:
         return
-    lines[i+1:i+1] = ["\use_natbib 0",
-                      "\use_numerical_citations 0"]
+    lines[i + 1 : i + 1] = ["\use_natbib 0", "\use_numerical_citations 0"]
 
 
 supported_versions = ["1.2.%d" % i for i in range(5)] + ["1.2"]
-convert = [[220, [change_header, change_listof, fix_oldfloatinset,
-                  update_tabular, update_longtables, remove_pextra,
-                  remove_oldfloat, remove_figinset, remove_oldertinset,
-                  remove_oldert, combine_ert, change_infoinset]]]
+convert = [
+    [
+        220,
+        [
+            change_header,
+            change_listof,
+            fix_oldfloatinset,
+            update_tabular,
+            update_longtables,
+            remove_pextra,
+            remove_oldfloat,
+            remove_figinset,
+            remove_oldertinset,
+            remove_oldert,
+            combine_ert,
+            change_infoinset,
+        ],
+    ]
+]
 revert = []
 
 
 if __name__ == "__main__":
@@ -16,22 +16,22 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 
-""" Convert files to the file format generated by lyx 1.3"""
+"""Convert files to the file format generated by lyx 1.3"""
 
 import re
-from parser_tools import find_token, find_end_of, get_value,\
-     find_token_exact
+from parser_tools import find_token, find_end_of, get_value, find_token_exact
 
 ####################################################################
 # Private helper functions
 
+
 def find_end_of_inset(lines, i):
     r"Finds the matching \end_inset"
     return find_end_of(lines, i, "\begin_inset", "\end_inset")
 
 
 def del_token(lines, token, start, end):
-    """ del_token(lines, token, start, end) -> int
+    """del_token(lines, token, start, end) -> int
 
     Find the lower line in lines where token is the first element and
     delete that line.
@@ -45,12 +45,13 @@ def del_token(lines, token, start, end):
     del lines[k]
     return end - 1
 
+
 # End of helper functions
 ####################################################################
 
 
 def change_insetgraphics(document):
-    " Change inset Graphics."
+    "Change inset Graphics."
     lines = document.body
     i = 0
     while True:
@@ -69,7 +70,7 @@ def change_insetgraphics(document):
         k = find_token_exact(lines, "rotate", i, j)
         if k != -1:
             del lines[k]
-            j = j-1
+            j = j - 1
         else:
             j = del_token(lines, "rotateAngle", i, j)
 
@@ -79,7 +80,7 @@ def change_insetgraphics(document):
         if k != -1:
             size_type = lines[k].split()[1]
             del lines[k]
-            j = j-1
+            j = j - 1
             if size_type in ["0", "original"]:
                 j = del_token(lines, "width", i, j)
                 j = del_token(lines, "height", i, j)
@@ -98,18 +99,20 @@ def change_insetgraphics(document):
         if k != -1:
             lyxsize_type = lines[k].split()[1]
             del lines[k]
-            j = j-1
+            j = j - 1
         j = del_token(lines, "lyxwidth", i, j)
         j = del_token(lines, "lyxheight", i, j)
-        if lyxsize_type not in ["2", "scale"] or \
-           get_value(lines, "lyxscale", i, j) == "100":
+        if (
+            lyxsize_type not in ["2", "scale"]
+            or get_value(lines, "lyxscale", i, j) == "100"
+        ):
             j = del_token(lines, "lyxscale", i, j)
 
-        i = i+1
+        i = i + 1
 
 
 def change_tabular(document):
-    " Change tabular."
+    "Change tabular."
     lines = document.body
     i = 0
     while True:
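A hypothetical truth-table check (not from the commit) for the condition that was rewrapped above: the `lyxscale` token is dropped unless it records a genuine, non-default scaling.

# Hypothetical check, not from the commit.
def drop_lyxscale(lyxsize_type, lyxscale):
    return lyxsize_type not in ["2", "scale"] or lyxscale == "100"

assert drop_lyxscale("0", "100")      # not a scale size -> drop
assert drop_lyxscale("scale", "100")  # default 100% scale -> drop
assert not drop_lyxscale("2", "73")   # genuine scaling -> keep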
@@ -117,13 +120,13 @@ def change_tabular(document):
         if i == -1:
             break
         if not re.search('width="0pt"', lines[i]):
-            lines[i] = re.sub(' alignment=".*?"',' alignment="block"',lines[i])
-        i = i+1
+            lines[i] = re.sub(' alignment=".*?"', ' alignment="block"', lines[i])
+        i = i + 1
 
 
 supported_versions = ["1.3.%d" % i for i in range(8)] + ["1.3"]
 convert = [[221, [change_insetgraphics, change_tabular]]]
 revert = []
 
 
 if __name__ == "__main__":
File diff suppressed because it is too large (8 files)
@@ -15,18 +15,13 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 
-""" Convert files to the file format generated by lyx 2.5"""
+"""Convert files to the file format generated by lyx 2.5"""
 
 import re
 
 # Uncomment only what you need to import, please.
 
-from parser_tools import (
-    find_end_of_inset,
-    find_token,
-    find_re,
-    get_value
-)
+from parser_tools import find_end_of_inset, find_token, find_re, get_value
 
 # count_pars_in_inset, del_complete_lines, del_token, find_end_of,
 # find_end_of_layout,
 # find_token_backwards, find_token_exact, get_bool_value,
@@ -38,10 +33,7 @@ from parser_tools import (
 # set_bool_value
 # find_tokens, check_token
 
-from lyx2lyx_tools import (
-    add_to_preamble,
-    latex_length
-)
+from lyx2lyx_tools import add_to_preamble, latex_length
 
 # put_cmd_in_ert, insert_to_preamble, lyx2latex,
 # revert_language, revert_flex_inset, str2bool,
 # revert_font_attrs,
@@ -52,23 +44,28 @@ from lyx2lyx_tools import (
 # Private helper functions
 
 
 
 ###############################################################################
 ###
 ### Conversion and reversion routines
 ###
 ###############################################################################
 
 
 def convert_url_escapes(document):
     """Unescape # and % in URLs with hyperref."""
 
     hyperref = find_token(document.header, "\use_hyperref true", 0) != -1
-    beamer = document.textclass in ['beamer', 'scrarticle-beamer', 'beamerposter', 'article-beamer']
+    beamer = document.textclass in [
+        "beamer",
+        "scrarticle-beamer",
+        "beamerposter",
+        "article-beamer",
+    ]
 
     if not hyperref and not beamer:
         return
 
-    rurl = re.compile(r'^[%#].*')
+    rurl = re.compile(r"^[%#].*")
     i = 0
     while True:
         i = find_token(document.body, "\begin_inset Flex URL", i)
@@ -93,12 +90,17 @@ def revert_url_escapes(document):
     """Unescape # and % in URLs with hyperref."""
 
     hyperref = find_token(document.header, "\use_hyperref true", 0) != -1
-    beamer = document.textclass in ['beamer', 'scrarticle-beamer', 'beamerposter', 'article-beamer']
+    beamer = document.textclass in [
+        "beamer",
+        "scrarticle-beamer",
+        "beamerposter",
+        "article-beamer",
+    ]
 
     if not hyperref and not beamer:
         return
 
-    rurl = re.compile(r'^(.*)([%#].*)')
+    rurl = re.compile(r"^(.*)([%#].*)")
     i = 0
     while True:
         i = find_token(document.body, "\begin_inset Flex URL", i)
@@ -121,12 +123,18 @@ def revert_url_escapes(document):
         document.body[surl : surl + 1] = [m.group(1), "\backslash", m.group(2)]
         i = surl
 
+
 def convert_url_escapes2(document):
     """Unescape backslashes in URLs with hyperref."""
 
     i = find_token(document.header, "\use_hyperref true", 0)
 
-    if i == -1 and document.textclass not in ['beamer', 'scrarticle-beamer', 'beamerposter', 'article-beamer']:
+    if i == -1 and document.textclass not in [
+        "beamer",
+        "scrarticle-beamer",
+        "beamerposter",
+        "article-beamer",
+    ]:
         return
 
     i = 0
@@ -147,12 +155,18 @@ def convert_url_escapes2(document):
         del document.body[bs + 2]
         i = bs + 1
 
+
 def revert_url_escapes2(document):
     """Escape backslashes in URLs with hyperref."""
 
     i = find_token(document.header, "\use_hyperref true", 0)
 
-    if i == -1 and document.textclass not in ['beamer', 'scrarticle-beamer', 'beamerposter', 'article-beamer']:
+    if i == -1 and document.textclass not in [
+        "beamer",
+        "scrarticle-beamer",
+        "beamerposter",
+        "article-beamer",
+    ]:
         return
 
     i = 0
@@ -196,6 +210,7 @@ def revert_glue_parskip(document):
     document.header[i] = "\paragraph_separation indent"
     document.header[j] = "\paragraph_indentation default"
 
+
 def convert_he_letter(document):
     """Convert hebrew letter to letter document class"""
 
@@ -209,16 +224,17 @@ def convert_he_letter(document):
 
 supported_versions = ["2.5.0", "2.5"]
 convert = [
     [621, [convert_url_escapes, convert_url_escapes2]],
     [622, []],
-    [623, [convert_he_letter]]
+    [623, [convert_he_letter]],
 ]
 
 
-revert = [[622, []],
-          [621, [revert_glue_parskip]],
-          [620, [revert_url_escapes2, revert_url_escapes]]
-         ]
+revert = [
+    [622, []],
+    [621, [revert_glue_parskip]],
+    [620, [revert_url_escapes2, revert_url_escapes]],
+]
 
 
 if __name__ == "__main__":
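For orientation, here is a hypothetical sketch (not from the commit, and not the real lyx2lyx driver) of how a conversion chain like the one above might be walked: each step bumps the file format and runs its list of converter functions. The lambdas and the list-based document are stand-ins.

# Hypothetical sketch, not from the commit.
convert = [
    [621, [lambda doc: doc.append("url escapes")]],
    [622, []],
    [623, [lambda doc: doc.append("hebrew letter")]],
]

document = []  # stand-in for a parsed document
for step, functions in convert:
    for function in functions:
        function(document)
print(document)  # ['url escapes', 'hebrew letter']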
if __name__ == "__main__":
|
||||||
|
@ -166,9 +166,10 @@ count_pars_in_inset(lines, i):
|
|||||||
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
|
||||||
# Utilities for one line
|
# Utilities for one line
|
||||||
def check_token(line, token):
|
def check_token(line, token):
|
||||||
""" check_token(line, token) -> bool
|
"""check_token(line, token) -> bool
|
||||||
|
|
||||||
Return True if token is present in line and is the first element
|
Return True if token is present in line and is the first element
|
||||||
else returns False.
|
else returns False.
|
||||||
@@ -179,7 +180,7 @@ def check_token(line, token):
 
 
 def is_nonempty_line(line):
-    """ is_nonempty_line(line) -> bool
+    """is_nonempty_line(line) -> bool
 
     Return False if line is either empty or it has only whitespaces,
     else return True."""
@@ -188,7 +189,7 @@ def is_nonempty_line(line):
 
 # Utilities for a list of lines
 def find_token(lines, token, start=0, end=0, ignorews=False):
-    """ find_token(lines, token, start[[, end], ignorews]) -> int
+    """find_token(lines, token, start[[, end], ignorews]) -> int
 
     Return the lowest line where token is found, and is the first
     element, in lines[start, end].
@@ -210,7 +211,7 @@ def find_token(lines, token, start=0, end=0, ignorews=False):
             x = lines[i].split()
             if len(x) < len(y):
                 continue
-            if x[:len(y)] == y:
+            if x[: len(y)] == y:
                 return i
         else:
             if lines[i].startswith(token):
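A hypothetical check (not from the commit) of the whitespace-insensitive comparison shown above: both sides are split into words, and the candidate line must begin with every word of the token.

# Hypothetical check, not from the commit.
token = "\\begin_inset   Tabular"
line = "\\begin_inset Tabular something"
y = token.split()
x = line.split()
assert len(x) >= len(y) and x[: len(y)] == y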
@@ -223,7 +224,7 @@ def find_token_exact(lines, token, start=0, end=0):
 
 
 def find_tokens(lines, tokens, start=0, end=0, ignorews=False):
-    """ find_tokens(lines, tokens, start[[, end], ignorews]) -> int
+    """find_tokens(lines, tokens, start[[, end], ignorews]) -> int
 
     Return the lowest line where one token in tokens is found, and is
     the first element, in lines[start, end].
@@ -240,7 +241,7 @@ def find_tokens(lines, tokens, start=0, end=0, ignorews=False):
                 y = token.split()
                 if len(x) < len(y):
                     continue
-                if x[:len(y)] == y:
+                if x[: len(y)] == y:
                     return i
             else:
                 if lines[i].startswith(token):
@@ -253,7 +254,7 @@ def find_tokens_exact(lines, tokens, start=0, end=0):
 
 
 def find_substring(lines, sub, start=0, end=0):
-    """ find_substring(lines, sub[, start[, end]]) -> int
+    """find_substring(lines, sub[, start[, end]]) -> int
 
     Return the lowest line number `i` in [start, end] where
     `sub` is a substring of line[i].
@@ -264,12 +265,12 @@ def find_substring(lines, sub, start=0, end=0):
         end = len(lines)
     for i in range(start, end):
         if sub in lines[i]:
            return i
     return -1
 
 
 def find_re(lines, rexp, start=0, end=0):
-    """ find_re(lines, rexp[, start[, end]]) -> int
+    """find_re(lines, rexp[, start[, end]]) -> int
 
     Return the lowest line number `i` in [start, end] where the regular
     expression object `rexp` matches at the beginning of line[i].
@@ -282,12 +283,12 @@ def find_re(lines, rexp, start=0, end=0):
         end = len(lines)
     for i in range(start, end):
         if rexp.match(lines[i]):
            return i
     return -1
 
 
 def find_token_backwards(lines, token, start):
-    """ find_token_backwards(lines, token, start) -> int
+    """find_token_backwards(lines, token, start) -> int
 
     Return the highest line where token is found, and is the first
     element, in lines[start, end].
@@ -300,7 +301,7 @@ def find_token_backwards(lines, token, start):
 
 
 def find_tokens_backwards(lines, tokens, start):
-    """ find_tokens_backwards(lines, token, start) -> int
+    """find_tokens_backwards(lines, token, start) -> int
 
     Return the highest line where token is found, and is the first
     element, in lines[end, start].
@@ -325,9 +326,9 @@ def find_complete_lines(lines, sublines, start=0, end=0):
 
     The `start` and `end` arguments work similar to list.index()
 
-    >>> find_complete_lines([1, 2, 3, 1, 1 ,2], [1, 2], start=1)
+    >>> find_complete_lines([1, 2, 3, 1, 1, 2], [1, 2], start=1)
     4
-    >>> find_complete_lines([1, 2, 3, 1, 1 ,2], [1, 2], start=1, end=4)
+    >>> find_complete_lines([1, 2, 3, 1, 1, 2], [1, 2], start=1, end=4)
     -1
 
     The return value can be used to substitute the sub-list.
@@ -336,7 +337,8 @@ def find_complete_lines(lines, sublines, start=0, end=0):
     >>> l = [1, 1, 2]
     >>> s = find_complete_lines(l, [1, 2])
     >>> if s != -1:
-    ...     l[s:s+2] = [3]; l
+    ...     l[s : s + 2] = [3]
+    ...     l
     [1, 3]
 
     See also del_complete_lines().
@@ -350,12 +352,12 @@ def find_complete_lines(lines, sublines, start=0, end=0):
         for j, value in enumerate(sublines):
             i = lines.index(value, start, end)
             if j and i != start:
-                start = i-j
+                start = i - j
                 break
             start = i + 1
         else:
-            return i +1 - N
+            return i + 1 - N
     except ValueError:  # `sublines` not found
         return -1
 
@@ -363,13 +365,14 @@ def find_across_lines(lines, sub, start=0, end=0):
     sublines = sub.splitlines()
     if len(sublines) > 2:
         # at least 3 lines: the middle one(s) are complete -> use index search
-        i = find_complete_lines(lines, sublines[1:-1], start+1, end-1)
-        if i < start+1:
+        i = find_complete_lines(lines, sublines[1:-1], start + 1, end - 1)
+        if i < start + 1:
             return -1
         try:
-            if (lines[i-1].endswith(sublines[0]) and
-                lines[i+len(sublines)].startswith(sublines[-1])):
-                return i-1
+            if lines[i - 1].endswith(sublines[0]) and lines[i + len(sublines)].startswith(
+                sublines[-1]
+            ):
+                return i - 1
         except IndexError:
             pass
     elif len(sublines) > 1:
@@ -377,9 +380,9 @@ def find_across_lines(lines, sub, start=0, end=0):
         i = find_token(lines, sublines[-1], start, end)
         if i < start + 1:
             return -1
-        if lines[i-1].endswith(sublines[0]):
-            return i-1
+        if lines[i - 1].endswith(sublines[0]):
+            return i - 1
     else:  # no line-break, may be in the middle of a line
         if end == 0 or end > len(lines):
             end = len(lines)
         for i in range(start, end):
@@ -407,14 +410,14 @@ def get_value(lines, token, start=0, end=0, default="", delete=False):
     # see test_parser_tools.py
     l = lines[i].split(None, 1)
     if delete:
-        del(lines[i])
+        del lines[i]
     if len(l) > 1:
         return l[1].strip()
     return default
 
 
 def get_quoted_value(lines, token, start=0, end=0, default="", delete=False):
-    """ get_quoted_value(lines, token, start[[, end], default]) -> string
+    """get_quoted_value(lines, token, start[[, end], default]) -> string
 
     Find the next line that looks like:
       token "followed by other stuff"
@@ -426,15 +429,15 @@ def get_quoted_value(lines, token, start=0, end=0, default="", delete=False):
     """
     val = get_value(lines, token, start, end, "", delete)
     if not val:
         return default
     return val.strip('"')


-bool_values = {"true": True, "1": True,
-               "false": False, "0": False}
+bool_values = {"true": True, "1": True, "false": False, "0": False}


 def get_bool_value(lines, token, start=0, end=0, default=None, delete=False):
-    """ get_bool_value(lines, token, start[[, end], default]) -> string
+    """get_bool_value(lines, token, start[[, end], default]) -> string

     Find the next line that looks like:
       `token` <bool_value>
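
Following the docstring above, a minimal example of get_quoted_value() (the sample line mirrors the docstring):

    lines = ['token "followed by other stuff"']
    get_quoted_value(lines, "token")  # -> 'followed by other stuff'
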
@@ -456,11 +459,11 @@ def set_bool_value(lines, token, value, start=0, end=0):
     i = find_token(lines, token, start, end)
     if i == -1:
         raise ValueError
-    oldvalue = get_bool_value(lines, token, i, i+1)
+    oldvalue = get_bool_value(lines, token, i, i + 1)
     if oldvalue is value:
         return oldvalue
     # set to new value
-    if get_quoted_value(lines, token, i, i+1) in ('0', '1'):
+    if get_quoted_value(lines, token, i, i + 1) in ("0", "1"):
         lines[i] = "%s %d" % (token, value)
     else:
         lines[i] = f"{token} {str(value).lower()}"
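
A sketch of the boolean helpers in combination (the `\\use_xetex` token is illustrative):

    lines = ["\\use_xetex false"]
    get_bool_value(lines, "\\use_xetex")        # -> False (looked up via bool_values)
    set_bool_value(lines, "\\use_xetex", True)  # lines[0] is now "\\use_xetex true"
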
@@ -473,21 +476,21 @@ def get_option_value(line, option):
     rx = re.compile(rx)
     m = rx.search(line)
     if not m:
         return ""
     return m.group(1)


 def set_option_value(line, option, value):
-    rx = '(' + option + r'\s*=\s*")[^"]+"'
+    rx = "(" + option + r'\s*=\s*")[^"]+"'
     rx = re.compile(rx)
     m = rx.search(line)
     if not m:
         return line
-    return re.sub(rx, r'\g<1>' + value + '"', line)
+    return re.sub(rx, r"\g<1>" + value + '"', line)


 def del_token(lines, token, start=0, end=0):
-    """ del_token(lines, token, start, end) -> int
+    """del_token(lines, token, start, end) -> int

     Find the first line in lines where token is the first element
     and delete that line. Returns True if we deleted a line, False
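
A minimal sketch of set_option_value() on an inset status line (the sample line is illustrative; its regex matches `option="..."` and swaps the quoted value):

    line = 'status="open" name="note"'
    set_option_value(line, "status", "collapsed")
    # -> 'status="collapsed" name="note"'
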
@@ -499,6 +502,7 @@ def del_token(lines, token, start=0, end=0):
         del lines[k]
     return True

+
 def del_complete_lines(lines, sublines, start=0, end=0):
     """Delete first occurence of `sublines` in list `lines`.

@@ -516,7 +520,7 @@ def del_complete_lines(lines, sublines, start=0, end=0):
     i = find_complete_lines(lines, sublines, start, end)
     if i == -1:
         return False
-    del(lines[i:i+len(sublines)])
+    del lines[i : i + len(sublines)]
     return True

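
Behavior of del_complete_lines() in one line, mirroring the test further down:

    l = ["\\begin_inset Quotes eld", "\\end_inset", ""]
    del_complete_lines(l, ["\\begin_inset Quotes eld", "\\end_inset"])  # -> True
    # l is now [""]
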
@@ -532,19 +536,19 @@ def del_value(lines, token, start=0, end=0, default=None):
     i = find_token_exact(lines, token, start, end)
     if i == -1:
         return default
-    return lines.pop(i)[len(token):].strip()
+    return lines.pop(i)[len(token) :].strip()


 def find_beginning_of(lines, i, start_token, end_token):
     count = 1
     while i > 0:
-        i = find_tokens_backwards(lines, [start_token, end_token], i-1)
+        i = find_tokens_backwards(lines, [start_token, end_token], i - 1)
         if i == -1:
             return -1
         if lines[i].startswith(end_token):
-            count = count+1
+            count = count + 1
         else:
-            count = count-1
+            count = count - 1
         if count == 0:
             return i
     return -1
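
A sketch of the backwards bracket matching in find_beginning_of() (the nesting is illustrative):

    lines = ["\\begin_inset Note",        # 0
             "\\begin_inset Quotes eld",  # 1
             "\\end_inset",               # 2
             "text",                      # 3
             "\\end_inset"]               # 4
    find_beginning_of(lines, 4, "\\begin_inset", "\\end_inset")  # -> 0
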
@@ -554,13 +558,13 @@ def find_end_of(lines, i, start_token, end_token):
     count = 1
     n = len(lines)
     while i < n:
-        i = find_tokens(lines, [end_token, start_token], i+1)
+        i = find_tokens(lines, [end_token, start_token], i + 1)
         if i == -1:
             return -1
         if lines[i].startswith(start_token):
-            count = count+1
+            count = count + 1
         else:
-            count = count-1
+            count = count - 1
         if count == 0:
             return i
     return -1
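
And the forward direction, as used by find_end_of_inset() below (same illustrative nesting):

    lines = ["\\begin_inset Note",
             "\\begin_inset Quotes eld",
             "\\end_inset",
             "\\end_inset"]
    find_end_of(lines, 0, "\\begin_inset", "\\end_inset")  # -> 3, skipping the nested pair
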
@@ -576,16 +580,16 @@ def find_nonempty_line(lines, start=0, end=0):


 def find_end_of_inset(lines, i):
-    " Find end of inset, where lines[i] is included."
+    "Find end of inset, where lines[i] is included."
     return find_end_of(lines, i, "\\begin_inset", "\\end_inset")


 def find_end_of_layout(lines, i):
-    " Find end of layout, where lines[i] is included."
+    "Find end of layout, where lines[i] is included."
     return find_end_of(lines, i, "\\begin_layout", "\\end_layout")


-def is_in_inset(lines, i, inset, default=(-1,-1)):
+def is_in_inset(lines, i, inset, default=(-1, -1)):
     """
     Check if line i is in an inset of the given type.
     If so, return starting and ending lines, otherwise `default`.
@@ -601,126 +605,133 @@ def is_in_inset(lines, i, inset, default=(-1,-1)):
     """
     start = find_token_backwards(lines, inset, i)
     if start == -1:
         return default
     end = find_end_of_inset(lines, start)
     if end < i:  # this includes the notfound case.
         return default
     return (start, end)


 def get_containing_inset(lines, i):
-    '''
+    """
     Finds out what kind of inset line i is within. Returns a
     list containing (i) what follows \\begin_inset on the line
     on which the inset begins, plus the starting and ending line.
     Returns False on any kind of error or if it isn't in an inset.
-    '''
+    """
     j = i
     while True:
         stins = find_token_backwards(lines, "\\begin_inset", j)
         if stins == -1:
             return False
         endins = find_end_of_inset(lines, stins)
         if endins > j:
             break
         j = stins - 1

     if endins < i:
         return False

     inset = get_value(lines, "\\begin_inset", stins)
     if inset == "":
         # shouldn't happen
         return False
     return (inset, stins, endins)


 def get_containing_layout(lines, i):
-    '''
+    """
     Find out what kind of layout line `i` is within.
     Return a tuple
         (layoutname, layoutstart, layoutend, startofcontent)
     containing
         * layout style/name,
         * start line number,
         * end line number, and
         * number of first paragraph line (after all params).
     Return `False` on any kind of error.
-    '''
+    """
     j = i
     while True:
         stlay = find_token_backwards(lines, "\\begin_layout", j)
         if stlay == -1:
             return False
         endlay = find_end_of_layout(lines, stlay)
         if endlay > i:
             break
         j = stlay - 1

     if endlay < i:
         return False

     layoutname = get_value(lines, "\\begin_layout", stlay)
     if layoutname == "":  # layout style missing
         # TODO: What shall we do in this case?
         pass
         # layoutname == "Standard" # use same fallback as the LyX parser:
         # raise ValueError("Missing layout name on line %d"%stlay) # diagnosis
         # return False # generic error response
-    par_params = ["\\noindent", "\\indent", "\\indent-toggle", "\\leftindent",
-                  "\\start_of_appendix", "\\paragraph_spacing", "\\align",
-                  "\\labelwidthstring"]
+    par_params = [
+        "\\noindent",
+        "\\indent",
+        "\\indent-toggle",
+        "\\leftindent",
+        "\\start_of_appendix",
+        "\\paragraph_spacing",
+        "\\align",
+        "\\labelwidthstring",
+    ]
     stpar = stlay
     while True:
         stpar += 1
-        if lines[stpar].split(' ', 1)[0] not in par_params:
+        if lines[stpar].split(" ", 1)[0] not in par_params:
             break
     return (layoutname, stlay, endlay, stpar)


 def count_pars_in_inset(lines, i):
-    '''
+    """
     Counts the paragraphs within this inset
-    '''
+    """
     ins = get_containing_inset(lines, i)
     if ins == -1:
         return -1
     pars = 0
     for j in range(ins[1], ins[2]):
-        m = re.match(r'\\begin_layout (.*)', lines[j])
+        m = re.match(r"\\begin_layout (.*)", lines[j])
         found_inset = get_containing_inset(lines, j)
         if m and found_inset and found_inset[1] == ins[1]:
             pars += 1

     return pars


 def find_end_of_sequence(lines, i):
-    '''
+    """
     Returns the end of a sequence of identical layouts.
-    '''
+    """
     lay = get_containing_layout(lines, i)
     if lay == False:
         return -1
     layout = lay[0]
     endlay = lay[2]
     i = endlay
     while True:
-        m = re.match(r'\\begin_layout (.*)', lines[i])
+        m = re.match(r"\\begin_layout (.*)", lines[i])
         if m and m.group(1) != layout:
             return endlay
         elif lines[i] == "\\begin_deeper":
             j = find_end_of(lines, i, "\\begin_deeper", "\\end_deeper")
             if j != -1:
                 i = j
                 endlay = j
                 continue
         if m and m.group(1) == layout:
             endlay = find_end_of_layout(lines, i)
             i = endlay
             continue
         if i == len(lines) - 1:
             break
         i = i + 1

     return endlay
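
A worked sketch of get_containing_layout() with one paragraph parameter (the document lines are illustrative):

    lines = ["\\begin_layout Standard",
             "\\noindent",
             "first line of paragraph text",
             "\\end_layout"]
    get_containing_layout(lines, 2)
    # -> ("Standard", 0, 3, 2): name, start line, end line, first content line
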
@@ -17,6 +17,7 @@

 # We need all this because lyx2lyx does not have the .py termination
 import imp
+
 lyx2lyx = imp.load_source("lyx2lyx", "lyx2lyx", open("lyx2lyx"))

 # Profiler used in the study
@@ -34,16 +35,17 @@ Example:
     ./profiling.py -ou.lyx ../doc/UserGuide.lyx
 """


+
 def main():
     # This will only work with python >= 2.2, the version where this module was added
     prof = hotshot.Profile("lyx2lyx.prof")  # Use temporary file, here?
     benchtime = prof.runcall(lyx2lyx.main)
     prof.close()

     # After the tests, show the profile analysis.
     stats = hotshot.stats.load("lyx2lyx.prof")
     stats.strip_dirs()
-    stats.sort_stats('time', 'calls')
+    stats.sort_stats("time", "calls")
     stats.print_stats(20)

     os.unlink("lyx2lyx.prof")
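
hotshot only exists on Python 2; a rough modern equivalent of main() with cProfile/pstats would look like this (a sketch, not part of the commit):

    import cProfile
    import pstats

    cProfile.run("lyx2lyx.main()", "lyx2lyx.prof")
    stats = pstats.Stats("lyx2lyx.prof")
    stats.strip_dirs()
    stats.sort_stats("time", "calls")
    stats.print_stats(20)
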
@@ -15,52 +15,56 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

-" This modules tests the auxiliary functions for lyx2lyx."
+"This modules tests the auxiliary functions for lyx2lyx."

 from lyx2lyx_tools import *

 import unittest

-class TestParserTools(unittest.TestCase):

+class TestParserTools(unittest.TestCase):
     def test_put_cmd_in_ert(self):
-        ert = ['\\begin_inset ERT',
-               'status collapsed',
-               '',
-               '\\begin_layout Plain Layout',
-               '',
-               '',
-               '\\backslash',
-               'texttt{Gr',
-               '\\backslash',
-               '"{u}',
-               '\\backslash',
-               'ss{}e}',
-               '\\end_layout',
-               '',
-               '\\end_inset']
+        ert = [
+            "\\begin_inset ERT",
+            "status collapsed",
+            "",
+            "\\begin_layout Plain Layout",
+            "",
+            "",
+            "\\backslash",
+            "texttt{Gr",
+            "\\backslash",
+            '"{u}',
+            "\\backslash",
+            "ss{}e}",
+            "\\end_layout",
+            "",
+            "\\end_inset",
+        ]
         ert_open = ert[:]
-        ert_open[1] = 'status open'
-        ert_paragraph = ["\\begin_layout Standard",
-                         '\\begin_inset ERT',
-                         'status collapsed',
-                         '',
-                         '\\begin_layout Plain Layout',
-                         '',
-                         '',
-                         '\\backslash',
-                         'texttt{Gr',
-                         '\\backslash',
-                         '"{u}',
-                         '\\backslash',
-                         'ss{}e}',
-                         '\\end_layout',
-                         '',
-                         '\\end_inset',
-                         '',
-                         '',
-                         '\\end_layout',
-                         '']
+        ert_open[1] = "status open"
+        ert_paragraph = [
+            "\\begin_layout Standard",
+            "\\begin_inset ERT",
+            "status collapsed",
+            "",
+            "\\begin_layout Plain Layout",
+            "",
+            "",
+            "\\backslash",
+            "texttt{Gr",
+            "\\backslash",
+            '"{u}',
+            "\\backslash",
+            "ss{}e}",
+            "\\end_layout",
+            "",
+            "\\end_inset",
+            "",
+            "",
+            "\\end_layout",
+            "",
+        ]
         self.assertEqual(put_cmd_in_ert("\\texttt{Grüße}"), ert)
         self.assertEqual(put_cmd_in_ert(["\\texttt{Grüße}"]), ert)
        self.assertEqual(put_cmd_in_ert("\\texttt{Grüße}", is_open=True), ert_open)
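
As the fixtures show, put_cmd_in_ert() wraps LaTeX code in an ERT inset, emitting backslashes as "\\backslash" lines and expanding "ü"/"ß" to their LaTeX macros. The usual call site splices the returned lines into the body (a sketch; `document` and `i` are hypothetical):

    document.body[i:i] = put_cmd_in_ert("\\texttt{Grüße}")
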
@@ -71,8 +75,7 @@ class TestParserTools(unittest.TestCase):
         self.assertEqual(latex_length("35baselineskip%"), (True, "0.35\\baselineskip"))
         self.assertEqual(latex_length("11em"), (False, "11em"))
         self.assertEqual(latex_length("-0.4pt"), (False, "-0.4pt"))


-
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -15,7 +15,7 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

-" This modules tests the functions used to help parse lines."
+"This modules tests the functions used to help parse lines."

 from parser_tools import *

@@ -77,45 +77,40 @@ newheader = r"""\begin_header


 class TestParserTools(unittest.TestCase):
-
     def test_check_token(self):
         line = "\\begin_layout Standard"

-        self.assertEqual(check_token(line, '\\begin_layout'), True)
-        self.assertEqual(check_token(line, 'Standard'), False)
+        self.assertEqual(check_token(line, "\\begin_layout"), True)
+        self.assertEqual(check_token(line, "Standard"), False)

-
     def test_is_nonempty_line(self):
         self.assertEqual(is_nonempty_line(lines[0]), False)
         self.assertEqual(is_nonempty_line(lines[1]), True)
-        self.assertEqual(is_nonempty_line(" "*5), False)
+        self.assertEqual(is_nonempty_line(" " * 5), False)

-
     def test_find_token(self):
-        self.assertEqual(find_token(lines, '\\emph', 0), 7)
+        self.assertEqual(find_token(lines, "\\emph", 0), 7)
         # no line starts with "emph" (without backspace):
-        self.assertEqual(find_token(lines, 'emph', 0), -1)
+        self.assertEqual(find_token(lines, "emph", 0), -1)
         # token on line[start] is found:
-        self.assertEqual(find_token(lines, '\\emph', 7), 7)
-        self.assertEqual(find_token(lines, '\\emph', 8), 9)
+        self.assertEqual(find_token(lines, "\\emph", 7), 7)
+        self.assertEqual(find_token(lines, "\\emph", 8), 9)
         # token on line[end] is not found:
-        self.assertEqual(find_token(lines, '\\emph', 0, 7), -1)
+        self.assertEqual(find_token(lines, "\\emph", 0, 7), -1)
         # `ignorews` looks for whitespace-separated tokens:
-        self.assertEqual(find_token(lines, '\\emp', 0, ignorews=True), -1)
-        self.assertEqual(find_token(lines, '\\emph',0, ignorews=True), 7)
-        self.assertEqual(find_token(lines, '\\emph', 7, ignorews=True), 7)
-        self.assertEqual(find_token(lines, '\\emph', 0, 7, True), -1)
+        self.assertEqual(find_token(lines, "\\emp", 0, ignorews=True), -1)
+        self.assertEqual(find_token(lines, "\\emph", 0, ignorews=True), 7)
+        self.assertEqual(find_token(lines, "\\emph", 7, ignorews=True), 7)
+        self.assertEqual(find_token(lines, "\\emph", 0, 7, True), -1)
         # only first token is found:
-        self.assertEqual(find_token(lines, 'Quotes', 0), -1)
-        self.assertEqual(find_token(lines, 'Quotes', 0, ignorews=True), -1)
+        self.assertEqual(find_token(lines, "Quotes", 0), -1)
+        self.assertEqual(find_token(lines, "Quotes", 0, ignorews=True), -1)

-
     def test_find_tokens(self):
-        tokens = ['\\emph', '\\end_inset']
+        tokens = ["\\emph", "\\end_inset"]
         self.assertEqual(find_tokens(lines, tokens, 0), 4)
         self.assertEqual(find_tokens(lines, tokens, 0, 4), -1)

-
     def test_find_substring(self):
         # Quotes is not a "token" (substring at the start of any line):
         self.assertEqual(find_token(lines, "Quotes", 0), -1)
@@ -123,9 +118,8 @@ class TestParserTools(unittest.TestCase):
         # return -1 on failure:
         self.assertEqual(find_substring(lines, "Qualen", 0), -1)

-
     def test_find_re(self):
-        regexp_object = re.compile(r'\\begin.*Quote')
+        regexp_object = re.compile(r"\\begin.*Quote")
         # matching starts with line[start] (default: start=0)
         self.assertEqual(find_re(lines, regexp_object), 3)
         self.assertEqual(find_re(lines, regexp_object, start=3), 3)
@@ -134,13 +128,12 @@ class TestParserTools(unittest.TestCase):
         self.assertEqual(find_re(lines, regexp_object, start=4, end=11), -1)

     def test_find_complete_lines(self):
-        sublines = ["\\begin_inset Quotes eld",
-                    "\\end_inset"]
+        sublines = ["\\begin_inset Quotes eld", "\\end_inset"]
         # return index of first line of sublines:
         self.assertEqual(find_complete_lines(lines, sublines), 3)
         self.assertEqual(find_complete_lines(lines, ["\\end_inset"]), 4)
         # return -1 if sublines is not found:
-        self.assertEqual(find_complete_lines(lines, ['x']), -1)
+        self.assertEqual(find_complete_lines(lines, ["x"]), -1)
         # search includes line `start`:
         self.assertEqual(find_complete_lines(lines, sublines, 3), 3)
         self.assertEqual(find_complete_lines(lines, sublines, 4), 20)
@@ -150,7 +143,6 @@ class TestParserTools(unittest.TestCase):
         # an empty list is always found
         self.assertEqual(find_complete_lines(lines, []), 0)

-
     def test_find_across_lines(self):
         # sub with at least 2 line-breaks (uses find_complete_lines):
         sub = "Quotes eld\n\\end_inset\n\n\n"
@@ -182,7 +174,6 @@ class TestParserTools(unittest.TestCase):
         self.assertEqual(find_across_lines(lines, sub, 2, 1), -1)
         self.assertEqual(find_across_lines(lines, "XXX"), -1)

-
     def test_get_value(self):
         self.assertEqual(get_value(lines, "\\begin_inset"), "Quotes eld")
         # TODO: do we want this:
@@ -216,12 +207,11 @@ class TestParserTools(unittest.TestCase):

     def test_del_complete_lines(self):
         l = lines[:]
-        sublines = ["\\begin_inset Quotes eld",
-                    "\\end_inset"]
+        sublines = ["\\begin_inset Quotes eld", "\\end_inset"]
         # normal operation: remove the first occurence of sublines:
         self.assertEqual(del_complete_lines(l, sublines), True)
         self.assertEqual(l[3], "")
-        self.assertEqual(len(l), len(lines)-len(sublines))
+        self.assertEqual(len(l), len(lines) - len(sublines))
         # special cases:
         l = lines[:]
         self.assertEqual(del_complete_lines(l, sublines, 21), False)
@@ -239,5 +229,5 @@ class TestParserTools(unittest.TestCase):
         self.assertEqual(del_value(l, "\\end_inset", default=None), "")


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -15,15 +15,15 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

-" Import unicode_reps from this module for access to the unicode<->LaTeX mapping. "
+"Import unicode_reps from this module for access to the unicode<->LaTeX mapping."

 import sys, os, re, codecs


 def read_unicodesymbols():
-    " Read the unicodesymbols list of unicode characters and corresponding commands."
+    "Read the unicodesymbols list of unicode characters and corresponding commands."
     pathname = os.path.abspath(os.path.dirname(__file__))
-    filename = os.path.join(pathname.strip('lyx2lyx'), 'unicodesymbols')
+    filename = os.path.join(pathname.strip("lyx2lyx"), "unicodesymbols")

     # Read as Unicode strings in both, Python 2 and 3
     # Specify the encoding for those systems where the default is not UTF-8
@@ -35,28 +35,31 @@ def read_unicodesymbols():
     # as: \"u or even \" u.
     # The two backslashes in the string literal are needed to specify a literal
     # backslash in the regex. Without r prefix, these would be four backslashes.
-    r = re.compile(r'\\(\W)\{(\w)\}')
+    r = re.compile(r"\\(\W)\{(\w)\}")

     spec_chars = []
     for line in fp.readlines():
-        if not line.strip() or line.startswith('#'):
+        if not line.strip() or line.startswith("#"):
             # skip empty lines and comments
             continue
         # Note: backslashes in the string literals with r prefix are not escaped,
         # so one backslash in the source file equals one backslash in memory.
         # Without r prefix backslahses are escaped, so two backslashes in the
         # source file equal one backslash in memory.
-        line=line.replace(' "',' ')  # remove all quotation marks with spaces before
-        line=line.replace('" ',' ')  # remove all quotation marks with spaces after
-        line=line.replace(r'\"','"')  # unescape "
-        line=line.replace(r'\\','\\')  # unescape \
+        line = line.replace(' "', " ")  # remove all quotation marks with spaces before
+        line = line.replace('" ', " ")  # remove all quotation marks with spaces after
+        line = line.replace(r"\"", '"')  # unescape "
+        line = line.replace(r"\\", "\\")  # unescape \
         try:
-            [ucs4,command,dead] = line.split(None,2)
+            [ucs4, command, dead] = line.split(None, 2)
             if command[0:1] != "\\":
                 continue
             literal_char = chr(int(ucs4, 16))
-            if (line.find("notermination=text") < 0 and
-                line.find("notermination=both") < 0 and command[-1] != "}"):
+            if (
+                line.find("notermination=text") < 0
+                and line.find("notermination=both") < 0
+                and command[-1] != "}"
+            ):
                 command = command + "{}"
             spec_chars.append([command, literal_char])
         except:
@@ -66,7 +69,7 @@ def read_unicodesymbols():
             command = "\\"
             commandbl = command
             command += m.group(1) + m.group(2)
-            commandbl += m.group(1) + ' ' + m.group(2)
+            commandbl += m.group(1) + " " + m.group(2)
             spec_chars.append([command, literal_char])
             spec_chars.append([commandbl, literal_char])
     fp.close()
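
Tracing the accent handling above with a concrete entry (illustrative): for a command like \"{u}, the regex matches and two extra spellings are registered alongside the original:

    m = r.match('\\"{u}')   # group(1) == '"', group(2) == 'u'
    # command   -> '\\"u'   (braces dropped)
    # commandbl -> '\\" u'  (braces replaced by a space)
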