# This file is part of lyx2lyx
# Copyright (C) 2018 The LyX team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

"""Convert files to the file format generated by lyx 2.4"""

import re
from datetime import date, datetime, time

from lyx2lyx_tools import (
    add_to_preamble,
    insert_to_preamble,
    lyx2latex,
    put_cmd_in_ert,
    revert_flex_inset,
    revert_language,
    str2bool,
)
from parser_tools import (
    count_pars_in_inset,
    del_complete_lines,
    del_token,
    find_end_of,
    find_end_of_inset,
    find_end_of_layout,
    find_re,
    find_token,
    find_token_backwards,
    find_token_exact,
    get_bool_value,
    get_containing_inset,
    get_containing_layout,
    get_option_value,
    get_quoted_value,
    get_value,
    is_in_inset,
)


####################################################################
# Private helper functions


def add_preamble_fonts(document, fontmap):
    """Add collected font-packages with their option to user-preamble"""

    for pkg in fontmap:
        if len(fontmap[pkg]) > 0:
            xoption = "[" + ",".join(fontmap[pkg]) + "]"
        else:
            xoption = ""
        preamble = f"\\usepackage{xoption}{{{pkg}}}"
        add_to_preamble(document, [preamble])


def createkey(pkg, options):
    options.sort()
    return pkg + ":" + "-".join(options)


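# A minimal sketch of createkey() (assumed example values, not part of
# lyx2lyx); note that the options list is sorted in place as a side effect:
#
#   >>> createkey("plex-sans", ["thin"])
#   'plex-sans:thin'
#   >>> createkey("paratype", [])
#   'paratype:'

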
class fontinfo:
    def __init__(self):
        self.fontname = None  # key into font2pkgmap
        self.fonttype = None  # roman,sans,typewriter,math
        self.scaletype = None  # None,sf,tt
        self.scaleopt = None  # None, 'scaled', 'scale'
        self.scaleval = 1
        self.package = None
        self.options = []
        self.pkgkey = None  # key into pkg2fontmap
        self.osfopt = None  # None, string
        self.osfdef = "false"  # "false" or "true"

    def addkey(self):
        self.pkgkey = createkey(self.package, self.options)


class fontmapping:
    def __init__(self):
        self.font2pkgmap = dict()
        self.pkg2fontmap = dict()
        self.pkginmap = dict()  # defines, if a map for package exists

    def expandFontMapping(
        self,
        font_list,
        font_type,
        scale_type,
        pkg,
        scaleopt=None,
        osfopt=None,
        osfdef="false",
    ):
        """Expand fontinfo mapping"""
        #
        # font_list: list of fontnames, each element
        #            may contain a ','-separated list of needed options
        #            like e.g. 'IBMPlexSansCondensed,condensed'
        # font_type: one of 'roman', 'sans', 'typewriter', 'math'
        # scale_type: one of None, 'sf', 'tt'
        # pkg: package defining the font. Defaults to fontname if None
        # scaleopt: one of None, 'scale', 'scaled', or some other string
        #           to be used in scale option (e.g. scaled=0.7)
        # osfopt: None or some other string to be used in osf option
        # osfdef: "true" if osf is default
        for fl in font_list:
            fe = fontinfo()
            fe.fonttype = font_type
            fe.scaletype = scale_type
            flt = fl.split(",")
            font_name = flt[0]
            fe.fontname = font_name
            fe.options = flt[1:]
            fe.scaleopt = scaleopt
            fe.osfopt = osfopt
            fe.osfdef = osfdef
            if pkg is None:
                fe.package = font_name
            else:
                fe.package = pkg
            fe.addkey()
            self.font2pkgmap[font_name] = fe
            if fe.pkgkey in self.pkg2fontmap:
                # Repeated the same entry? Check content
                if self.pkg2fontmap[fe.pkgkey] != font_name:
                    # Inconsistent mapping tables are a programming error
                    raise ValueError(
                        "Something is wrong in pkgname+options <-> fontname mapping"
                    )
            self.pkg2fontmap[fe.pkgkey] = font_name
            self.pkginmap[fe.package] = 1

    def getfontname(self, pkg, options):
        options.sort()
        pkgkey = createkey(pkg, options)
        if pkgkey not in self.pkg2fontmap:
            return None
        fontname = self.pkg2fontmap[pkgkey]
        if fontname not in self.font2pkgmap:
            # Both tables are built together in expandFontMapping(), so a
            # missing entry here is a programming error
            raise ValueError(
                "Something is wrong in pkgname+options <-> fontname mapping"
            )
        if pkgkey == self.font2pkgmap[fontname].pkgkey:
            return fontname
        return None


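# A minimal sketch of the round trip through fontmapping (assumed example
# values, not part of lyx2lyx): expandFontMapping() registers one fontinfo
# per font name, keyed on package+options, and getfontname() resolves a
# package/options pair back to the LyX font name:
#
#   >>> fm = fontmapping()
#   >>> fm.expandFontMapping(["IBMPlexSansThin,thin"], "sans", "sf",
#   ...                      "plex-sans", "scale")
#   >>> fm.getfontname("plex-sans", ["thin"])
#   'IBMPlexSansThin'
#   >>> fm.getfontname("plex-sans", ["condensed"]) is None
#   True

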
def createFontMapping(fontlist):
    # Create info for known fonts for the use in
    #   convert_latexFonts() and
    #   revert_latexFonts()
    #
    # * Would be more handy to parse latexFonts file,
    #   but the path to this file is unknown
    # * For now, add DejaVu, IBMPlex, Adobe Source, Noto, Cantarell, Chivo,
    #   CrimsonPro, Fira, and libertinus.
    # * Expand, if desired
    fm = fontmapping()
    for font in fontlist:
        if font == "DejaVu":
            fm.expandFontMapping(["DejaVuSerif", "DejaVuSerifCondensed"], "roman", None, None)
            fm.expandFontMapping(
                ["DejaVuSans", "DejaVuSansCondensed"], "sans", "sf", None, "scaled"
            )
            fm.expandFontMapping(["DejaVuSansMono"], "typewriter", "tt", None, "scaled")
        elif font == "IBM":
            fm.expandFontMapping(
                [
                    "IBMPlexSerif",
                    "IBMPlexSerifThin,thin",
                    "IBMPlexSerifExtraLight,extralight",
                    "IBMPlexSerifLight,light",
                    "IBMPlexSerifSemibold,semibold",
                ],
                "roman",
                None,
                "plex-serif",
            )
            fm.expandFontMapping(
                [
                    "IBMPlexSans",
                    "IBMPlexSansCondensed,condensed",
                    "IBMPlexSansThin,thin",
                    "IBMPlexSansExtraLight,extralight",
                    "IBMPlexSansLight,light",
                    "IBMPlexSansSemibold,semibold",
                ],
                "sans",
                "sf",
                "plex-sans",
                "scale",
            )
            fm.expandFontMapping(
                [
                    "IBMPlexMono",
                    "IBMPlexMonoThin,thin",
                    "IBMPlexMonoExtraLight,extralight",
                    "IBMPlexMonoLight,light",
                    "IBMPlexMonoSemibold,semibold",
                ],
                "typewriter",
                "tt",
                "plex-mono",
                "scale",
            )
        elif font == "Adobe":
            fm.expandFontMapping(
                ["ADOBESourceSerifPro"], "roman", None, "sourceserifpro", None, "osf"
            )
            fm.expandFontMapping(
                ["ADOBESourceSansPro"], "sans", "sf", "sourcesanspro", "scaled", "osf"
            )
            fm.expandFontMapping(
                ["ADOBESourceCodePro"],
                "typewriter",
                "tt",
                "sourcecodepro",
                "scaled",
                "osf",
            )
        elif font == "Noto":
            fm.expandFontMapping(
                [
                    "NotoSerifRegular,regular",
                    "NotoSerifMedium,medium",
                    "NotoSerifThin,thin",
                    "NotoSerifLight,light",
                    "NotoSerifExtralight,extralight",
                ],
                "roman",
                None,
                "noto-serif",
                None,
                "osf",
            )
            fm.expandFontMapping(
                [
                    "NotoSansRegular,regular",
                    "NotoSansMedium,medium",
                    "NotoSansThin,thin",
                    "NotoSansLight,light",
                    "NotoSansExtralight,extralight",
                ],
                "sans",
                "sf",
                "noto-sans",
                "scaled",
            )
            fm.expandFontMapping(
                ["NotoMonoRegular,regular"], "typewriter", "tt", "noto-mono", "scaled"
            )
        elif font == "Cantarell":
            fm.expandFontMapping(
                ["cantarell,defaultsans"],
                "sans",
                "sf",
                "cantarell",
                "scaled",
                "oldstyle",
            )
        elif font == "Chivo":
            fm.expandFontMapping(
                [
                    "ChivoThin,thin",
                    "ChivoLight,light",
                    "Chivo,regular",
                    "ChivoMedium,medium",
                ],
                "sans",
                "sf",
                "Chivo",
                "scale",
                "oldstyle",
            )
        elif font == "CrimsonPro":
            fm.expandFontMapping(
                [
                    "CrimsonPro",
                    "CrimsonProExtraLight,extralight",
                    "CrimsonProLight,light",
                    "CrimsonProMedium,medium",
                ],
                "roman",
                None,
                "CrimsonPro",
                None,
                "lf",
                "true",
            )
        elif font == "Fira":
            fm.expandFontMapping(
                [
                    "FiraSans",
                    "FiraSansBook,book",
                    "FiraSansThin,thin",
                    "FiraSansLight,light",
                    "FiraSansExtralight,extralight",
                    "FiraSansUltralight,ultralight",
                ],
                "sans",
                "sf",
                "FiraSans",
                "scaled",
                "lf",
                "true",
            )
            fm.expandFontMapping(
                ["FiraMono"], "typewriter", "tt", "FiraMono", "scaled", "lf", "true"
            )
        elif font == "libertinus":
            fm.expandFontMapping(["libertinus,serif"], "roman", None, "libertinus", None, "osf")
            fm.expandFontMapping(
                ["libertinusmath"], "math", None, "libertinust1math", None, None
            )
    return fm


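# A minimal usage sketch (assumed example values, not part of lyx2lyx):
#
#   >>> fm = createFontMapping(["DejaVu"])
#   >>> fm.getfontname("DejaVuSans", [])
#   'DejaVuSans'
#   >>> fm.font2pkgmap["DejaVuSans"].scaleopt
#   'scaled'

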
def convert_fonts(document, fm, osfoption="osf"):
    """Handle font definition (LaTeX preamble -> native)"""
    rpkg = re.compile(r"^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}")
    rscaleopt = re.compile(r"^scaled?=(.*)")

    # Check whether we go beyond font option feature introduction
    haveFontOpts = document.end_format > 580

    i = 0
    while True:
        i = find_re(document.preamble, rpkg, i + 1)
        if i == -1:
            return
        mo = rpkg.search(document.preamble[i])
        if mo is None or mo.group(2) is None:
            options = []
        else:
            options = mo.group(2).replace(" ", "").split(",")
        pkg = mo.group(3)
        o = 0
        oscale = 1
        has_osf = False
        while o < len(options):
            if options[o] == osfoption:
                has_osf = True
                del options[o]
                continue
            mo = rscaleopt.search(options[o])
            if mo is None:
                o += 1
                continue
            oscale = mo.group(1)
            del options[o]
            continue

        if pkg not in fm.pkginmap:
            continue
        # determine fontname
        fn = None
        if haveFontOpts:
            # Try with name-option combination first
            # (only one default option supported currently)
            o = 0
            while o < len(options):
                opt = options[o]
                fn = fm.getfontname(pkg, [opt])
                if fn is not None:
                    del options[o]
                    break
                o += 1
            if fn is None:
                fn = fm.getfontname(pkg, [])
        else:
            fn = fm.getfontname(pkg, options)
        if fn is None:
            continue
        del document.preamble[i]
        fontinfo = fm.font2pkgmap[fn]
        if fontinfo.scaletype is None:
            fontscale = None
        else:
            fontscale = "\\font_" + fontinfo.scaletype + "_scale"
            fontinfo.scaleval = oscale
        if (has_osf and fontinfo.osfdef == "false") or (
            not has_osf and fontinfo.osfdef == "true"
        ):
            if fontinfo.osfopt is None:
                # osfoption is a single option string, so append it
                options.append(osfoption)
                continue
            osf = find_token(document.header, "\\font_osf false")
            osftag = "\\font_osf"
            if osf == -1 and fontinfo.fonttype != "math":
                # Try with newer format
                osftag = "\\font_" + fontinfo.fonttype + "_osf"
                osf = find_token(document.header, osftag + " false")
            if osf != -1:
                document.header[osf] = osftag + " true"
        if i > 0 and document.preamble[i - 1] == "% Added by lyx2lyx":
            del document.preamble[i - 1]
            i -= 1
        if fontscale is not None:
            j = find_token(document.header, fontscale, 0)
            if j != -1:
                val = get_value(document.header, fontscale, j)
                vals = val.split()
                scale = "100"
                if oscale is not None:
                    scale = "%03d" % int(float(oscale) * 100)
                document.header[j] = fontscale + " " + scale + " " + vals[1]
        ft = "\\font_" + fontinfo.fonttype
        j = find_token(document.header, ft, 0)
        if j != -1:
            val = get_value(document.header, ft, j)
            words = val.split()  # ! splits also values like '"DejaVu Sans"'
            words[0] = '"' + fn + '"'
            document.header[j] = ft + " " + " ".join(words)
        if haveFontOpts and fontinfo.fonttype != "math":
            fotag = "\\font_" + fontinfo.fonttype + "_opts"
            fo = find_token(document.header, fotag)
            if fo != -1:
                document.header[fo] = fotag + ' "' + ",".join(options) + '"'
            else:
                # Sensible place to insert tag
                fo = find_token(document.header, "\\font_sf_scale")
                if fo == -1:
                    document.warning("Malformed LyX document! Missing \\font_sf_scale")
                else:
                    document.header.insert(fo, fotag + ' "' + ",".join(options) + '"')


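# A minimal sketch of one conversion step (assumed example document, not
# part of lyx2lyx): with fm = createFontMapping(["Adobe"]), a preamble line
#
#   \usepackage[scaled=0.85,osf]{sourcesanspro}
#
# is removed, and the header is rewritten roughly as
#
#   \font_sans "ADOBESourceSansPro" "default"
#   \font_sans_osf true
#   \font_sf_scale 085 100

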
def revert_fonts(document, fm, fontmap, OnlyWithXOpts=False, WithXOpts=False):
    """Revert native font definition to LaTeX"""
    # fontlist := list of fonts created from the same package
    # Empty package means that the font-name is the same as the package-name
    # fontmap (key = package, val += found options) will be filled
    # and used later in add_preamble_fonts() to be added to user-preamble

    rfontscale = re.compile(r"^\s*(\\font_(roman|sans|typewriter|math))\s+")
    rscales = re.compile(r"^\s*(\d+)\s+(\d+)")
    i = 0
    while i < len(document.header):
        i = find_re(document.header, rfontscale, i + 1)
        if i == -1:
            return True
        mo = rfontscale.search(document.header[i])
        if mo is None:
            continue
        ft = mo.group(1)  # \font_roman, \font_sans, \font_typewriter, or \font_math
        val = get_value(document.header, ft, i)
        words = val.split(" ")  # ! splits also values like '"DejaVu Sans"'
        font = words[0].strip('"')  # TeX font name has no whitespace
        if font not in fm.font2pkgmap:
            continue
        fontinfo = fm.font2pkgmap[font]
        val = fontinfo.package
        if val not in fontmap:
            fontmap[val] = []
        x = -1
        if OnlyWithXOpts or WithXOpts:
            if ft == "\\font_math":
                return False
            regexp = re.compile(r"^\s*(\\font_roman_opts)\s+")
            if ft == "\\font_sans":
                regexp = re.compile(r"^\s*(\\font_sans_opts)\s+")
            elif ft == "\\font_typewriter":
                regexp = re.compile(r"^\s*(\\font_typewriter_opts)\s+")
            x = find_re(document.header, regexp, 0)
            if x == -1 and OnlyWithXOpts:
                return False

        if x != -1:
            # We need to use this regex since split() does not handle quote protection
            xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            opts = xopts[1].strip('"').split(",")
            fontmap[val].extend(opts)
            del document.header[x]
        words[0] = '"default"'
        document.header[i] = ft + " " + " ".join(words)
        if fontinfo.scaleopt is not None:
            xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
            mo = rscales.search(xval)
            if mo is not None:
                xval1 = mo.group(1)
                if xval1 != "100":
                    # set correct scale option
                    fontmap[val].extend(
                        [fontinfo.scaleopt + "=" + format(float(xval1) / 100, ".2f")]
                    )
        if fontinfo.osfopt is not None:
            oldval = "true"
            if fontinfo.osfdef == "true":
                oldval = "false"
            osf = find_token(document.header, "\\font_osf " + oldval)
            if osf == -1 and ft != "\\font_math":
                # Try with newer format
                osftag = "\\font_roman_osf " + oldval
                if ft == "\\font_sans":
                    osftag = "\\font_sans_osf " + oldval
                elif ft == "\\font_typewriter":
                    osftag = "\\font_typewriter_osf " + oldval
                osf = find_token(document.header, osftag)
            if osf != -1:
                fontmap[val].extend([fontinfo.osfopt])
        if len(fontinfo.options) > 0:
            fontmap[val].extend(fontinfo.options)
    return True


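# A minimal sketch of the reversion (assumed example document, not part of
# lyx2lyx): with fm = createFontMapping(["Adobe"]) and a header containing
#
#   \font_sans "ADOBESourceSansPro" "default"
#   \font_sans_osf true
#   \font_sf_scale 085 100
#
# the font is reset to "default" and fontmap ends up as
# {"sourcesanspro": ["scaled=0.85", "osf"]}, which add_preamble_fonts()
# later turns into \usepackage[scaled=0.85,osf]{sourcesanspro}.

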
###############################################################################
###
### Conversion and reversion routines
###
###############################################################################


def convert_inputencoding_namechange(document):
    """Rename inputencoding settings."""
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")


def revert_inputencoding_namechange(document):
    """Rename inputencoding settings."""
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        return
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")


def convert_notoFonts(document):
    """Handle Noto font definitions (LaTeX preamble -> native)"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(["Noto"])
        convert_fonts(document, fm)


def revert_notoFonts(document):
    """Revert native Noto font definition to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fontmap = dict()
        fm = createFontMapping(["Noto"])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)


def convert_latexFonts(document):
    """Handle DejaVu and IBMPlex font definitions (LaTeX preamble -> native)"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(["DejaVu", "IBM"])
        convert_fonts(document, fm)


def revert_latexFonts(document):
    """Revert native DejaVu and IBMPlex font definitions to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fontmap = dict()
        fm = createFontMapping(["DejaVu", "IBM"])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)


def convert_AdobeFonts(document):
    """Handle Adobe Source font definitions (LaTeX preamble -> native)"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(["Adobe"])
        convert_fonts(document, fm)


def revert_AdobeFonts(document):
    """Revert Adobe Source font definitions to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fontmap = dict()
        fm = createFontMapping(["Adobe"])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)


def removeFrontMatterStyles(document):
    """Remove styles Begin/EndFrontmatter"""

    layouts = ["BeginFrontmatter", "EndFrontmatter"]
    tokenend = len("\\begin_layout ")
    i = 0
    while True:
        i = find_token_exact(document.body, "\\begin_layout ", i + 1)
        if i == -1:
            return
        layout = document.body[i][tokenend:].strip()
        if layout not in layouts:
            continue
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
            continue
        while document.body[j + 1].strip() == "":
            j += 1
        document.body[i : j + 1] = []


def addFrontMatterStyles(document):
    """Use styles Begin/EndFrontmatter for elsarticle"""

    if document.textclass != "elsarticle":
        return

    def insertFrontmatter(prefix, line):
        above = line
        while above > 0 and document.body[above - 1].strip() == "":
            above -= 1
        below = line
        while document.body[below].strip() == "":
            below += 1
        document.body[above:below] = [
            "",
            "\\begin_layout " + prefix + "Frontmatter",
            "\\begin_inset Note Note",
            "status open",
            "",
            "\\begin_layout Plain Layout",
            "Keep this empty!",
            "\\end_layout",
            "",
            "\\end_inset",
            "",
            "",
            "\\end_layout",
            "",
        ]

    layouts = [
        "Title",
        "Title footnote",
        "Author",
        "Author footnote",
        "Corresponding author",
        "Address",
        "Email",
        "Abstract",
        "Keywords",
    ]
    tokenend = len("\\begin_layout ")
    first = -1
    i = 0
    while True:
        i = find_token_exact(document.body, "\\begin_layout ", i + 1)
        if i == -1:
            break
        layout = document.body[i][tokenend:].strip()
        if layout not in layouts:
            continue
        k = find_end_of_layout(document.body, i)
        if k == -1:
            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
            continue
        if first == -1:
            first = i
        i = k
    if first == -1:
        return
    insertFrontmatter("End", k + 1)
    insertFrontmatter("Begin", first)


def convert_lst_literalparam(document):
    """Add param literal to include inset"""

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset include", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of command inset at line %d" % i
            )
            continue
        while i < j and document.body[i].strip() != "":
            i += 1
        document.body.insert(i, 'literal "true"')


def revert_lst_literalparam(document):
    """Remove param literal from include inset"""

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset include", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of include inset at line %d" % i
            )
            continue
        del_token(document.body, "literal", i, j)


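# A minimal sketch of the inset change (assumed example lines, not part of
# lyx2lyx): convert_lst_literalparam() appends the new parameter line
# 'literal "true"' after the existing parameters of an include inset, e.g.
#
#   \begin_inset CommandInset include
#   LatexCommand lstinputlisting
#   filename "example.cpp"
#   literal "true"
#
# and revert_lst_literalparam() deletes that 'literal' line again.

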
def revert_paratype(document):
    """Revert ParaType font definitions to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        i1 = find_token(document.header, '\\font_roman "PTSerif-TLF"', 0)
        i2 = find_token(document.header, '\\font_sans "default"', 0)
        i3 = find_token(document.header, '\\font_typewriter "default"', 0)
        j = find_token(document.header, '\\font_sans "PTSans-TLF"', 0)

        sf_scale = 100.0
        sfval = find_token(document.header, "\\font_sf_scale", 0)
        if sfval == -1:
            document.warning("Malformed LyX document: Missing \\font_sf_scale.")
        else:
            sfscale = document.header[sfval].split()
            val = sfscale[1]
            sfscale[1] = "100"
            document.header[sfval] = " ".join(sfscale)
            try:
                # float() can throw
                sf_scale = float(val)
            except:
                document.warning("Invalid font_sf_scale value: " + val)

        sfoption = ""
        if sf_scale != 100.0:
            sfoption = "scaled=" + str(sf_scale / 100.0)
        k = find_token(document.header, '\\font_typewriter "PTMono-TLF"', 0)
        ttval = get_value(document.header, "\\font_tt_scale", 0)
        # cutoff " 100"
        ttval = ttval[:-4]
        ttoption = ""
        if ttval != "100":
            ttoption = "scaled=" + format(float(ttval) / 100, ".2f")
        if i1 != -1 and i2 != -1 and i3 != -1:
            add_to_preamble(document, ["\\usepackage{paratype}"])
        else:
            if i1 != -1:
                add_to_preamble(document, ["\\usepackage{PTSerif}"])
                document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
            if j != -1:
                if sfoption != "":
                    add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
                else:
                    add_to_preamble(document, ["\\usepackage{PTSans}"])
                document.header[j] = document.header[j].replace("PTSans-TLF", "default")
            if k != -1:
                if ttoption != "":
                    add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
                else:
                    add_to_preamble(document, ["\\usepackage{PTMono}"])
                document.header[k] = document.header[k].replace("PTMono-TLF", "default")


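# A minimal sketch of the scale arithmetic above (assumed example values,
# not part of lyx2lyx): a header line "\font_sf_scale 85 100" yields
# sf_scale = 85.0 and sfoption = "scaled=0.85", while "\font_tt_scale 70 100"
# yields ttoption = "scaled=0.70" via format(70 / 100, ".2f").

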
def revert_xcharter(document):
    """Revert XCharter font definitions to LaTeX"""

    i = find_token(document.header, '\\font_roman "xcharter"', 0)
    if i == -1:
        return

    # replace unsupported font setting
    document.header[i] = document.header[i].replace("xcharter", "default")
    # no need for preamble code with system fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    # transfer old style figures setting to package options
    j = find_token(document.header, "\\font_osf true")
    if j != -1:
        options = "[osf]"
        document.header[j] = "\\font_osf false"
    else:
        options = ""
    if i != -1:
        add_to_preamble(document, ["\\usepackage%s{XCharter}" % options])


def revert_lscape(document):
    """Reverts the landscape environment (Landscape module) to TeX-code"""

    if "landscape" not in document.get_module_list():
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Flex Landscape", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Landscape inset")
            continue

        if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
            document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
            add_to_preamble(document, ["\\usepackage{afterpage}"])
        else:
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
            document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")

    add_to_preamble(document, ["\\usepackage{pdflscape}"])
    document.del_module("landscape")


def convert_fontenc(document):
    """Convert default fontenc setting"""

    i = find_token(document.header, "\\fontencoding global", 0)
    if i == -1:
        return

    document.header[i] = document.header[i].replace("global", "auto")


def revert_fontenc(document):
    """Revert default fontenc setting"""

    i = find_token(document.header, "\\fontencoding auto", 0)
    if i == -1:
        return

    document.header[i] = document.header[i].replace("auto", "global")


def revert_nospellcheck(document):
    """Remove nospellcheck font info param"""

    i = 0
    while True:
        i = find_token(document.body, "\\nospellcheck", i)
        if i == -1:
            return
        del document.body[i]


def revert_floatpclass(document):
    """Remove float placement params 'document' and 'class'"""

    del_token(document.header, "\\float_placement class")

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Float", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        k = find_token(document.body, "placement class", i, j)
        if k == -1:
            k = find_token(document.body, "placement document", i, j)
            if k != -1:
                del document.body[k]
            continue
        del document.body[k]


def revert_floatalignment(document):
    """Remove float alignment params"""

    galignment = get_value(document.header, "\\float_alignment", delete=True)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Float", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of inset at line " + str(i)
            )
            continue
        k = find_token(document.body, "alignment", i, j)
        if k == -1:
            i = j
            continue
        alignment = get_value(document.body, "alignment", k)
        if alignment == "document":
            alignment = galignment
        del document.body[k]
        l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
        if l == -1:
            document.warning("Can't find float layout!")
            continue
        alcmd = []
        if alignment == "left":
            alcmd = put_cmd_in_ert("\\raggedright{}")
        elif alignment == "center":
            alcmd = put_cmd_in_ert("\\centering{}")
        elif alignment == "right":
            alcmd = put_cmd_in_ert("\\raggedleft{}")
        if len(alcmd) > 0:
            document.body[l + 1 : l + 1] = alcmd
        # There might be subfloats, so we do not want to move past
        # the end of the inset.
        i += 1


def revert_tuftecite(document):
    r"""Revert \cite commands in tufte classes"""

    tufte = ["tufte-book", "tufte-handout"]
    if document.textclass not in tufte:
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset citation", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" % (i))
            continue
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" % (i))
            i = j
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        if cmd != "cite":
            i = j
            continue
        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" % (i))
            key = "???"
        # Replace command with ERT
        res = "\\cite"
        if pre:
            res += "[" + pre + "]"
        if post:
            res += "[" + post + "]"
        elif pre:
            res += "[]"
        res += "{" + key + "}"
        document.body[i : j + 1] = put_cmd_in_ert([res])
        i = j


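# A minimal sketch of the reversion (assumed example values, not part of
# lyx2lyx): a citation inset with before "see", after "p. 5" and key
# "knuth84" becomes the ERT \cite[see][p. 5]{knuth84}; with only a prenote
# it becomes \cite[see][]{knuth84}, since a single bracket would otherwise
# be read as the postnote.

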
def revert_stretchcolumn(document):
    """We remove the column varwidth flags or everything else will become a mess."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            continue
        for k in range(i, j):
            if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
                document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
                document.body[k] = document.body[k].replace(' varwidth="true"', "")


def revert_vcolumns(document):
    """Revert standard columns with line breaks etc."""
    i = 0
    needvarwidth = False
    needarray = False
    try:
        while True:
            i = find_token(document.body, "\\begin_inset Tabular", i + 1)
            if i == -1:
                return
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Could not find end of tabular.")
                continue

            # Collect necessary column information
            m = i + 1
            nrows = int(document.body[i + 1].split('"')[3])
            ncols = int(document.body[i + 1].split('"')[5])
            col_info = []
            for k in range(ncols):
                m = find_token(document.body, "<column", m)
                width = get_option_value(document.body[m], "width")
                varwidth = get_option_value(document.body[m], "varwidth")
                alignment = get_option_value(document.body[m], "alignment")
                special = get_option_value(document.body[m], "special")
                col_info.append([width, varwidth, alignment, special, m])

            # Now parse cells
            m = i + 1
            lines = []
            for row in range(nrows):
                for col in range(ncols):
                    m = find_token(document.body, "<cell", m)
                    multicolumn = get_option_value(document.body[m], "multicolumn")
                    multirow = get_option_value(document.body[m], "multirow")
                    width = get_option_value(document.body[m], "width")
                    rotate = get_option_value(document.body[m], "rotate")
                    # Check for: linebreaks, multipars, non-standard environments
                    begcell = m
                    endcell = find_token(document.body, "</cell>", begcell)
                    vcand = False
                    if (
                        find_token(document.body, "\\begin_inset Newline", begcell, endcell)
                        != -1
                    ):
                        vcand = True
                    elif count_pars_in_inset(document.body, begcell + 2) > 1:
                        vcand = True
                    elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
                        vcand = True
                    if (
                        vcand
                        and rotate == ""
                        and ((multicolumn == "" and multirow == "") or width == "")
                    ):
                        if (
                            col_info[col][0] == ""
                            and col_info[col][1] == ""
                            and col_info[col][3] == ""
                        ):
                            needvarwidth = True
                            alignment = col_info[col][2]
                            col_line = col_info[col][4]
                            vval = ""
                            if alignment == "center":
                                vval = ">{\\centering}"
                            elif alignment == "left":
                                vval = ">{\\raggedright}"
                            elif alignment == "right":
                                vval = ">{\\raggedleft}"
                            if vval != "":
                                needarray = True
                            vval += "V{\\linewidth}"

                            document.body[col_line] = (
                                document.body[col_line][:-1] + ' special="' + vval + '">'
                            )
                            # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
                            # with newlines, and we do not want that)
                            while True:
                                endcell = find_token(document.body, "</cell>", begcell)
                                linebreak = False
                                nl = find_token(
                                    document.body,
                                    "\\begin_inset Newline newline",
                                    begcell,
                                    endcell,
                                )
                                if nl == -1:
                                    nl = find_token(
                                        document.body,
                                        "\\begin_inset Newline linebreak",
                                        begcell,
                                        endcell,
                                    )
                                    if nl == -1:
                                        break
                                    linebreak = True
                                nle = find_end_of_inset(document.body, nl)
                                del document.body[nle : nle + 1]
                                if linebreak:
                                    document.body[nl : nl + 1] = put_cmd_in_ert("\\linebreak{}")
                                else:
                                    document.body[nl : nl + 1] = put_cmd_in_ert("\\\\")
                    m += 1

            i = j

    finally:
        if needarray == True:
            add_to_preamble(document, ["\\usepackage{array}"])
        if needvarwidth == True:
            add_to_preamble(document, ["\\usepackage{varwidth}"])


def revert_bibencoding(document):
    """Revert bibliography encoding"""

    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # Check if biblatex
    biblatex = False
    if engine in ["biblatex", "biblatex-natbib"]:
        biblatex = True

    # Map lyx to latex encoding names
    encodings = {
        "utf8": "utf8",
        "utf8x": "utf8x",
        "armscii8": "armscii8",
        "iso8859-1": "latin1",
        "iso8859-2": "latin2",
        "iso8859-3": "latin3",
        "iso8859-4": "latin4",
        "iso8859-5": "iso88595",
        "iso8859-6": "8859-6",
        "iso8859-7": "iso-8859-7",
        "iso8859-8": "8859-8",
        "iso8859-9": "latin5",
        "iso8859-13": "latin7",
        "iso8859-15": "latin9",
        "iso8859-16": "latin10",
        "applemac": "applemac",
        "cp437": "cp437",
        "cp437de": "cp437de",
        "cp850": "cp850",
        "cp852": "cp852",
        "cp855": "cp855",
        "cp858": "cp858",
        "cp862": "cp862",
        "cp865": "cp865",
        "cp866": "cp866",
        "cp1250": "cp1250",
        "cp1251": "cp1251",
        "cp1252": "cp1252",
        "cp1255": "cp1255",
        "cp1256": "cp1256",
        "cp1257": "cp1257",
        "koi8-r": "koi8-r",
        "koi8-u": "koi8-u",
        "pt154": "pt154",
        "utf8-platex": "utf8",
        "ascii": "ascii",
    }

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of bibtex inset at line %d!!" % (i))
            continue
        encoding = get_quoted_value(document.body, "encoding", i, j)
        if not encoding:
            continue
        # remove encoding line
        k = find_token(document.body, "encoding", i, j)
        if k != -1:
            del document.body[k]
        if encoding == "default":
            continue
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        if biblatex:
            biblio_options = ""
            h = find_token(document.header, "\\biblio_options", 0)
            if h != -1:
                biblio_options = get_value(document.header, "\\biblio_options", h)
                if "bibencoding" not in biblio_options:
                    document.header[h] += ",bibencoding=%s" % encodings[encoding]
            else:
                bs = find_token(document.header, "\\biblatex_bibstyle", 0)
                if bs == -1:
                    # this should not happen
                    document.warning(
                        "Malformed LyX document! No \\biblatex_bibstyle header found!"
                    )
                else:
                    document.header[bs - 1 : bs - 1] = [
                        "\\biblio_options bibencoding=" + encodings[encoding]
                    ]
        else:
            document.body[j + 1 : j + 1] = put_cmd_in_ert("\\egroup")
            document.body[i:i] = put_cmd_in_ert(
                "\\bgroup\\inputencoding{" + encodings[encoding] + "}"
            )

        i = j


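# A minimal sketch of the non-biblatex branch (assumed example values, not
# part of lyx2lyx): a bibtex inset whose encoding parameter is "iso8859-15"
# loses that parameter line and is wrapped in ERT as
#
#   \bgroup\inputencoding{latin9}
#   ...bibtex inset...
#   \egroup
#
# using the lyx -> latex encoding map above.

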
def convert_vcsinfo(document):
|
2020-05-08 17:31:05 +00:00
|
|
|
|
"""Separate vcs Info inset from buffer Info inset."""
|
2018-07-29 17:41:34 +00:00
|
|
|
|
|
|
|
|
|
types = {
|
2024-06-15 09:06:06 +00:00
|
|
|
|
"vcs-revision": "revision",
|
|
|
|
|
"vcs-tree-revision": "tree-revision",
|
|
|
|
|
"vcs-author": "author",
|
|
|
|
|
"vcs-time": "time",
|
|
|
|
|
"vcs-date": "date",
|
2018-07-29 17:41:34 +00:00
|
|
|
|
}
|
|
|
|
|
i = 0
|
|
|
|
|
while True:
|
2024-06-15 09:06:06 +00:00
|
|
|
|
i = find_token(document.body, "\\begin_inset Info", i + 1)
|
2018-07-29 17:41:34 +00:00
|
|
|
|
if i == -1:
|
|
|
|
|
return
|
2024-06-15 09:06:06 +00:00
|
|
|
|
j = find_end_of_inset(document.body, i + 1)
|
2018-07-29 17:41:34 +00:00
|
|
|
|
if j == -1:
|
|
|
|
|
document.warning("Malformed LyX document: Could not find end of Info inset.")
|
|
|
|
|
continue
|
2024-06-15 09:06:06 +00:00
|
|
|
|
tp = find_token(document.body, "type", i, j)
|
2018-07-29 17:41:34 +00:00
|
|
|
|
tpv = get_quoted_value(document.body, "type", tp)
|
|
|
|
|
if tpv != "buffer":
|
|
|
|
|
continue
|
2024-06-15 09:06:06 +00:00
|
|
|
|
arg = find_token(document.body, "arg", i, j)
|
2018-07-29 17:41:34 +00:00
|
|
|
|
argv = get_quoted_value(document.body, "arg", arg)
|
|
|
|
|
if argv not in list(types.keys()):
|
|
|
|
|
continue
|
2024-06-15 09:06:06 +00:00
|
|
|
|
document.body[tp] = 'type "vcs"'
|
|
|
|
|
document.body[arg] = 'arg "' + types[argv] + '"'
|
2018-07-29 17:41:34 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def revert_vcsinfo(document):
|
2020-05-08 17:31:05 +00:00
|
|
|
|
"""Merge vcs Info inset to buffer Info inset."""
|
2018-07-29 17:41:34 +00:00
|
|
|
|
|
2024-06-15 09:06:06 +00:00
|
|
|
|
args = ["revision", "tree-revision", "author", "time", "date"]
|
2018-07-29 17:41:34 +00:00
|
|
|
|
i = 0
|
|
|
|
|
while True:
|
2024-06-15 09:06:06 +00:00
|
|
|
|
i = find_token(document.body, "\\begin_inset Info", i + 1)
|
2018-07-29 17:41:34 +00:00
|
|
|
|
if i == -1:
|
|
|
|
|
return
|
2024-06-15 09:06:06 +00:00
|
|
|
|
j = find_end_of_inset(document.body, i + 1)
|
2018-07-29 17:41:34 +00:00
|
|
|
|
if j == -1:
|
|
|
|
|
document.warning("Malformed LyX document: Could not find end of Info inset.")
|
|
|
|
|
continue
|
2024-06-15 09:06:06 +00:00
|
|
|
|
tp = find_token(document.body, "type", i, j)
|
2018-07-29 17:41:34 +00:00
|
|
|
|
tpv = get_quoted_value(document.body, "type", tp)
|
|
|
|
|
if tpv != "vcs":
|
|
|
|
|
continue
|
2024-06-15 09:06:06 +00:00
|
|
|
|
arg = find_token(document.body, "arg", i, j)
|
2018-07-29 17:41:34 +00:00
|
|
|
|
argv = get_quoted_value(document.body, "arg", arg)
|
|
|
|
|
if argv not in args:
|
|
|
|
|
document.warning("Malformed Info inset. Invalid vcs arg.")
|
|
|
|
|
continue
|
2024-06-15 09:06:06 +00:00
|
|
|
|
document.body[tp] = 'type "buffer"'
|
|
|
|
|
document.body[arg] = 'arg "vcs-' + argv + '"'


def revert_vcsinfo_rev_abbrev(document):
    "Convert abbreviated revisions to regular revisions."

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, "type", i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "vcs":
            continue
        arg = find_token(document.body, "arg", i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv == "revision-abbrev":
            document.body[arg] = 'arg "revision"'


def revert_dateinfo(document):
    """Revert date info insets to static text."""

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.

    # The date formats for each language using strftime syntax:
    # long, short, loclong, locmedium, locshort
    dateformats = {
        "afrikaans": ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "albanian": ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "american": ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "amharic": ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ancientgreek": ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "arabic_arabi": ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "arabic_arabtex": ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "armenian": ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
        "asturian": ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "australian": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "austrian": ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "bahasa": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bahasam": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "basque": ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
        "belarusian": ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "bosnian": ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
        "brazilian": ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "breton": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "british": ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bulgarian": ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadian": ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadien": ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "catalan": ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
        "chinese-simplified": ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
        "chinese-traditional": ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
        "coptic": ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "croatian": ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
        "czech": ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
        "danish": ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "divehi": ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
        "dutch": ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "english": ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "esperanto": ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
        "estonian": ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "farsi": ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "finnish": ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "french": ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "friulan": ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
        "galician": ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "georgian": ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "german": ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch": ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch-old": ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "greek": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hebrew": ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hindi": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "icelandic": ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "interlingua": ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
        "irish": ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "italian": ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
        "japanese": ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "japanese-cjk": ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "kannada": ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
        "kazakh": ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
        "khmer": ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "korean": ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
        "kurmanji": ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
        "lao": ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "latin": ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "latvian": ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
        "lithuanian": ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
        "lowersorbian": ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "macedonian": ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "magyar": ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
        "malayalam": ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "marathi": ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "mongolian": ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
        "naustrian": ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "newzealand": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ngerman": ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "norsk": ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "nynorsk": ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "occitan": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "piedmontese": ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "polish": ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "polutonikogreek": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "portuguese": ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
        "romanian": ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "romansh": ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "russian": ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
        "samin": ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
        "sanskrit": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "scottish": ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "serbian": ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "serbian-latin": ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovak": ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovene": ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "spanish": ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "spanish-mexico": ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "swedish": ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "syriac": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tamil": ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "telugu": ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "thai": ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tibetan": ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "turkish": ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
        "turkmen": ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
        "ukrainian": ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
        "uppersorbian": ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "urdu": ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
        "vietnamese": ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
        "welsh": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    }

    types = ["date", "fixdate", "moddate"]
    lang = get_value(document.header, "\\language")
    if lang == "":
        document.warning("Malformed LyX document! No \\language header found!")
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, "type", i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, "arg", i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isodate = ""
        dte = date.today()
        if tpv == "fixdate":
            datecomps = argv.split("@")
            if len(datecomps) > 1:
                argv = datecomps[0]
                isodate = datecomps[1]
            m = re.search(r"(\d\d\d\d)-(\d\d)-(\d\d)", isodate)
            if m:
                dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #    dte = date.fromtimestamp(os.path.getmtime(document.dir))
        result = ""
        if argv == "ISO":
            result = dte.isoformat()
        elif argv == "long":
            result = dte.strftime(dateformats[lang][0])
        elif argv == "short":
            result = dte.strftime(dateformats[lang][1])
        elif argv == "loclong":
            result = dte.strftime(dateformats[lang][2])
        elif argv == "locmedium":
            result = dte.strftime(dateformats[lang][3])
        elif argv == "locshort":
            result = dte.strftime(dateformats[lang][4])
        else:
            # Convert the Qt-style format string to strftime syntax
            fmt = (
                argv.replace("MMMM", "%B")
                .replace("MMM", "%b")
                .replace("MM", "%m")
                .replace("M", "%m")
            )
            fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
            fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
            fmt = re.sub(r"([^'%])d", r"\1%d", fmt)
            fmt = fmt.replace("'", "")
            result = dte.strftime(fmt)
        document.body[i : j + 1] = [result]
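
# Illustrative sketch (hypothetical inset, English C locale assumed): an inset
# with type "fixdate" and arg "dd/MM/yyyy@2018-08-05" is reduced to the static
# text "05/08/2018". The Qt-style pattern before "@" is mapped to strftime
# ("dd/MM/yyyy" -> "%d/%m/%Y") and applied to the fixed date:
#     >>> from datetime import date
#     >>> date(2018, 8, 5).strftime("%d/%m/%Y")
#     '05/08/2018'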


def revert_timeinfo(document):
    """Revert time info insets to static text."""

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
    # be empty).

    # The time formats for each language using strftime syntax:
    # long, short
    timeformats = {
        "afrikaans": ["%H:%M:%S %Z", "%H:%M"],
        "albanian": ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek": ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian": ["%H:%M:%S %Z", "%H:%M"],
        "asturian": ["%H:%M:%S %Z", "%H:%M"],
        "australian": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian": ["%H:%M:%S %Z", "%H:%M"],
        "bahasa": ["%H.%M.%S %Z", "%H.%M"],
        "bahasam": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque": ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian": ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian": ["%H:%M:%S %Z", "%H:%M"],
        "brazilian": ["%H:%M:%S %Z", "%H:%M"],
        "breton": ["%H:%M:%S %Z", "%H:%M"],
        "british": ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian": ["%H:%M:%S %Z", "%H:%M"],
        "canadian": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien": ["%H:%M:%S %Z", "%H h %M"],
        "catalan": ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified": ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional": ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic": ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian": ["%H:%M:%S (%Z)", "%H:%M"],
        "czech": ["%H:%M:%S %Z", "%H:%M"],
        "danish": ["%H.%M.%S %Z", "%H.%M"],
        "divehi": ["%H:%M:%S %Z", "%H:%M"],
        "dutch": ["%H:%M:%S %Z", "%H:%M"],
        "english": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto": ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian": ["%H:%M:%S %Z", "%H:%M"],
        "farsi": ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish": ["%H.%M.%S %Z", "%H.%M"],
        "french": ["%H:%M:%S %Z", "%H:%M"],
        "friulan": ["%H:%M:%S %Z", "%H:%M"],
        "galician": ["%H:%M:%S %Z", "%H:%M"],
        "georgian": ["%H:%M:%S %Z", "%H:%M"],
        "german": ["%H:%M:%S %Z", "%H:%M"],
        "german-ch": ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old": ["%H:%M:%S %Z", "%H:%M"],
        "greek": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew": ["%H:%M:%S %Z", "%H:%M"],
        "hindi": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic": ["%H:%M:%S %Z", "%H:%M"],
        "interlingua": ["%H:%M:%S %Z", "%H:%M"],
        "irish": ["%H:%M:%S %Z", "%H:%M"],
        "italian": ["%H:%M:%S %Z", "%H:%M"],
        "japanese": ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk": ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh": ["%H:%M:%S %Z", "%H:%M"],
        "khmer": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean": ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji": ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao": ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin": ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian": ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian": ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian": ["%H:%M:%S %Z", "%H:%M"],
        "macedonian": ["%H:%M:%S %Z", "%H:%M"],
        "magyar": ["%H:%M:%S %Z", "%H:%M"],
        "malayalam": ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian": ["%H:%M:%S %Z", "%H:%M"],
        "naustrian": ["%H:%M:%S %Z", "%H:%M"],
        "newzealand": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman": ["%H:%M:%S %Z", "%H:%M"],
        "norsk": ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk": ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan": ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese": ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish": ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese": ["%H:%M:%S %Z", "%H:%M"],
        "romanian": ["%H:%M:%S %Z", "%H:%M"],
        "romansh": ["%H:%M:%S %Z", "%H:%M"],
        "russian": ["%H:%M:%S %Z", "%H:%M"],
        "samin": ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit": ["%H:%M:%S %Z", "%H:%M"],
        "scottish": ["%H:%M:%S %Z", "%H:%M"],
        "serbian": ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin": ["%H:%M:%S %Z", "%H:%M"],
        "slovak": ["%H:%M:%S %Z", "%H:%M"],
        "slovene": ["%H:%M:%S %Z", "%H:%M"],
        "spanish": ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico": ["%H:%M:%S %Z", "%H:%M"],
        "swedish": ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac": ["%H:%M:%S %Z", "%H:%M"],
        "tamil": ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai": ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish": ["%H:%M:%S %Z", "%H:%M"],
        "turkmen": ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian": ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian": ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese": ["%H:%M:%S %Z", "%H:%M"],
        "welsh": ["%H:%M:%S %Z", "%H:%M"],
    }

    types = ["time", "fixtime", "modtime"]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, "type", i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, "arg", i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isotime = ""
        dtme = datetime.now()
        tme = dtme.time()
        if tpv == "fixtime":
            timecomps = argv.split("@")
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
            m = re.search(r"(\d\d):(\d\d):(\d\d)", isotime)
            if m:
                tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
            else:
                m = re.search(r"(\d\d):(\d\d)", isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "modtime":
        #    tme = datetime.fromtimestamp(os.path.getmtime(document.dir)).time()
        result = ""
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Convert the Qt-style format string to strftime syntax
            fmt = (
                argv.replace("HH", "%H")
                .replace("H", "%H")
                .replace("hh", "%I")
                .replace("h", "%I")
            )
            fmt = (
                fmt.replace("mm", "%M")
                .replace("m", "%M")
                .replace("ss", "%S")
                .replace("s", "%S")
            )
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = (
                fmt.replace("AP", "%p")
                .replace("ap", "%p")
                .replace("A", "%p")
                .replace("a", "%p")
            )
            fmt = fmt.replace("'", "")
            result = tme.strftime(fmt)
        document.body[i : j + 1] = [result]
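
# Illustrative sketch (hypothetical values, C locale assumed): an inset with
# type "fixtime" and arg "hh:mm ap@14:05:30" maps the Qt pattern to strftime
# ("hh:mm ap" -> "%I:%M %p") and renders the fixed time:
#     >>> from datetime import time
#     >>> time(14, 5, 30).strftime("%I:%M %p")
#     '02:05 PM'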


def revert_namenoextinfo(document):
    """Merge buffer Info inset type name-noext to name."""

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, "type", i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "buffer":
            continue
        arg = find_token(document.body, "arg", i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "name-noext":
            continue
        document.body[arg] = 'arg "name"'


def revert_l7ninfo(document):
    """Revert l7n Info inset to text."""

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, "type", i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "l7n":
            continue
        arg = find_token(document.body, "arg", i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&),
        # while keeping literal " & "
        argv = (
            argv.rstrip(":")
            .split("|")[0]
            .replace(" & ", "</amp;>")
            .replace("&", "")
            .replace("</amp;>", " & ")
        )
        document.body[i : j + 1] = [argv]
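
# Illustrative sketch (hypothetical GUI strings): the accelerator stripping
# above maps, e.g.,
#     "Open...|O"      -> "Open..."          (menu accelerator dropped)
#     "&Apply:"        -> "Apply"            (Qt accelerator and trailing colon dropped)
#     "Find & Replace" -> "Find & Replace"   (literal " & " is preserved)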


def revert_listpargs(document):
    """Reverts listpreamble arguments to TeX-code"""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Argument listpreamble:", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            continue
        parbeg = parent[3]
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = document.body[beginPlain + 1 : endPlain]
        del document.body[i : j + 1]
        subst = (
            [
                "\\begin_inset ERT",
                "status collapsed",
                "",
                "\\begin_layout Plain Layout",
                "{",
            ]
            + content
            + ["}", "\\end_layout", "", "\\end_inset", ""]
        )
        document.body[parbeg:parbeg] = subst
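
# Illustrative sketch (the argument content is hypothetical): a list preamble
# argument holding "\setlength{\itemsep}{0pt}" is re-inserted at the start of
# the parent paragraph as the ERT
#     {\setlength{\itemsep}{0pt}}
# where the surrounding braces keep the code local to the list environment.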


def revert_lformatinfo(document):
    """Revert layout format Info inset to text."""

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, "type", i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
            continue
        arg = find_token(document.body, "arg", i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
            continue
        # hardcoded for now
        document.body[i : j + 1] = ["69"]


def convert_hebrew_parentheses(document):
    """Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    current_languages = [document.language]
    current_layouts = []
    current_insets = []
    # pass thru argument insets
    skip_layouts_arguments = {}
    skip_insets_arguments = {}
    # pass thru insets
    skip_insets = ["Formula", "ERT", "listings", "Flex URL"]
    # pass thru insets per document class
    if document.textclass in [
        "beamer",
        "scrarticle-beamer",
        "beamerposter",
        "article-beamer",
    ]:
        skip_layouts_arguments.update(
            {
                "Itemize": ["1", "item:2"],
                "Enumerate": ["1", "item:2"],
                "Description": ["1", "item:1"],
                "Part": ["1"],
                "Section": ["1"],
                "Section*": ["1"],
                "Subsection": ["1"],
                "Subsection*": ["1"],
                "Subsubsection": ["1"],
                "Subsubsection*": ["1"],
                "Frame": ["1", "2"],
                "AgainFrame": ["1", "2"],
                "PlainFrame": ["1", "2"],
                "FragileFrame": ["1", "2"],
                "FrameTitle": ["1"],
                "FrameSubtitle": ["1"],
                "Overprint": ["item:1"],
                "Uncover": ["1"],
                "Only": ["1"],
                "Block": ["1"],
                "ExampleBlock": ["1"],
                "AlertBlock": ["1"],
                "Quotation": ["1"],
                "Quote": ["1"],
                "Verse": ["1"],
                "Corollary": ["1"],
                "Definition": ["1"],
                "Definitions": ["1"],
                "Example": ["1"],
                "Examples": ["1"],
                "Fact": ["1"],
                "Lemma": ["1"],
                "proof": ["1"],
                "Theorem": ["1"],
                "NoteItem": ["1"],
            }
        )
        skip_insets_arguments.update(
            {
                "Flex Bold": ["1"],
                "Flex Emphasize": ["1"],
                "Flex Alert": ["1"],
                "Flex Structure": ["1"],
                "Flex Only": ["1"],
                "Flex Uncover": ["1"],
                "Flex Visible": ["1"],
                "Flex Invisible": ["1"],
                "Flex Alternative": ["1"],
                "Flex Beamer Note": ["1"],
            }
        )
    elif document.textclass == "europecv":
        skip_layouts_arguments.update({"Picture": ["1"], "Item": ["1"], "MotherTongue": ["1"]})
    elif document.textclass in ["acmsiggraph", "acmsiggraph-0-92"]:
        skip_insets_arguments.update({"Flex CRcat": ["1", "2", "3"]})
    elif document.textclass in ["aastex", "aastex6", "aastex62"]:
        skip_layouts_arguments.update({"Altaffilation": ["1"]})
    elif document.textclass == "jss":
        skip_insets.append("Flex Code Chunk")
    elif document.textclass == "moderncv":
        skip_layouts_arguments.update({"Photo": ["1", "2"]})
        skip_insets_arguments.update({"Flex Column": ["1"]})
    elif document.textclass == "agutex":
        skip_layouts_arguments.update({"Author affiliation": ["1"]})
    elif document.textclass in ["ijmpd", "ijmpc"]:
        skip_layouts_arguments.update({"RomanList": ["1"]})
    elif document.textclass in ["jlreq-book", "jlreq-report", "jlreq-article"]:
        skip_insets.append("Flex Warichu*")
    # pass thru insets per module
    if "hpstatement" in document.get_module_list():
        skip_insets.append("Flex H-P number")
    if "tcolorbox" in document.get_module_list():
        skip_layouts_arguments.update({"New Color Box Type": ["3"]})
    if "sweave" in document.get_module_list():
        skip_insets.extend(
            [
                "Flex Sweave Options",
                "Flex S/R expression",
                "Flex Sweave Input File",
                "Flex Chunk",
            ]
        )
    if "knitr" in document.get_module_list():
        skip_insets.extend(["Flex Sweave Options", "Flex S/R expression", "Flex Chunk"])
    if "linguistics" in document.get_module_list():
        skip_layouts_arguments.update(
            {
                "Numbered Example (multiline)": ["1"],
                "Numbered Examples (consecutive)": ["1"],
                "Subexample": ["1"],
            }
        )
    if "chessboard" in document.get_module_list():
        skip_insets.append("Flex Mainline")
        skip_layouts_arguments.update({"NewChessGame": ["1"]})
        skip_insets_arguments.update({"Flex ChessBoard": ["1"]})
    if "lilypond" in document.get_module_list():
        skip_insets.append("Flex LilyPond")
    if "noweb" in document.get_module_list():
        skip_insets.append("Flex Chunk")
    if "multicol" in document.get_module_list():
        skip_insets_arguments.update({"Flex Multiple Columns": ["1"]})

    i = 0
    inset_is_arg = False
    while i < len(document.body):
        line = document.body[i]
        if line.startswith("\\lang "):
            tokenend = len("\\lang ")
            lang = line[tokenend:].strip()
            current_languages[-1] = lang
        elif line.startswith("\\begin_layout "):
            current_languages.append(current_languages[-1])
            tokenend = len("\\begin_layout ")
            layout = line[tokenend:].strip()
            current_layouts.append(layout)
        elif line.startswith("\\end_layout"):
            current_languages.pop()
            current_layouts.pop()
        elif line.startswith("\\begin_inset Argument "):
            tokenend = len("\\begin_inset Argument ")
            Argument = line[tokenend:].strip()
            # all listpreamble:1 arguments are pass thru
            listpreamble = Argument == "listpreamble:1"
            layout_arg = current_layouts and Argument in skip_layouts_arguments.get(
                current_layouts[-1], []
            )
            inset_arg = current_insets and Argument in skip_insets_arguments.get(
                current_insets[-1], []
            )
            if layout_arg or inset_arg or listpreamble:
                # In these arguments, parentheses must not be changed
                i = find_end_of_inset(document.body, i) + 1
                continue
            else:
                inset_is_arg = True
        elif line.startswith("\\begin_inset "):
            tokenend = len("\\begin_inset ")
            inset = line[tokenend:].strip()
            current_insets.append(inset)
            if inset in skip_insets:
                # In these insets, parentheses must not be changed
                i = find_end_of_inset(document.body, i)
                continue
        elif line.startswith("\\end_inset"):
            if inset_is_arg:
                inset_is_arg = is_in_inset(document.body, i, "\\begin_inset Argument")[0] != -1
            else:
                current_insets.pop()
        elif current_languages[-1] == "hebrew" and not line.startswith("\\"):
            document.body[i] = line.replace("(", "\x00").replace(")", "(").replace("\x00", ")")
        i += 1
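
# Illustrative sketch: inside Hebrew text, a body line such as
#     (abc)
# becomes
#     )abc(
# The NUL byte ("\x00") only serves as a temporary placeholder so that the
# two replace() calls do not cancel each other out.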


def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed"""
    # This only exists to keep the convert/revert naming convention
    convert_hebrew_parentheses(document)


def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output"""

    revert_language(document, "malayalam", "", "malayalam")


def revert_soul(document):
    """Revert soul module flex insets to ERT"""

    flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]

    for flex in flexes:
        i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
        if i != -1:
            add_to_preamble(document, ["\\usepackage{soul}"])
            break
    i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
    if i != -1:
        add_to_preamble(document, ["\\usepackage{color}"])

    revert_flex_inset(document, "Spaceletters", "\\so")
    revert_flex_inset(document, "Strikethrough", "\\st")
    revert_flex_inset(document, "Underline", "\\ul")
    revert_flex_inset(document, "Highlight", "\\hl")
    revert_flex_inset(document, "Capitalize", "\\caps")


def revert_tablestyle(document):
    """Remove tablestyle params"""

    i = find_token(document.header, "\\tablestyle")
    if i != -1:
        del document.header[i]


def revert_bibfileencodings(document):
    """Revert individual Biblatex bibliography encodings"""

    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # Check if biblatex
    biblatex = False
    if engine in ["biblatex", "biblatex-natbib"]:
        biblatex = True

    # Map lyx to latex encoding names
    encodings = {
        "utf8": "utf8",
        "utf8x": "utf8x",
        "armscii8": "armscii8",
        "iso8859-1": "latin1",
        "iso8859-2": "latin2",
        "iso8859-3": "latin3",
        "iso8859-4": "latin4",
        "iso8859-5": "iso88595",
        "iso8859-6": "8859-6",
        "iso8859-7": "iso-8859-7",
        "iso8859-8": "8859-8",
        "iso8859-9": "latin5",
        "iso8859-13": "latin7",
        "iso8859-15": "latin9",
        "iso8859-16": "latin10",
        "applemac": "applemac",
        "cp437": "cp437",
        "cp437de": "cp437de",
        "cp850": "cp850",
        "cp852": "cp852",
        "cp855": "cp855",
        "cp858": "cp858",
        "cp862": "cp862",
        "cp865": "cp865",
        "cp866": "cp866",
        "cp1250": "cp1250",
        "cp1251": "cp1251",
        "cp1252": "cp1252",
        "cp1255": "cp1255",
        "cp1256": "cp1256",
        "cp1257": "cp1257",
        "koi8-r": "koi8-r",
        "koi8-u": "koi8-u",
        "pt154": "pt154",
        "utf8-platex": "utf8",
        "ascii": "ascii",
    }

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of bibtex inset at line %d!!" % (i))
            continue
        encodings = get_quoted_value(document.body, "file_encodings", i, j)
        if not encodings:
            i = j
            continue
        bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        if len(bibfiles) == 0:
            document.warning("Bibtex inset at line %d does not have a bibfile!" % (i))
        # remove encoding line
        k = find_token(document.body, "file_encodings", i, j)
        if k != -1:
            del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        if biblatex:
            enclist = encodings.split("\t")
            encmap = dict()
            for pp in enclist:
                ppp = pp.split(" ", 1)
                encmap[ppp[0]] = ppp[1]
            for bib in bibfiles:
                pr = "\\addbibresource"
                if bib in encmap.keys():
                    pr += "[bibencoding=" + encmap[bib] + "]"
                pr += "{" + bib + "}"
                add_to_preamble(document, [pr])
            # Insert ERT \\printbibliography and wrap bibtex inset to a Note
            pcmd = "printbibliography"
            if opts:
                pcmd += "[" + opts + "]"
            repl = [
                "\\begin_inset ERT",
                "status open",
                "",
                "\\begin_layout Plain Layout",
                "",
                "",
                "\\backslash",
                pcmd,
                "\\end_layout",
                "",
                "\\end_inset",
                "",
                "",
                "\\end_layout",
                "",
                "\\begin_layout Standard",
                "\\begin_inset Note Note",
                "status open",
                "",
                "\\begin_layout Plain Layout",
            ]
            repl += document.body[i : j + 1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i : j + 1] = repl
            j += 27

        i = j
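
# Illustrative sketch (file names are hypothetical): with biblatex active and
#     file_encodings "refs.bib utf8\tnotes.bib latin1"
# the bibtex inset is wrapped into a note and the user preamble receives
#     \addbibresource[bibencoding=utf8]{refs.bib}
#     \addbibresource[bibencoding=latin1]{notes.bib}
# while an ERT \printbibliography (carrying any biblatexopts) replaces the
# bibliography in the body.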


def revert_cmidruletrimming(document):
    """Remove \\cmidrule trimming"""

    # FIXME: Revert to TeX code?
    i = 0
    while True:
        # first, let's find out if we need to do anything
        i = find_token(document.body, "<cell ", i + 1)
        if i == -1:
            return
        j = document.body[i].find('trim="')
        if j == -1:
            continue
        rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
        # remove trim option
        document.body[i] = rgx.sub("", document.body[i])


ruby_inset_def = [
    r"### Inserted by lyx2lyx (ruby inset) ###",
    r"InsetLayout Flex:Ruby",
    r" LyxType charstyle",
    r" LatexType command",
    r" LatexName ruby",
    r" HTMLTag ruby",
    r' HTMLAttr ""',
    r" HTMLInnerTag rb",
    r' HTMLInnerAttr ""',
    r" BgColor none",
    r' LabelString "Ruby"',
    r" Decoration Conglomerate",
    r" Preamble",
    r" \ifdefined\kanjiskip",
    r" \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}",
    r" \else \ifdefined\luatexversion",
    r" \usepackage{luatexja-ruby}",
    r" \else \ifdefined\XeTeXversion",
    r" \usepackage{ruby}%",
    r" \fi\fi\fi",
    r" \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}",
    r" EndPreamble",
    r" Argument post:1",
    r' LabelString "ruby text"',
    r' MenuString "Ruby Text|R"',
    r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
    r" Decoration Conglomerate",
    r" Font",
    r" Size tiny",
    r" EndFont",
    r" LabelFont",
    r" Size tiny",
    r" EndFont",
    r" Mandatory 1",
    r" EndArgument",
    r"End",
]


def convert_ruby_module(document):
    """Use ruby module instead of local module definition"""
    if document.del_local_layout(ruby_inset_def):
        document.add_module("ruby")


def revert_ruby_module(document):
    """Replace ruby module with local module definition"""
    if document.del_module("ruby"):
        document.append_local_layout(ruby_inset_def)


def convert_utf8_japanese(document):
    """Use generic utf8 with Japanese documents."""
    lang = get_value(document.header, "\\language")
    if not lang.startswith("japanese"):
        return
    inputenc = get_value(document.header, "\\inputencoding")
    if (lang == "japanese" and inputenc == "utf8-platex") or (
        lang == "japanese-cjk" and inputenc == "utf8-cjk"
    ):
        document.set_parameter("inputencoding", "utf8")


def revert_utf8_japanese(document):
    """Use Japanese utf8 variants with Japanese documents."""
    inputenc = get_value(document.header, "\\inputencoding")
    if inputenc != "utf8":
        return
    lang = get_value(document.header, "\\language")
    if lang == "japanese":
        document.set_parameter("inputencoding", "utf8-platex")
    if lang == "japanese-cjk":
        document.set_parameter("inputencoding", "utf8-cjk")


def revert_lineno(document):
    "Replace lineno setting with user-preamble code."

    options = get_quoted_value(document.header, "\\lineno_options", delete=True)
    if not get_bool_value(document.header, "\\use_lineno", delete=True):
        return
    if options:
        options = "[" + options + "]"
    add_to_preamble(document, ["\\usepackage%s{lineno}" % options, "\\linenumbers"])


def convert_lineno(document):
    "Replace user-preamble code with native lineno support."
    use_lineno = 0
    options = ""
    i = find_token(document.preamble, "\\linenumbers", 1)
    if i > 0:
        usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i - 1])
        if usepkg:
            use_lineno = 1
            options = usepkg.group(1).strip("[]")
            del document.preamble[i - 1 : i + 1]
            if i > 1:
                del_token(document.preamble, "% Added by lyx2lyx", i - 2, i - 1)

    k = find_token(document.header, "\\index ")
    if options == "":
        document.header[k:k] = ["\\use_lineno %d" % use_lineno]
    else:
        document.header[k:k] = [
            "\\use_lineno %d" % use_lineno,
            "\\lineno_options %s" % options,
        ]
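
# Illustrative sketch (hypothetical package options): convert_lineno turns the
# preamble lines
#     \usepackage[modulo]{lineno}
#     \linenumbers
# into the header settings "\use_lineno 1" and "\lineno_options modulo";
# revert_lineno above writes them back in exactly this form.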


def convert_aaencoding(document):
    "Convert default document option due to encoding change in aa class."

    if document.textclass != "aa":
        return

    i = find_token(document.header, "\\use_default_options true")
    if i == -1:
        return
    val = get_value(document.header, "\\inputencoding")
    if not val:
        document.warning("Malformed LyX Document! Missing '\\inputencoding' header.")
        return
    if val == "auto-legacy" or val == "latin9":
        document.header[i] = "\\use_default_options false"
        k = find_token(document.header, "\\options")
        if k == -1:
            document.header.insert(i, "\\options latin9")
        else:
            document.header[k] += ",latin9"


def revert_aaencoding(document):
    "Revert default document option due to encoding change in aa class."

    if document.textclass != "aa":
        return

    i = find_token(document.header, "\\use_default_options true")
    if i == -1:
        return
    val = get_value(document.header, "\\inputencoding")
    if not val:
        document.warning("Malformed LyX Document! Missing \\inputencoding header.")
        return
    if val == "utf8":
        document.header[i] = "\\use_default_options false"
        k = find_token(document.header, "\\options", 0)
        if k == -1:
            document.header.insert(i, "\\options utf8")
        else:
            document.header[k] += ",utf8"


def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography)."""

    # lyxname: (babelname, polyglossianame)
    new_languages = {
        "azerbaijani": ("azerbaijani", ""),
        "bengali": ("", "bengali"),
        "churchslavonic": ("", "churchslavonic"),
        "oldrussian": ("", "russian"),
        "korean": ("", "korean"),
    }
    if document.language in new_languages:
        used_languages = {document.language}
    else:
        used_languages = set()
    i = 0
    while True:
        i = find_token(document.body, "\\lang", i + 1)
        if i == -1:
            break
        val = get_value(document.body, "\\lang", i)
        if val in new_languages:
            used_languages.add(val)

    # Korean is already supported via CJK, so leave as-is for Babel
    if "korean" in used_languages and (
        not get_bool_value(document.header, "\\use_non_tex_fonts")
        or get_value(document.header, "\\language_package") == "babel"
    ):
        used_languages.discard("korean")

    for lang in used_languages:
        revert_language(document, lang, *new_languages[lang])
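
# Illustrative sketch: a document (or text span) using "\lang oldrussian" is
# handed to revert_language(document, "oldrussian", "", "russian"), i.e. there
# is no babel fallback and polyglossia's "russian" is used for the emulation.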
|
2019-06-03 14:43:16 +00:00
|
|
|
|
|
|
|
|
|
|
2019-06-04 09:01:19 +00:00
|
|
|
|
gloss_inset_def = [
    r"### Inserted by lyx2lyx (deprecated ling glosses) ###",
    r"InsetLayout Flex:Glosse",
    r" LyXType custom",
    r' LabelString "Gloss (old version)"',
    r' MenuString "Gloss (old version)"',
    r" LatexType environment",
    r" LatexName linggloss",
    r" Decoration minimalistic",
    r" LabelFont",
    r" Size Small",
    r" EndFont",
    r" MultiPar true",
    r" CustomPars false",
    r" ForcePlain true",
    r" ParbreakIsNewline true",
    r" FreeSpacing true",
    r" Requires covington",
    r" Preamble",
    r" \def\glosstr{}",
    r" \@ifundefined{linggloss}{%",
    r" \newenvironment{linggloss}[2][]{",
    r" \def\glosstr{\glt #1}%",
    r" \gll #2}",
    r" {\glosstr\glend}}{}",
    r" EndPreamble",
    r" InToc true",
    r" ResetsFont true",
    r" Argument 1",
    r" Decoration conglomerate",
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
    r" EndArgument",
    r"End",
]


glosss_inset_def = [
    r"### Inserted by lyx2lyx (deprecated ling glosses) ###",
    r"InsetLayout Flex:Tri-Glosse",
    r" LyXType custom",
    r' LabelString "Tri-Gloss (old version)"',
    r' MenuString "Tri-Gloss (old version)"',
    r" LatexType environment",
    r" LatexName lingglosss",
    r" Decoration minimalistic",
    r" LabelFont",
    r" Size Small",
    r" EndFont",
    r" MultiPar true",
    r" CustomPars false",
    r" ForcePlain true",
    r" ParbreakIsNewline true",
    r" FreeSpacing true",
    r" InToc true",
    r" Requires covington",
    r" Preamble",
    r" \def\glosstr{}",
    r" \@ifundefined{lingglosss}{%",
    r" \newenvironment{lingglosss}[2][]{",
    r" \def\glosstr{\glt #1}%",
    r" \glll #2}",
    r" {\glosstr\glend}}{}",
    r" EndPreamble",
    r" ResetsFont true",
    r" Argument 1",
    r" Decoration conglomerate",
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
    r" EndArgument",
    r"End",
]


def convert_linggloss(document):
    "Move old ling glosses to local layout"
    if find_token(document.body, "\\begin_inset Flex Glosse", 0) != -1:
        document.append_local_layout(gloss_inset_def)
    if find_token(document.body, "\\begin_inset Flex Tri-Glosse", 0) != -1:
        document.append_local_layout(glosss_inset_def)


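# The revert_* functions below share one pattern: each LyX "Argument" inset
# inside the flex inset (or layout) is located, the content of its Plain
# Layout is copied out, the Argument inset is deleted (together with its
# enclosing paragraph if the inset was its sole content), and the collected
# pieces are finally reassembled around the raw LaTeX command in ERT.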
def revert_linggloss(document):
    "Revert to old ling gloss definitions"
    if "linguistics" not in document.get_module_list():
        return
    document.del_local_layout(gloss_inset_def)
    document.del_local_layout(glosss_inset_def)

    cov_req = False
    glosses = [
        "\\begin_inset Flex Interlinear Gloss (2 Lines)",
        "\\begin_inset Flex Interlinear Gloss (3 Lines)",
    ]
    for glosse in glosses:
        i = 0
        while True:
            i = find_token(document.body, glosse, i + 1)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Gloss inset")
                continue

            arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            optargcontent = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find optarg plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            marg1content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
            endarg = find_end_of_inset(document.body, arg)
            marg2content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
            endarg = find_end_of_inset(document.body, arg)
            marg3content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            cmd = "\\digloss"
            if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
                cmd = "\\trigloss"

            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endInset = find_end_of_inset(document.body, i)
            endPlain = find_end_of_layout(document.body, beginPlain)
            precontent = put_cmd_in_ert(cmd)
            if len(optargcontent) > 0:
                precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
            precontent += put_cmd_in_ert("{")

            postcontent = (
                put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
            )
            if cmd == "\\trigloss":
                postcontent += put_cmd_in_ert("}{") + marg3content
            postcontent += put_cmd_in_ert("}")

            document.body[endPlain : endInset + 1] = postcontent
            document.body[beginPlain + 1 : beginPlain] = precontent
            del document.body[i : beginPlain + 1]
            if not cov_req:
                document.append_local_layout("Requires covington")
                cov_req = True
            i = beginPlain


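# A Subexample layout with an Argument 1 inset has no representation in the
# old format, so the whole run of consecutive Subexample paragraphs is
# rewritten as Standard paragraphs carrying, in ERT,
#     \begin{subexamples}[<argument>] \item ... \end{subexamples}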
def revert_subexarg(document):
    "Revert linguistic subexamples with argument to ERT"

    if "linguistics" not in document.get_module_list():
        return

    cov_req = False
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout Subexample", i + 1)
        if i == -1:
            break
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Subexample layout")
            continue
        while True:
            # check for consecutive layouts
            k = find_token(document.body, "\\begin_layout", j)
            if k == -1 or document.body[k] != "\\begin_layout Subexample":
                break
            j = find_end_of_layout(document.body, k)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Subexample layout")
                continue

        arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
        if arg == -1:
            continue

        endarg = find_end_of_inset(document.body, arg)
        optargcontent = ""
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find optarg plain Layout")
            continue
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])

        # remove Arg insets and paragraph, if it only contains this inset
        if (
            document.body[arg - 1] == "\\begin_layout Plain Layout"
            and find_end_of_layout(document.body, arg - 1) == endarg + 3
        ):
            del document.body[arg - 1 : endarg + 4]
        else:
            del document.body[arg : endarg + 1]

        cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")

        # re-find end of layout
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Subexample layout")
            continue
        while True:
            # check for consecutive layouts
            k = find_token(document.body, "\\begin_layout", j)
            if k == -1 or document.body[k] != "\\begin_layout Subexample":
                break
            document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
            j = find_end_of_layout(document.body, k)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Subexample layout")
                continue

        endev = put_cmd_in_ert("\\end{subexamples}")

        document.body[j:j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
        document.body[i : i + 1] = (
            ["\\begin_layout Standard"]
            + cmd
            + ["\\end_layout", "", "\\begin_layout Standard"]
            + put_cmd_in_ert("\\item ")
        )
        if not cov_req:
            document.append_local_layout("Requires covington")
            cov_req = True


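# DRS insets take up to two pre-arguments (Argument 1/2, emitted before the
# main brace group; Argument 2 is only used by SDRS) and up to four
# post-arguments (Argument post:1..4, appended after it). How many of them
# are emitted depends on the concrete command (\drs, \ifdrs, \condrs,
# \qdrs, ...), as the branching at the end of the function shows.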
def revert_drs(document):
    "Revert DRS insets (linguistics) to ERT"

    if "linguistics" not in document.get_module_list():
        return

    cov_req = False
    drses = [
        "\\begin_inset Flex DRS",
        "\\begin_inset Flex DRS*",
        "\\begin_inset Flex IfThen-DRS",
        "\\begin_inset Flex Cond-DRS",
        "\\begin_inset Flex QDRS",
        "\\begin_inset Flex NegDRS",
        "\\begin_inset Flex SDRS",
    ]
    for drs in drses:
        i = 0
        while True:
            i = find_token(document.body, drs, i + 1)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue

            # Check for arguments
            arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            prearg1content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning(
                        "Malformed LyX document: Can't find Argument 1 plain Layout"
                    )
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # re-find inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue

            arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
            endarg = find_end_of_inset(document.body, arg)
            prearg2content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning(
                        "Malformed LyX document: Can't find Argument 2 plain Layout"
                    )
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # re-find inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue

            arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg1content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning(
                        "Malformed LyX document: Can't find Argument post:1 plain Layout"
                    )
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # re-find inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue

            arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg2content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning(
                        "Malformed LyX document: Can't find Argument post:2 plain Layout"
                    )
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # re-find inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue

            arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg3content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning(
                        "Malformed LyX document: Can't find Argument post:3 plain Layout"
                    )
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # re-find inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue

            arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg4content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning(
                        "Malformed LyX document: Can't find Argument post:4 plain Layout"
                    )
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # The respective LaTeX command
            cmd = "\\drs"
            if drs == "\\begin_inset Flex DRS*":
                cmd = "\\drs*"
            elif drs == "\\begin_inset Flex IfThen-DRS":
                cmd = "\\ifdrs"
            elif drs == "\\begin_inset Flex Cond-DRS":
                cmd = "\\condrs"
            elif drs == "\\begin_inset Flex QDRS":
                cmd = "\\qdrs"
            elif drs == "\\begin_inset Flex NegDRS":
                cmd = "\\negdrs"
            elif drs == "\\begin_inset Flex SDRS":
                cmd = "\\sdrs"

            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endInset = find_end_of_inset(document.body, i)
            endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
            precontent = put_cmd_in_ert(cmd)
            precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
            if drs == "\\begin_inset Flex SDRS":
                precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
            precontent += put_cmd_in_ert("{")

            postcontent = []
            if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
                postcontent = (
                    put_cmd_in_ert("}{")
                    + postarg1content
                    + put_cmd_in_ert("}{")
                    + postarg2content
                    + put_cmd_in_ert("}")
                )
                if cmd == "\\condrs" or cmd == "\\qdrs":
                    postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
                if cmd == "\\qdrs":
                    postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
            else:
                postcontent = put_cmd_in_ert("}")

            document.body[endPlain : endInset + 1] = postcontent
            document.body[beginPlain + 1 : beginPlain] = precontent
            del document.body[i : beginPlain + 1]
            if not cov_req:
                document.append_local_layout("Provides covington 1")
                add_to_preamble(document, ["\\usepackage{drs,covington}"])
                cov_req = True
            i = beginPlain


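# Font headers have the shape  \font_roman "<name>" "<name2>"  where quoted
# values may contain spaces, so a plain str.split() would tear them apart.
# The regex used throughout below keeps quoted chunks intact, e.g.
#     re.findall(r'[^"\s]\S*|".+?"', '\\font_roman "default" "Minion Pro"')
# yields ['\\font_roman', '"default"', '"Minion Pro"'].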
def revert_babelfont(document):
    "Reverts the use of \\babelfont to user preamble"

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    i = find_token(document.header, "\\language_package", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\language_package.")
        return
    if get_value(document.header, "\\language_package", 0) != "babel":
        return

    # check font settings
    # defaults
    roman = sans = typew = "default"
    osf = False
    sf_scale = tt_scale = 100.0

    j = find_token(document.header, "\\font_roman", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        roman = romanfont[2].strip('"')
        romanfont[2] = '"default"'
        document.header[j] = " ".join(romanfont)

    j = find_token(document.header, "\\font_sans", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        sans = sansfont[2].strip('"')
        sansfont[2] = '"default"'
        document.header[j] = " ".join(sansfont)

    j = find_token(document.header, "\\font_typewriter", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        typew = ttfont[2].strip('"')
        ttfont[2] = '"default"'
        document.header[j] = " ".join(ttfont)

    i = find_token(document.header, "\\font_osf", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_osf.")
    else:
        osf = str2bool(get_value(document.header, "\\font_osf", i))

    j = find_token(document.header, "\\font_sf_scale", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing \\font_sf_scale.")
    else:
        sfscale = document.header[j].split()
        val = sfscale[2]
        sfscale[2] = "100"
        document.header[j] = " ".join(sfscale)
        try:
            # float() raises ValueError on malformed input
            sf_scale = float(val)
        except ValueError:
            document.warning("Invalid font_sf_scale value: " + val)

    j = find_token(document.header, "\\font_tt_scale", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing \\font_tt_scale.")
    else:
        ttscale = document.header[j].split()
        val = ttscale[2]
        ttscale[2] = "100"
        document.header[j] = " ".join(ttscale)
        try:
            # float() raises ValueError on malformed input
            tt_scale = float(val)
        except ValueError:
            document.warning("Invalid font_tt_scale value: " + val)

    # set preamble stuff
    pretext = ["%% This document must be processed with xelatex or lualatex!"]
    pretext.append("\\AtBeginDocument{%")
    have_append = False
    if roman != "default":
        pretext.append("\\babelfont{rm}[Mapping=tex-text]{" + roman + "}")
        have_append = True
    if sans != "default":
        sf = "\\babelfont{sf}["
        if sf_scale != 100.0:
            sf += "Scale=" + str(sf_scale / 100.0) + ","
        sf += "Mapping=tex-text]{" + sans + "}"
        pretext.append(sf)
        have_append = True
    if typew != "default":
        tw = "\\babelfont{tt}"
        if tt_scale != 100.0:
            tw += "[Scale=" + str(tt_scale / 100.0) + "]"
        tw += "{" + typew + "}"
        pretext.append(tw)
        have_append = True
    if osf:
        pretext.append("\\defaultfontfeatures{Numbers=OldStyle}")
        have_append = True

    if have_append:
        pretext.append("}")
        insert_to_preamble(document, pretext)


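# Sketch of the reversion below (with a hypothetical options value): a
# document with \font_roman "minionpro", \font_osf false and
# \font_roman_opts "footnotefigures" ends up with
#     \usepackage[lf,footnotefigures]{MinionPro}
# in the user preamble; "lf" restores lining figures when OSF is off.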
def revert_minionpro(document):
    "Revert native MinionPro font definition (with extra options) to LaTeX"

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    regexp = re.compile(r"(\\font_roman_opts)")
    x = find_re(document.header, regexp, 0)
    if x == -1:
        return

    # We need to use this regex since split() does not handle quote protection
    romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
    opts = romanopts[1].strip('"')

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
        return
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman != "minionpro":
            return
        romanfont[1] = '"default"'
        document.header[i] = " ".join(romanfont)
        osf = False
        j = find_token(document.header, "\\font_osf true", 0)
        if j != -1:
            osf = True
        preamble = "\\usepackage["
        if osf:
            document.header[j] = "\\font_osf false"
        else:
            preamble += "lf,"
        preamble += opts
        preamble += "]{MinionPro}"
        add_to_preamble(document, [preamble])
        del document.header[x]


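# With non-TeX fonts, the reversion targets fontspec (\setmainfont,
# \setsansfont, \setmonofont) or, when babel is the language package,
# \babelfont{rm|sf|tt}; a header scale of e.g. 85 is emitted as fontspec's
# Scale=0.85.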
def revert_font_opts(document):
    "Revert font options by outputting \\setxxxfont or \\babelfont to the preamble"

    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
    Babel = get_value(document.header, "\\language_package") == "babel"

    # 1. Roman
    regexp = re.compile(r"(\\font_roman_opts)")
    i = find_re(document.header, regexp, 0)
    if i != -1:
        # We need to use this regex since split() does not handle quote protection
        romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        opts = romanopts[1].strip('"')
        del document.header[i]
        if NonTeXFonts:
            regexp = re.compile(r"(\\font_roman)")
            i = find_re(document.header, regexp, 0)
            if i != -1:
                # We need to use this regex since split() does not handle quote protection
                romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
                font = romanfont[2].strip('"')
                romanfont[2] = '"default"'
                document.header[i] = " ".join(romanfont)
                if font != "default":
                    if Babel:
                        preamble = "\\babelfont{rm}["
                    else:
                        preamble = "\\setmainfont["
                    preamble += opts
                    preamble += ","
                    preamble += "Mapping=tex-text]{"
                    preamble += font
                    preamble += "}"
                    add_to_preamble(document, [preamble])

    # 2. Sans
    regexp = re.compile(r"(\\font_sans_opts)")
    i = find_re(document.header, regexp, 0)
    if i != -1:
        scaleval = 100
        # We need to use this regex since split() does not handle quote protection
        sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        opts = sfopts[1].strip('"')
        del document.header[i]
        if NonTeXFonts:
            regexp = re.compile(r"(\\font_sf_scale)")
            i = find_re(document.header, regexp, 0)
            if i != -1:
                scaleval = get_value(document.header, "\\font_sf_scale", i).split()[1]
            regexp = re.compile(r"(\\font_sans)")
            i = find_re(document.header, regexp, 0)
            if i != -1:
                # We need to use this regex since split() does not handle quote protection
                sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
                font = sffont[2].strip('"')
                sffont[2] = '"default"'
                document.header[i] = " ".join(sffont)
                if font != "default":
                    if Babel:
                        preamble = "\\babelfont{sf}["
                    else:
                        preamble = "\\setsansfont["
                    preamble += opts
                    preamble += ","
                    # scaleval is an int (100) by default but a string when
                    # read from the header, so compare as strings
                    if str(scaleval) != "100":
                        preamble += "Scale=0."
                        preamble += scaleval
                        preamble += ","
                    preamble += "Mapping=tex-text]{"
                    preamble += font
                    preamble += "}"
                    add_to_preamble(document, [preamble])

    # 3. Typewriter
    regexp = re.compile(r"(\\font_typewriter_opts)")
    i = find_re(document.header, regexp, 0)
    if i != -1:
        scaleval = 100
        # We need to use this regex since split() does not handle quote protection
        ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        opts = ttopts[1].strip('"')
        del document.header[i]
        if NonTeXFonts:
            regexp = re.compile(r"(\\font_tt_scale)")
            i = find_re(document.header, regexp, 0)
            if i != -1:
                scaleval = get_value(document.header, "\\font_tt_scale", i).split()[1]
            regexp = re.compile(r"(\\font_typewriter)")
            i = find_re(document.header, regexp, 0)
            if i != -1:
                # We need to use this regex since split() does not handle quote protection
                ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
                font = ttfont[2].strip('"')
                ttfont[2] = '"default"'
                document.header[i] = " ".join(ttfont)
                if font != "default":
                    if Babel:
                        preamble = "\\babelfont{tt}["
                    else:
                        preamble = "\\setmonofont["
                    preamble += opts
                    preamble += ","
                    # see the note on scaleval above
                    if str(scaleval) != "100":
                        preamble += "Scale=0."
                        preamble += scaleval
                        preamble += ","
                    preamble += "Mapping=tex-text]{"
                    preamble += font
                    preamble += "}"
                    add_to_preamble(document, [preamble])


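# "Plain" Noto means the complete \usepackage{noto} setup: roman is
# NotoSerif-TLF while sans and typewriter are still "default". All other
# combinations are handled by the font-mapping based revert_notoFonts_xopts
# below.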
def revert_plainNotoFonts_xopts(document):
    "Revert native (straight) Noto font definition (with extra options) to LaTeX"

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    osf = False
    y = find_token(document.header, "\\font_osf true", 0)
    if y != -1:
        osf = True

    regexp = re.compile(r"(\\font_roman_opts)")
    x = find_re(document.header, regexp, 0)
    if x == -1 and not osf:
        return

    opts = ""
    if x != -1:
        # We need to use this regex since split() does not handle quote protection
        romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        opts = romanopts[1].strip('"')
    if osf:
        if opts != "":
            opts += ", "
        opts += "osf"

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        return

    # We need to use this regex since split() does not handle quote protection
    romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
    roman = romanfont[1].strip('"')
    if roman != "NotoSerif-TLF":
        return

    j = find_token(document.header, "\\font_sans", 0)
    if j == -1:
        return

    # We need to use this regex since split() does not handle quote protection
    sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
    sf = sffont[1].strip('"')
    if sf != "default":
        return

    j = find_token(document.header, "\\font_typewriter", 0)
    if j == -1:
        return

    # We need to use this regex since split() does not handle quote protection
    ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
    tt = ttfont[1].strip('"')
    if tt != "default":
        return

    # So we have noto as "complete font"
    romanfont[1] = '"default"'
    document.header[i] = " ".join(romanfont)

    preamble = "\\usepackage["
    preamble += opts
    preamble += "]{noto}"
    add_to_preamble(document, [preamble])
    if osf:
        document.header[y] = "\\font_osf false"
    if x != -1:
        del document.header[x]


def revert_notoFonts_xopts(document):
    "Revert native (extended) Noto font definition (with extra options) to LaTeX"

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    fontmap = dict()
    fm = createFontMapping(["Noto"])
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)


def revert_IBMFonts_xopts(document):
    "Revert native IBM font definition (with extra options) to LaTeX"

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    fontmap = dict()
    fm = createFontMapping(["IBM"])
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)


def revert_AdobeFonts_xopts(document):
    "Revert native Adobe font definition (with extra options) to LaTeX"

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    fontmap = dict()
    fm = createFontMapping(["Adobe"])
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)


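# The 2.4 format splits the single \font_osf flag into per-family
# \font_roman_osf / \font_sans_osf / \font_typewriter_osf flags. With TeX
# fonts the sans/typewriter flags can only be true for fonts whose packages
# actually provide old-style figures (the osfsf/osftt lists below).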
def convert_osf(document):
    "Convert \\font_osf param to new format"

    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")

    i = find_token(document.header, "\\font_osf", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_osf.")
        return

    osfsf = [
        "biolinum",
        "ADOBESourceSansPro",
        "NotoSansRegular",
        "NotoSansMedium",
        "NotoSansThin",
        "NotoSansLight",
        "NotoSansExtralight",
    ]
    osftt = ["ADOBESourceCodePro", "NotoMonoRegular"]

    osfval = str2bool(get_value(document.header, "\\font_osf", i))
    document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")

    if NonTeXFonts:
        document.header.insert(i, "\\font_sans_osf false")
        document.header.insert(i + 1, "\\font_typewriter_osf false")
        return

    if osfval:
        x = find_token(document.header, "\\font_sans", 0)
        if x == -1:
            document.warning("Malformed LyX document: Missing \\font_sans.")
        else:
            # We need to use this regex since split() does not handle quote protection
            sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            sf = sffont[1].strip('"')
            if sf in osfsf:
                document.header.insert(i, "\\font_sans_osf true")
            else:
                document.header.insert(i, "\\font_sans_osf false")

        x = find_token(document.header, "\\font_typewriter", 0)
        if x == -1:
            document.warning("Malformed LyX document: Missing \\font_typewriter.")
        else:
            # We need to use this regex since split() does not handle quote protection
            ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            tt = ttfont[1].strip('"')

            if tt in osftt:
                document.header.insert(i + 1, "\\font_typewriter_osf true")
            else:
                document.header.insert(i + 1, "\\font_typewriter_osf false")

    else:
        document.header.insert(i, "\\font_sans_osf false")
        document.header.insert(i + 1, "\\font_typewriter_osf false")


def revert_osf(document):
    "Revert \\font_*_osf params"

    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")

    i = find_token(document.header, "\\font_roman_osf", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman_osf.")
        return

    osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
    document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")

    i = find_token(document.header, "\\font_sans_osf", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans_osf.")
        return

    osfval |= str2bool(get_value(document.header, "\\font_sans_osf", i))
    del document.header[i]

    i = find_token(document.header, "\\font_typewriter_osf", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
        return

    osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
    del document.header[i]

    if osfval:
        i = find_token(document.header, "\\font_osf", 0)
        if i == -1:
            document.warning("Malformed LyX document: Missing \\font_osf.")
            return
        document.header[i] = "\\font_osf true"


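# Reverting roman fonts with extra options needs the real package name
# (utopia -> fourier, palatino -> mathpazo, times -> mathptmx,
# xcharter -> XCharter) plus the package-specific old-style-figures option,
# since that option's name differs from package to package (osf, oldstyle,
# osfI, ...).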
def revert_texfontopts(document):
    "Revert native TeX font definitions (with extra options) to LaTeX"

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    rmfonts = [
        "ccfonts",
        "cochineal",
        "utopia",
        "garamondx",
        "libertine",
        "lmodern",
        "palatino",
        "times",
        "xcharter",
    ]

    # First the sf (biolinum only)
    regexp = re.compile(r"(\\font_sans_opts)")
    x = find_re(document.header, regexp, 0)
    if x != -1:
        # We need to use this regex since split() does not handle quote protection
        sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        opts = sfopts[1].strip('"')
        i = find_token(document.header, "\\font_sans", 0)
        if i == -1:
            document.warning("Malformed LyX document: Missing \\font_sans.")
        else:
            # We need to use this regex since split() does not handle quote protection
            sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
            sans = sffont[1].strip('"')
            if sans == "biolinum":
                sf_scale = 100.0
                sffont[1] = '"default"'
                document.header[i] = " ".join(sffont)
                osf = False
                j = find_token(document.header, "\\font_sans_osf true", 0)
                if j != -1:
                    osf = True
                k = find_token(document.header, "\\font_sf_scale", 0)
                if k == -1:
                    document.warning("Malformed LyX document: Missing \\font_sf_scale.")
                else:
                    sfscale = document.header[k].split()
                    val = sfscale[1]
                    sfscale[1] = "100"
                    document.header[k] = " ".join(sfscale)
                    try:
                        # float() raises ValueError on malformed input
                        sf_scale = float(val)
                    except ValueError:
                        document.warning("Invalid font_sf_scale value: " + val)
                preamble = "\\usepackage["
                if osf:
                    document.header[j] = "\\font_sans_osf false"
                    preamble += "osf,"
                if sf_scale != 100.0:
                    preamble += "scaled=" + str(sf_scale / 100.0) + ","
                preamble += opts
                preamble += "]{biolinum}"
                add_to_preamble(document, [preamble])
                del document.header[x]

    regexp = re.compile(r"(\\font_roman_opts)")
    x = find_re(document.header, regexp, 0)
    if x == -1:
        return

    # We need to use this regex since split() does not handle quote protection
    romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
    opts = romanopts[1].strip('"')

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
        return
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman not in rmfonts:
            return
        romanfont[1] = '"default"'
        document.header[i] = " ".join(romanfont)
        package = roman
        if roman == "utopia":
            package = "fourier"
        elif roman == "palatino":
            package = "mathpazo"
        elif roman == "times":
            package = "mathptmx"
        elif roman == "xcharter":
            package = "XCharter"
        osf = ""
        j = find_token(document.header, "\\font_roman_osf true", 0)
        if j != -1:
            if roman == "cochineal":
                osf = "proportional,osf,"
            elif roman == "utopia":
                osf = "oldstyle,"
            elif roman == "garamondx":
                osf = "osfI,"
            elif roman == "libertine":
                osf = "osf,"
            elif roman == "palatino":
                osf = "osf,"
            elif roman == "xcharter":
                osf = "osf,"
            document.header[j] = "\\font_roman_osf false"
        k = find_token(document.header, "\\font_sc true", 0)
        if k != -1:
            if roman == "utopia":
                osf += "expert,"
            if roman == "palatino" and osf == "":
                osf = "sc,"
            document.header[k] = "\\font_sc false"
        preamble = "\\usepackage["
        preamble += osf
        preamble += opts
        preamble += "]{" + package + "}"
        add_to_preamble(document, [preamble])
        del document.header[x]


def convert_CantarellFont(document):
    "Convert Cantarell font definition from LaTeX preamble to native format"

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(["Cantarell"])
        convert_fonts(document, fm, "oldstyle")


def revert_CantarellFont(document):
    "Revert native Cantarell font definition to LaTeX"

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fontmap = dict()
        fm = createFontMapping(["Cantarell"])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)


def convert_ChivoFont(document):
    "Convert Chivo font definition from LaTeX preamble to native format"

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(["Chivo"])
        convert_fonts(document, fm, "oldstyle")


def revert_ChivoFont(document):
    "Revert native Chivo font definition to LaTeX"

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fontmap = dict()
        fm = createFontMapping(["Chivo"])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)


def convert_FiraFont(document):
    "Convert Fira font definition from LaTeX preamble to native format"

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(["Fira"])
        convert_fonts(document, fm, "lf")


def revert_FiraFont(document):
    "Revert native Fira font definition to LaTeX"

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fontmap = dict()
        fm = createFontMapping(["Fira"])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)


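# The IBM Plex "Semibold" pseudo-families are folded into the base family
# here; with TeX fonts, "semibold" is moved into the respective
# \font_*_opts header so the weight information survives the round trip.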
def convert_Semibolds(document):
    "Move semibold options to extraopts"

    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman == "IBMPlexSerifSemibold":
            romanfont[1] = '"IBMPlexSerif"'
            document.header[i] = " ".join(romanfont)

            if not NonTeXFonts:
                regexp = re.compile(r"(\\font_roman_opts)")
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_sf_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_sf_scale")
                    else:
                        document.header.insert(fo, '\\font_roman_opts "semibold"')
                else:
                    # We need to use this regex since split() does not handle quote protection
                    romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = (
                        '\\font_roman_opts "semibold, ' + romanopts[1].strip('"') + '"'
                    )

    i = find_token(document.header, "\\font_sans", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        sf = sffont[1].strip('"')
        if sf == "IBMPlexSansSemibold":
            sffont[1] = '"IBMPlexSans"'
            document.header[i] = " ".join(sffont)

            if not NonTeXFonts:
                regexp = re.compile(r"(\\font_sans_opts)")
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_sf_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_sf_scale")
                    else:
                        document.header.insert(fo, '\\font_sans_opts "semibold"')
                else:
                    # We need to use this regex since split() does not handle quote protection
                    sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = (
                        '\\font_sans_opts "semibold, ' + sfopts[1].strip('"') + '"'
                    )

    i = find_token(document.header, "\\font_typewriter", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        tt = ttfont[1].strip('"')
        if tt == "IBMPlexMonoSemibold":
            ttfont[1] = '"IBMPlexMono"'
            document.header[i] = " ".join(ttfont)

            if not NonTeXFonts:
                regexp = re.compile(r"(\\font_typewriter_opts)")
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_tt_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_tt_scale")
                    else:
                        document.header.insert(fo, '\\font_typewriter_opts "semibold"')
                else:
                    # We need to use this regex since split() does not handle quote protection
                    ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = (
                        '\\font_typewriter_opts "semibold, ' + ttopts[1].strip('"') + '"'
                    )


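# A note on the quote-protecting regex used in convert_Semibolds() above and
# in several converters below. For an illustrative header line:
#     re.findall(r'[^"\s]\S*|".+?"', '\\font_roman "IBM Plex Serif" default')
# returns ['\\font_roman', '"IBM Plex Serif"', 'default'], i.e. the quoted
# font name survives as one token, while str.split() would cut it at the
# inner spaces.
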
def convert_NotoRegulars(document):
    "Merge diverse Noto regular fonts"

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman == "NotoSerif-TLF":
            romanfont[1] = '"NotoSerifRegular"'
            document.header[i] = " ".join(romanfont)

    i = find_token(document.header, "\\font_sans", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        sf = sffont[1].strip('"')
        if sf == "NotoSans-TLF":
            sffont[1] = '"NotoSansRegular"'
            document.header[i] = " ".join(sffont)

    i = find_token(document.header, "\\font_typewriter", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        tt = ttfont[1].strip('"')
        if tt == "NotoMono-TLF":
            ttfont[1] = '"NotoMonoRegular"'
            document.header[i] = " ".join(ttfont)


def convert_CrimsonProFont(document):
    "Convert CrimsonPro font definitions to native support"

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(["CrimsonPro"])
        convert_fonts(document, fm, "lf")


def revert_CrimsonProFont(document):
    "Revert native CrimsonPro font definition to LaTeX"

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fontmap = dict()
        fm = createFontMapping(["CrimsonPro"])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)


def revert_pagesizes(document):
    "Revert new page sizes in memoir and KOMA to options"

    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    defsizes = [
        "default",
        "custom",
        "letterpaper",
        "legalpaper",
        "executivepaper",
        "a4paper",
        "a5paper",
        "b5paper",
    ]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\papersize default"

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
        return
    document.header[i] = document.header[i] + "," + val


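# Sketch of revert_pagesizes() (values illustrative): a scrartcl document with
#     \papersize a0paper
# and geometry disabled ends up as
#     \papersize default
#     \options a0paper
# i.e. the stored size name is passed through verbatim as a class option.
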
def convert_pagesizes(document):
    "Convert to new page sizes in memoir and KOMA"

    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    defsizes = [
        "default",
        "custom",
        "letterpaper",
        "legalpaper",
        "executivepaper",
        "a4paper",
        "a5paper",
        "b5paper",
    ]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry
        document.header[i] = "\\use_geometry true"


def revert_komafontsizes(document):
    "Revert new font sizes in KOMA to options"

    if document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\paperfontsize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\paperfontsize header.")
        return

    defsizes = ["default", "10", "11", "12"]

    val = get_value(document.header, "\\paperfontsize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\paperfontsize default"

    fsize = "fontsize=" + val

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + fsize)
        return
    document.header[i] = document.header[i] + "," + fsize


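# For instance, a scrbook document with "\paperfontsize 9" ends up with
#     \paperfontsize default
#     \options fontsize=9
# (the option is appended to an existing \options line if there is one).
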
def revert_dupqualicites(document):
    "Revert qualified citation list commands with duplicate keys to ERT"

    # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
    # we need to revert those with multiple uses of the same key.

    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    if engine not in ["biblatex", "biblatex-natbib"]:
        return

    # Citation insets that support qualified lists, with their LaTeX code
    ql_citations = {
        "cite": "cites",
        "Cite": "Cites",
        "citet": "textcites",
        "Citet": "Textcites",
        "citep": "parencites",
        "Citep": "Parencites",
        "Footcite": "Smartcites",
        "footcite": "smartcites",
        "Autocite": "Autocites",
        "autocite": "autocites",
    }

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" % (i))
            i += 1
            continue

        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" % (i))
            i = j + 1
            continue

        cmd = get_value(document.body, "LatexCommand", k)
        if cmd not in list(ql_citations.keys()):
            i = j + 1
            continue

        pres = find_token(document.body, "pretextlist", i, j)
        posts = find_token(document.body, "posttextlist", i, j)
        if pres == -1 and posts == -1:
            # nothing to do.
            i = j + 1
            continue

        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" % (i))
            i = j + 1
            continue

        keys = key.split(",")
        ukeys = list(set(keys))
        if len(keys) == len(ukeys):
            # no duplicates.
            i = j + 1
            continue

        pretexts = get_quoted_value(document.body, "pretextlist", pres)
        posttexts = get_quoted_value(document.body, "posttextlist", posts)

        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        prelist = pretexts.split("\t")
        premap = dict()
        for pp in prelist:
            ppp = pp.split(" ", 1)
            val = ""
            if len(ppp) > 1:
                val = ppp[1]
            if ppp[0] in premap:
                premap[ppp[0]] = premap[ppp[0]] + "\t" + val
            else:
                premap[ppp[0]] = val
        postlist = posttexts.split("\t")
        postmap = dict()
        for pp in postlist:
            ppp = pp.split(" ", 1)
            val = ""
            if len(ppp) > 1:
                val = ppp[1]
            if ppp[0] in postmap:
                postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
            else:
                postmap[ppp[0]] = val
        # Replace known new commands with ERT
        if "(" in pre or ")" in pre:
            pre = "{" + pre + "}"
        if "(" in post or ")" in post:
            post = "{" + post + "}"
        res = "\\" + ql_citations[cmd]
        if pre:
            res += "(" + pre + ")"
        if post:
            res += "(" + post + ")"
        elif pre:
            res += "()"
        for kk in keys:
            if premap.get(kk, "") != "":
                akeys = premap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    premap[kk] = "\t".join(akeys[1:])
                else:
                    premap[kk] = ""
            if postmap.get(kk, "") != "":
                akeys = postmap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    postmap[kk] = "\t".join(akeys[1:])
                else:
                    postmap[kk] = ""
            elif premap.get(kk, "") != "":
                res += "[]"
            res += "{" + kk + "}"
        document.body[i : j + 1] = put_cmd_in_ert([res])


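# Worked example (hypothetical citation data): an "autocite" list with the
# duplicate key "knuth84,knuth84" and per-key pre-/posttexts is rewritten as
# ERT along the lines of
#     \autocites[see][p. 1]{knuth84}[p. 7]{knuth84}
# since biblatex's *cites commands accept repeated keys where LyX 2.3 cannot.
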
def convert_pagesizenames(document):
    "Convert LyX page size names"

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    oldnames = [
        "letterpaper",
        "legalpaper",
        "executivepaper",
        "a0paper",
        "a1paper",
        "a2paper",
        "a3paper",
        "a4paper",
        "a5paper",
        "a6paper",
        "b0paper",
        "b1paper",
        "b2paper",
        "b3paper",
        "b4paper",
        "b5paper",
        "b6paper",
        "c0paper",
        "c1paper",
        "c2paper",
        "c3paper",
        "c4paper",
        "c5paper",
        "c6paper",
    ]
    val = get_value(document.header, "\\papersize", i)
    if val in oldnames:
        newval = val.replace("paper", "")
        document.header[i] = "\\papersize " + newval


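# e.g. "\papersize a4paper" becomes "\papersize a4" and "\papersize letterpaper"
# becomes "\papersize letter"; values not in `oldnames` are left untouched.
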
def revert_pagesizenames(document):
    "Revert LyX page size names"

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    newnames = [
        "letter",
        "legal",
        "executive",
        "a0",
        "a1",
        "a2",
        "a3",
        "a4",
        "a5",
        "a6",
        "b0",
        "b1",
        "b2",
        "b3",
        "b4",
        "b5",
        "b6",
        "c0",
        "c1",
        "c2",
        "c3",
        "c4",
        "c5",
        "c6",
    ]
    val = get_value(document.header, "\\papersize", i)
    if val in newnames:
        newval = val + "paper"
        document.header[i] = "\\papersize " + newval


def revert_theendnotes(document):
    "Reverts native support of \\theendnotes to TeX-code"

    if (
        "endnotes" not in document.get_module_list()
        and "foottoend" not in document.get_module_list()
    ):
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")


def revert_enotez(document):
    "Reverts native support of enotez package to TeX-code"

    if (
        "enotez" not in document.get_module_list()
        and "foottoenotez" not in document.get_module_list()
    ):
        return

    use = False
    if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
        use = True

    revert_flex_inset(document, "Endnote", "\\endnote")

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        use = True
        document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")

    if use:
        add_to_preamble(document, ["\\usepackage{enotez}"])
    document.del_module("enotez")
    document.del_module("foottoenotez")


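# The reverted document then carries plain enotez markup, roughly
# (illustrative):
#     \usepackage{enotez}    % preamble
#     \endnote{Some note.}   % from the former Flex Endnote insets
#     \printendnotes         % from the former endnote FloatList insets
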
def revert_memoir_endnotes(document):
    "Reverts native support of memoir endnotes to TeX-code"

    if document.textclass != "memoir":
        return

    encommand = "\\pagenote"
    modules = document.get_module_list()
    if (
        "enotez" in modules
        or "foottoenotez" in modules
        or "endnotes" in modules
        or "foottoend" in modules
    ):
        encommand = "\\endnote"

    revert_flex_inset(document, "Endnote", encommand)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        if document.body[i] == "\\begin_inset FloatList pagenote*":
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
        else:
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")
        add_to_preamble(document, ["\\makepagenote"])


def revert_totalheight(document):
    "Reverts graphics height parameter from totalheight to height"

    relative_heights = {
        "\\textwidth": "text%",
        "\\columnwidth": "col%",
        "\\paperwidth": "page%",
        "\\linewidth": "line%",
        "\\textheight": "theight%",
        "\\paperheight": "pheight%",
        "\\baselineskip": "baselineskip%",
    }
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" % (i))
            i += 1
            continue

        rx = re.compile(r"\s*special\s*(\S+)$")
        rxx = re.compile(r"(\d*\.*\d+)(\S+)$")
        k = find_re(document.body, rx, i, j)
        special = ""
        oldheight = ""
        if k != -1:
            m = rx.match(document.body[k])
            if m:
                special = m.group(1)
            mspecial = special.split(",")
            for spc in mspecial:
                if spc.startswith("height="):
                    oldheight = spc.split("=")[1]
                    ms = rxx.search(oldheight)
                    if ms:
                        oldunit = ms.group(2)
                        if oldunit in list(relative_heights.keys()):
                            oldval = str(float(ms.group(1)) * 100)
                            oldunit = relative_heights[oldunit]
                            oldheight = oldval + oldunit
                    mspecial.remove(spc)
                    break
            if len(mspecial) > 0:
                special = ",".join(mspecial)
            else:
                special = ""

        rx = re.compile(r"(\s*height\s*)(\S+)$")
        kk = find_re(document.body, rx, i, j)
        if kk != -1:
            m = rx.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
                if k != -1:
                    if special != "":
                        val = val + "," + special
                    document.body[k] = "\tspecial " + "totalheight=" + val
                else:
                    document.body.insert(kk + 1, "\tspecial totalheight=" + val)
                if oldheight != "":
                    document.body[kk] = m.group(1) + oldheight
                else:
                    del document.body[kk]
        elif oldheight != "":
            if special != "":
                document.body[k] = "\tspecial " + special
                document.body.insert(k, "\theight " + oldheight)
            else:
                document.body[k] = "\theight " + oldheight
        i = j + 1


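# Sketch of revert_totalheight() (values illustrative): a graphics inset with
#     height 3cm
#     special angle=90
# is rewritten to
#     special totalheight=3cm,angle=90
# because the plain height parameter of older LyX maps to LaTeX's totalheight.
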
def convert_totalheight(document):
    "Converts graphics height parameter from totalheight to height"

    relative_heights = {
        "text%": "\\textwidth",
        "col%": "\\columnwidth",
        "page%": "\\paperwidth",
        "line%": "\\linewidth",
        "theight%": "\\textheight",
        "pheight%": "\\paperheight",
        "baselineskip%": "\\baselineskip",
    }
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" % (i))
            i += 1
            continue

        rx = re.compile(r"\s*special\s*(\S+)$")
        k = find_re(document.body, rx, i, j)
        special = ""
        newheight = ""
        if k != -1:
            m = rx.match(document.body[k])
            if m:
                special = m.group(1)
            mspecial = special.split(",")
            for spc in mspecial:
                if spc[:12] == "totalheight=":
                    newheight = spc.split("=")[1]
                    mspecial.remove(spc)
                    break
            if len(mspecial) > 0:
                special = ",".join(mspecial)
            else:
                special = ""

        rx = re.compile(r"(\s*height\s*)(\d+\.?\d*)(\S+)$")
        kk = find_re(document.body, rx, i, j)
        if kk != -1:
            m = rx.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
                unit = m.group(3)
                if unit in list(relative_heights.keys()):
                    val = str(float(val) / 100)
                    unit = relative_heights[unit]
                if k != -1:
                    if special != "":
                        val = val + unit + "," + special
                    else:
                        val = val + unit
                    document.body[k] = "\tspecial " + "height=" + val
                else:
                    document.body.insert(kk + 1, "\tspecial height=" + val + unit)
                if newheight != "":
                    document.body[kk] = m.group(1) + newheight
                else:
                    del document.body[kk]
        elif newheight != "":
            document.body.insert(k, "\theight " + newheight)
        i = j + 1


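# Conversely, convert_totalheight() turns e.g. "height 50theight%" into
#     special height=0.5\textheight
# and moves a stored "totalheight=" value (if any) back into the height
# parameter.
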
def convert_changebars(document):
    "Converts the changebars module to native solution"

    if "changebars" not in document.get_module_list():
        return

    i = find_token(document.header, "\\output_changes", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        document.del_module("changebars")
        return

    document.header.insert(i, "\\change_bars true")
    document.del_module("changebars")


def revert_changebars(document):
    "Reverts native changebar param to module"

    i = find_token(document.header, "\\change_bars", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\change_bars header.")
        return

    val = get_value(document.header, "\\change_bars", i)

    if val == "true":
        document.add_module("changebars")

    del document.header[i]


def convert_postpone_fragile(document):
    "Adds false \\postpone_fragile_content buffer param"

    i = find_token(document.header, "\\output_changes", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        return
    # Set this to false for old documents (see #2154)
    document.header.insert(i, "\\postpone_fragile_content false")


def revert_postpone_fragile(document):
    "Remove \\postpone_fragile_content buffer param"

    i = find_token(document.header, "\\postpone_fragile_content", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
        return

    del document.header[i]


def revert_colrow_tracking(document):
    "Remove change tag from tabular columns/rows"

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            continue
        for k in range(i, j):
            m = re.search('^<column.*change="([^"]+)".*>$', document.body[k])
            if m:
                document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', "")
            m = re.search('^<row.*change="([^"]+)".*>$', document.body[k])
            if m:
                document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', "")


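# e.g. a tabular column line such as (attribute values illustrative)
#     <column alignment="center" change="inserted-2-...">
# loses its change attribute and becomes
#     <column alignment="center">
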
def convert_counter_maintenance(document):
    "Convert \\maintain_unincluded_children buffer param from boolean value to tristate"

    i = find_token(document.header, "\\maintain_unincluded_children", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return

    val = get_value(document.header, "\\maintain_unincluded_children", i)

    if val == "true":
        document.header[i] = "\\maintain_unincluded_children strict"
    else:
        document.header[i] = "\\maintain_unincluded_children no"


def revert_counter_maintenance(document):
    "Revert \\maintain_unincluded_children buffer param to previous boolean value"

    i = find_token(document.header, "\\maintain_unincluded_children", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return

    val = get_value(document.header, "\\maintain_unincluded_children", i)

    if val == "no":
        document.header[i] = "\\maintain_unincluded_children false"
    else:
        document.header[i] = "\\maintain_unincluded_children true"


def revert_counter_inset(document):
    "Revert counter inset to ERT, where possible"
    i = 0
    needed_counters = {}
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset counter", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of counter inset at line %d!" % i)
            i += 1
            continue
        lyx = get_quoted_value(document.body, "lyxonly", i, j)
        if lyx == "true":
            # there is nothing we can do to affect the LyX counters
            document.body[i : j + 1] = []
            i = j + 1
            continue
        cnt = get_quoted_value(document.body, "counter", i, j)
        if not cnt:
            document.warning("No counter given for inset at line %d!" % i)
            i = j + 1
            continue

        cmd = get_quoted_value(document.body, "LatexCommand", i, j)
        ert = ""
        if cmd == "set":
            val = get_quoted_value(document.body, "value", i, j)
            if not val:
                document.warning("Can't convert counter inset at line %d!" % i)
            else:
                ert = put_cmd_in_ert(f"\\setcounter{{{cnt}}}{{{val}}}")
        elif cmd == "addto":
            val = get_quoted_value(document.body, "value", i, j)
            if not val:
                document.warning("Can't convert counter inset at line %d!" % i)
            else:
                ert = put_cmd_in_ert(f"\\addtocounter{{{cnt}}}{{{val}}}")
        elif cmd == "reset":
            ert = put_cmd_in_ert("\\setcounter{%s}{0}" % (cnt))
        elif cmd == "save":
            needed_counters[cnt] = 1
            savecnt = "LyXSave" + cnt
            ert = put_cmd_in_ert(f"\\setcounter{{{savecnt}}}{{\\value{{{cnt}}}}}")
        elif cmd == "restore":
            needed_counters[cnt] = 1
            savecnt = "LyXSave" + cnt
            ert = put_cmd_in_ert(f"\\setcounter{{{cnt}}}{{\\value{{{savecnt}}}}}")
        else:
            document.warning("Unknown counter command `%s' in inset at line %d!" % (cmd, i))

        if ert:
            document.body[i : j + 1] = ert
            i += 1
            continue
        i = j + 1

    pretext = []
    for cnt in needed_counters:
        pretext.append("\\newcounter{LyXSave%s}" % (cnt))
    if pretext:
        add_to_preamble(document, pretext)


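# e.g. a "save" counter inset for the counter "page" becomes the ERT
#     \setcounter{LyXSavepage}{\value{page}}
# with "\newcounter{LyXSavepage}" added to the preamble so the saved value
# has somewhere to live.
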
def revert_ams_spaces(document):
    "Revert InsetSpace medspace and thickspace into their TeX-code counterparts"
    Found = False
    insets = ["\\medspace{}", "\\thickspace{}"]
    for inset in insets:
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset space " + inset, i)
            if i == -1:
                break
            end = find_end_of_inset(document.body, i)
            subst = put_cmd_in_ert(inset)
            document.body[i : end + 1] = subst
            Found = True

    if Found:
        # load amsmath in the preamble if not already loaded
        i = find_token(document.header, "\\use_package amsmath 2", 0)
        if i == -1:
            add_to_preamble(document, ["\\@ifundefined{thickspace}{\\usepackage{amsmath}}{}"])
            return


def convert_parskip(document):
    "Move old parskip settings to preamble"

    i = find_token(document.header, "\\paragraph_separation skip", 0)
    if i == -1:
        return

    j = find_token(document.header, "\\defskip", 0)
    if j == -1:
        document.warning("Malformed LyX document! Missing \\defskip.")
        return

    val = get_value(document.header, "\\defskip", j)

    if val == "smallskip" or val == "medskip" or val == "bigskip":
        skipval = "\\" + val + "amount"
    else:
        skipval = val

    add_to_preamble(
        document,
        ["\\setlength{\\parskip}{" + skipval + "}", "\\setlength{\\parindent}{0pt}"],
    )

    document.header[i] = "\\paragraph_separation indent"
    document.header[j] = "\\paragraph_indentation default"


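# e.g. a document with "\defskip bigskip" gets the preamble lines
#     \setlength{\parskip}{\bigskipamount}
#     \setlength{\parindent}{0pt}
# and is switched to indent-style paragraph separation in the header.
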
def revert_parskip(document):
    "Revert new parskip settings to preamble"

    i = find_token(document.header, "\\paragraph_separation skip", 0)
    if i == -1:
        return

    j = find_token(document.header, "\\defskip", 0)
    if j == -1:
        document.warning("Malformed LyX document! Missing \\defskip.")
        return

    val = get_value(document.header, "\\defskip", j)

    skipval = ""
    if val == "smallskip" or val == "medskip" or val == "bigskip":
        skipval = "[skip=\\" + val + "amount]"
    elif val == "fullline":
        skipval = "[skip=\\baselineskip]"
    elif val != "halfline":
        skipval = "[skip={" + val + "}]"

    add_to_preamble(document, ["\\usepackage" + skipval + "{parskip}"])

    document.header[i] = "\\paragraph_separation indent"
    document.header[j] = "\\paragraph_indentation default"


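# e.g. "\defskip halfline" maps to plain \usepackage{parskip} (the package
# default), while a custom "\defskip 2mm" maps to
#     \usepackage[skip={2mm}]{parskip}
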
def revert_line_vspaces(document):
    "Revert fullline and halfline vspaces to TeX"
    insets = {
        "fullline*": "\\vspace*{\\baselineskip}",
        "fullline": "\\vspace{\\baselineskip}",
        "halfline*": "\\vspace*{0.5\\baselineskip}",
        "halfline": "\\vspace{0.5\\baselineskip}",
    }
    for inset in insets.keys():
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset VSpace " + inset, i)
            if i == -1:
                break
            end = find_end_of_inset(document.body, i)
            subst = put_cmd_in_ert(insets[inset])
            document.body[i : end + 1] = subst


def convert_libertinus_rm_fonts(document):
    """Convert Libertinus serif font definitions to native support"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(["Libertinus"])
        convert_fonts(document, fm)


def revert_libertinus_rm_fonts(document):
    """Revert Libertinus serif font definition to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fontmap = dict()
        fm = createFontMapping(["libertinus"])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)


def revert_libertinus_sftt_fonts(document):
    "Revert Libertinus sans and tt font definitions to LaTeX"

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # first sf font
        i = find_token(document.header, '\\font_sans "LibertinusSans-LF"', 0)
        if i != -1:
            j = find_token(document.header, "\\font_sans_osf true", 0)
            if j != -1:
                add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-OsF}"])
                document.header[j] = "\\font_sans_osf false"
            else:
                add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-LF}"])
            document.header[i] = document.header[i].replace("LibertinusSans-LF", "default")
            sf_scale = 100.0
            sfval = find_token(document.header, "\\font_sf_scale", 0)
            if sfval == -1:
                document.warning("Malformed LyX document: Missing \\font_sf_scale.")
            else:
                sfscale = document.header[sfval].split()
                val = sfscale[1]
                sfscale[1] = "100"
                document.header[sfval] = " ".join(sfscale)
                try:
                    # float() can throw
                    sf_scale = float(val)
                except ValueError:
                    document.warning("Invalid font_sf_scale value: " + val)
            if sf_scale != 100.0:
                add_to_preamble(
                    document,
                    [
                        "\\renewcommand*{\\LibertinusSans@scale}{"
                        + str(sf_scale / 100.0)
                        + "}"
                    ],
                )
        # now tt font
        i = find_token(document.header, '\\font_typewriter "LibertinusMono-TLF"', 0)
        if i != -1:
            add_to_preamble(document, ["\\renewcommand{\\ttdefault}{LibertinusMono-TLF}"])
            document.header[i] = document.header[i].replace("LibertinusMono-TLF", "default")
            tt_scale = 100.0
            ttval = find_token(document.header, "\\font_tt_scale", 0)
            if ttval == -1:
                document.warning("Malformed LyX document: Missing \\font_tt_scale.")
            else:
                ttscale = document.header[ttval].split()
                val = ttscale[1]
                ttscale[1] = "100"
                document.header[ttval] = " ".join(ttscale)
                try:
                    # float() can throw
                    tt_scale = float(val)
                except ValueError:
                    document.warning("Invalid font_tt_scale value: " + val)
            if tt_scale != 100.0:
                add_to_preamble(
                    document,
                    [
                        "\\renewcommand*{\\LibertinusMono@scale}{"
                        + str(tt_scale / 100.0)
                        + "}"
                    ],
                )


def revert_docbook_table_output(document):
    "Remove the \\docbook_table_output buffer param"

    i = find_token(document.header, "\\docbook_table_output")
    if i != -1:
        del document.header[i]


def revert_nopagebreak(document):
    "Revert Newpage nopagebreak insets to TeX code"
    while True:
        i = find_token(document.body, "\\begin_inset Newpage nopagebreak")
        if i == -1:
            return
        end = find_end_of_inset(document.body, i)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Newpage inset.")
            continue
        subst = put_cmd_in_ert("\\nopagebreak{}")
        document.body[i : end + 1] = subst


def revert_hrquotes(document):
    "Revert Hungarian quotation marks"

    i = find_token(document.header, "\\quotes_style hungarian", 0)
    if i != -1:
        document.header[i] = "\\quotes_style polish"

    while True:
        i = find_token(document.body, "\\begin_inset Quotes h")
        if i == -1:
            return
        if document.body[i] == "\\begin_inset Quotes hld":
            document.body[i] = "\\begin_inset Quotes pld"
        elif document.body[i] == "\\begin_inset Quotes hrd":
            document.body[i] = "\\begin_inset Quotes prd"
        elif document.body[i] == "\\begin_inset Quotes hls":
            document.body[i] = "\\begin_inset Quotes ald"
        elif document.body[i] == "\\begin_inset Quotes hrs":
            document.body[i] = "\\begin_inset Quotes ard"


def convert_math_refs(document):
    "Convert \\prettyref to \\formatted in math insets"
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Formula", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of inset at line %d of body!" % i)
            i += 1
            continue
        while i < j:
            document.body[i] = document.body[i].replace("\\prettyref", "\\formatted")
            i += 1


def revert_math_refs(document):
    "Revert \\formatted references in math insets to \\prettyref"
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Formula", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of inset at line %d of body!" % i)
            i += 1
            continue
        while i < j:
            document.body[i] = document.body[i].replace("\\formatted", "\\prettyref")
            if "\\labelonly" in document.body[i]:
                document.body[i] = re.sub("\\\\labelonly{([^}]+?)}", "\\1", document.body[i])
            i += 1


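# e.g. "\formatted{eq:euler}" becomes "\prettyref{eq:euler}", while
# "\labelonly{eq:euler}", which has no LyX 2.3 equivalent, collapses to the
# bare label text "eq:euler".
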
def convert_branch_colors(document):
    "Convert branch colors to semantic values"

    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        # We only support the standard LyX background for now
        k = find_token(document.header, "\\color #faf0e6", i, j)
        if k != -1:
            document.header[k] = "\\color background"
        i += 1


def revert_branch_colors(document):
    "Revert semantic branch colors"

    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        k = find_token(document.header, "\\color", i, j)
        if k != -1:
            bcolor = get_value(document.header, "\\color", k)
            if bcolor[0] != "#":
                # this will be read as background by LyX 2.3
                document.header[k] = "\\color none"
        i += 1


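# e.g. a branch defined with "\color background" (a semantic value LyX 2.3
# would misread) is reverted to "\color none", while explicit "#rrggbb"
# values are kept as they are.
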
def revert_darkmode_graphics(document):
    "Revert darkModeSensitive InsetGraphics param"

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" % (i))
            i += 1
            continue
        k = find_token(document.body, "\tdarkModeSensitive", i, j)
        if k != -1:
            del document.body[k]
        i += 1


def revert_branch_darkcols(document):
    "Revert dark branch colors"

    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        k = find_token(document.header, "\\color", i, j)
        if k != -1:
            m = re.search("\\\\color (\\S+) (\\S+)", document.header[k])
            if m:
                document.header[k] = "\\color " + m.group(1)
        i += 1


def revert_vcolumns2(document):
|
|
|
|
|
"""Revert varwidth columns with line breaks etc."""
|
|
|
|
|
i = 0
|
|
|
|
|
needvarwidth = False
|
|
|
|
|
needarray = False
|
|
|
|
|
needcellvarwidth = False
|
|
|
|
|
try:
|
|
|
|
|
while True:
|
2024-06-15 09:06:06 +00:00
|
|
|
|
i = find_token(document.body, "\\begin_inset Tabular", i + 1)
|
2021-01-22 18:16:43 +00:00
|
|
|
|
if i == -1:
|
|
|
|
|
return
|
|
|
|
|
j = find_end_of_inset(document.body, i)
|
|
|
|
|
if j == -1:
|
|
|
|
|
document.warning("Malformed LyX document: Could not find end of tabular.")
|
|
|
|
|
continue
|
|
|
|
|
|
|
|
|
|
# Collect necessary column information
|
|
|
|
|
m = i + 1
|
2024-06-15 09:06:06 +00:00
|
|
|
|
nrows = int(document.body[i + 1].split('"')[3])
|
|
|
|
|
ncols = int(document.body[i + 1].split('"')[5])
|
2021-01-22 18:16:43 +00:00
|
|
|
|
col_info = []
|
|
|
|
|
for k in range(ncols):
|
|
|
|
|
m = find_token(document.body, "<column", m)
|
2024-06-15 09:06:06 +00:00
|
|
|
|
width = get_option_value(document.body[m], "width")
|
|
|
|
|
varwidth = get_option_value(document.body[m], "varwidth")
|
|
|
|
|
alignment = get_option_value(document.body[m], "alignment")
|
|
|
|
|
valignment = get_option_value(document.body[m], "valignment")
|
|
|
|
|
special = get_option_value(document.body[m], "special")
|
2021-01-22 18:16:43 +00:00
|
|
|
|
col_info.append([width, varwidth, alignment, valignment, special, m])
|
|
|
|
|
m += 1
|
|
|
|
|
|
|
|
|
|
# Now parse cells
|
|
|
|
|
m = i + 1
|
|
|
|
|
for row in range(nrows):
|
|
|
|
|
for col in range(ncols):
|
|
|
|
|
m = find_token(document.body, "<cell", m)
|
2024-06-15 09:06:06 +00:00
|
|
|
|
multicolumn = get_option_value(document.body[m], "multicolumn") != ""
|
|
|
|
|
multirow = get_option_value(document.body[m], "multirow") != ""
|
|
|
|
|
fixedwidth = get_option_value(document.body[m], "width") != ""
|
|
|
|
|
rotate = get_option_value(document.body[m], "rotate")
|
|
|
|
|
cellalign = get_option_value(document.body[m], "alignment")
|
|
|
|
|
cellvalign = get_option_value(document.body[m], "valignment")
|
2021-01-22 18:16:43 +00:00
|
|
|
|
# Check for: linebreaks, multipars, non-standard environments
|
|
|
|
|
begcell = m
|
|
|
|
|
endcell = find_token(document.body, "</cell>", begcell)
|
|
|
|
|
vcand = False
|
2024-06-15 09:06:06 +00:00
|
|
|
|
if (
|
|
|
|
|
find_token(document.body, "\\begin_inset Newline", begcell, endcell)
|
|
|
|
|
!= -1
|
|
|
|
|
):
|
2021-01-23 13:24:54 +00:00
|
|
|
|
vcand = not fixedwidth
|
2021-01-22 18:16:43 +00:00
|
|
|
|
elif count_pars_in_inset(document.body, begcell + 2) > 1:
|
2021-01-23 13:24:54 +00:00
|
|
|
|
vcand = not fixedwidth
|
2021-01-22 18:16:43 +00:00
|
|
|
|
elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
|
2021-01-23 13:24:54 +00:00
|
|
|
|
vcand = not fixedwidth
|
2021-01-22 18:16:43 +00:00
|
|
|
|
colalignment = col_info[col][2]
|
|
|
|
|
colvalignment = col_info[col][3]
|
|
|
|
|
if vcand:
|
2024-06-15 09:06:06 +00:00
|
|
|
|
if rotate == "" and (
|
|
|
|
|
(colalignment == "left" and colvalignment == "top")
|
|
|
|
|
or (
|
|
|
|
|
multicolumn == True
|
|
|
|
|
and cellalign == "left"
|
|
|
|
|
and cellvalign == "top"
|
|
|
|
|
)
|
|
|
|
|
):
|
|
|
|
|
if (
|
|
|
|
|
col_info[col][0] == ""
|
|
|
|
|
and col_info[col][1] == ""
|
|
|
|
|
and col_info[col][4] == ""
|
|
|
|
|
):
|
2021-01-22 18:16:43 +00:00
|
|
|
|
needvarwidth = True
|
|
|
|
|
col_line = col_info[col][5]
|
|
|
|
|
needarray = True
|
|
|
|
|
vval = "V{\\linewidth}"
|
|
|
|
|
if multicolumn:
|
2024-06-15 09:06:06 +00:00
|
|
|
|
document.body[m] = (
|
|
|
|
|
document.body[m][:-1] + ' special="' + vval + '">'
|
|
|
|
|
)
|
2021-01-22 18:16:43 +00:00
|
|
|
|
else:
|
2024-06-15 09:06:06 +00:00
|
|
|
|
document.body[col_line] = (
|
|
|
|
|
document.body[col_line][:-1]
|
|
|
|
|
+ ' special="'
|
|
|
|
|
+ vval
|
|
|
|
|
+ '">'
|
|
|
|
|
)
|
2021-01-22 18:16:43 +00:00
|
|
|
|
else:
|
|
|
|
|
alarg = ""
|
2021-01-23 13:24:54 +00:00
|
|
|
|
if multicolumn or multirow:
|
2021-01-22 18:16:43 +00:00
|
|
|
|
if cellvalign == "middle":
|
|
|
|
|
alarg = "[m]"
|
|
|
|
|
elif cellvalign == "bottom":
|
|
|
|
|
alarg = "[b]"
|
|
|
|
|
else:
|
|
|
|
|
if colvalignment == "middle":
|
|
|
|
|
alarg = "[m]"
|
|
|
|
|
elif colvalignment == "bottom":
|
|
|
|
|
alarg = "[b]"
|
|
|
|
|
flt = find_token(document.body, "\\begin_layout", begcell, endcell)
|
|
|
|
|
                        elt = find_token_backwards(document.body, "\\end_layout", endcell)
                        if flt != -1 and elt != -1:
                            extralines = []
                            # we need to reset character layouts if necessary
                            el = find_token(document.body, "\\emph on", flt, elt)
                            if el != -1:
                                extralines.append("\\emph default")
                            el = find_token(document.body, "\\noun on", flt, elt)
                            if el != -1:
                                extralines.append("\\noun default")
                            el = find_token(document.body, "\\series", flt, elt)
                            if el != -1:
                                extralines.append("\\series default")
                            el = find_token(document.body, "\\family", flt, elt)
                            if el != -1:
                                extralines.append("\\family default")
                            el = find_token(document.body, "\\shape", flt, elt)
                            if el != -1:
                                extralines.append("\\shape default")
                            el = find_token(document.body, "\\color", flt, elt)
                            if el != -1:
                                extralines.append("\\color inherit")
                            el = find_token(document.body, "\\size", flt, elt)
                            if el != -1:
                                extralines.append("\\size default")
                            el = find_token(document.body, "\\bar under", flt, elt)
                            if el != -1:
                                extralines.append("\\bar default")
                            el = find_token(document.body, "\\uuline on", flt, elt)
                            if el != -1:
                                extralines.append("\\uuline default")
                            el = find_token(document.body, "\\uwave on", flt, elt)
                            if el != -1:
                                extralines.append("\\uwave default")
                            el = find_token(document.body, "\\strikeout on", flt, elt)
                            if el != -1:
                                extralines.append("\\strikeout default")
                            document.body[elt : elt + 1] = (
                                extralines
                                + put_cmd_in_ert("\\end{cellvarwidth}")
                                + [r"\end_layout"]
                            )
                            parlang = -1
                            for q in range(flt, elt):
                                if document.body[q] != "" and document.body[q][0] != "\\":
                                    break
                                if document.body[q][:5] == "\\lang":
                                    parlang = q
                                    break
                            if parlang != -1:
                                document.body[parlang + 1 : parlang + 1] = put_cmd_in_ert(
                                    "\\begin{cellvarwidth}" + alarg
                                )
                            else:
                                document.body[flt + 1 : flt + 1] = put_cmd_in_ert(
                                    "\\begin{cellvarwidth}" + alarg
                                )
                            needcellvarwidth = True
                            needvarwidth = True
                        # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
                        # with newlines, and we do not want that)
                        while True:
                            endcell = find_token(document.body, "</cell>", begcell)
                            linebreak = False
                            nl = find_token(
                                document.body,
                                "\\begin_inset Newline newline",
                                begcell,
                                endcell,
                            )
                            if nl == -1:
                                nl = find_token(
                                    document.body,
                                    "\\begin_inset Newline linebreak",
                                    begcell,
                                    endcell,
                                )
                                if nl == -1:
                                    break
                                linebreak = True
                            nle = find_end_of_inset(document.body, nl)
                            del document.body[nle : nle + 1]
                            if linebreak:
                                document.body[nl : nl + 1] = put_cmd_in_ert("\\linebreak{}")
                            else:
                                document.body[nl : nl + 1] = put_cmd_in_ert("\\\\")
                        # Replace parbreaks in multirow with \\endgraf
                        if multirow == True:
                            flt = find_token(document.body, "\\begin_layout", begcell, endcell)
                            if flt != -1:
                                while True:
                                    elt = find_end_of_layout(document.body, flt)
                                    if elt == -1:
                                        document.warning(
                                            "Malformed LyX document! Missing layout end."
                                        )
                                        break
                                    endcell = find_token(document.body, "</cell>", begcell)
                                    flt = find_token(
                                        document.body, "\\begin_layout", elt, endcell
                                    )
                                    if flt == -1:
                                        break
                                    document.body[elt : flt + 1] = put_cmd_in_ert("\\endgraf{}")
                    m += 1

            i = j

    finally:
        if needarray == True:
            add_to_preamble(document, ["\\usepackage{array}"])
        if needcellvarwidth == True:
            add_to_preamble(
                document,
                [
                    "%% Variable width box for table cells",
                    "\\newenvironment{cellvarwidth}[1][t]",
                    "    {\\begin{varwidth}[#1]{\\linewidth}}",
                    "    {\\@finalstrut\\@arstrutbox\\end{varwidth}}",
                ],
            )
        if needvarwidth == True:
            add_to_preamble(document, ["\\usepackage{varwidth}"])
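
# A hedged illustration (not part of the converter; "[m]" is just one possible
# alignment argument taken from the cell's valignment): the revert above turns
# a varwidth cell into ERT along the lines of
#   \begin{cellvarwidth}[m]
#   first line\\
#   second line
#   \end{cellvarwidth}
# with the cellvarwidth environment provided by the preamble code added in the
# finally block.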


def convert_vcolumns2(document):
    """Convert varwidth ERT to native"""
    i = 0
    try:
        while True:
            i = find_token(document.body, "\\begin_inset Tabular", i + 1)
            if i == -1:
                return
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Could not find end of tabular.")
                continue

            # Parse cells
            nrows = int(document.body[i + 1].split('"')[3])
            ncols = int(document.body[i + 1].split('"')[5])
            m = i + 1
            lines = []
            for row in range(nrows):
                for col in range(ncols):
                    m = find_token(document.body, "<cell", m)
                    multirow = get_option_value(document.body[m], "multirow") != ""
                    begcell = m
                    endcell = find_token(document.body, "</cell>", begcell)
                    vcand = False
                    cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
                    if cvw != -1:
                        vcand = (
                            document.body[cvw - 1] == "\\backslash"
                            and get_containing_inset(document.body, cvw)[0] == "ERT"
                        )
                    if vcand:
                        # Remove ERTs with cellvarwidth env
                        ecvw = find_token(document.body, "end{cellvarwidth}", begcell, endcell)
                        if ecvw != -1:
                            if document.body[ecvw - 1] == "\\backslash":
                                eertins = get_containing_inset(document.body, ecvw)
                                if eertins and eertins[0] == "ERT":
                                    del document.body[eertins[1] : eertins[2] + 1]

                        cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
                        ertins = get_containing_inset(document.body, cvw)
                        if ertins and ertins[0] == "ERT":
                            del document.body[ertins[1] : ertins[2] + 1]

                        # Convert ERT newlines (as cellvarwidth detection relies on that)
                        while True:
                            endcell = find_token(document.body, "</cell>", begcell)
                            nl = find_token(document.body, "\\backslash", begcell, endcell)
                            if nl == -1 or document.body[nl + 2] != "\\backslash":
                                break
                            ertins = get_containing_inset(document.body, nl)
                            if ertins and ertins[0] == "ERT":
                                document.body[ertins[1] : ertins[2] + 1] = [
                                    "\\begin_inset Newline newline",
                                    "",
                                    "\\end_inset",
                                ]

                        # Same for linebreaks
                        while True:
                            endcell = find_token(document.body, "</cell>", begcell)
                            nl = find_token(document.body, "linebreak", begcell, endcell)
                            if nl == -1 or document.body[nl - 1] != "\\backslash":
                                break
                            ertins = get_containing_inset(document.body, nl)
                            if ertins and ertins[0] == "ERT":
                                document.body[ertins[1] : ertins[2] + 1] = [
                                    "\\begin_inset Newline linebreak",
                                    "",
                                    "\\end_inset",
                                ]

                        # And \\endgraf
                        if multirow == True:
                            endcell = find_token(document.body, "</cell>", begcell)
                            nl = find_token(document.body, "endgraf{}", begcell, endcell)
                            if nl == -1 or document.body[nl - 1] != "\\backslash":
                                break
                            ertins = get_containing_inset(document.body, nl)
                            if ertins and ertins[0] == "ERT":
                                document.body[ertins[1] : ertins[2] + 1] = [
                                    "\\end_layout",
                                    "",
                                    "\\begin_layout Plain Layout",
                                ]
                    m += 1

            i += 1

    finally:
        del_complete_lines(
            document.preamble,
            [
                "% Added by lyx2lyx",
                "%% Variable width box for table cells",
                r"\newenvironment{cellvarwidth}[1][t]",
                r"    {\begin{varwidth}[#1]{\linewidth}}",
                r"    {\@finalstrut\@arstrutbox\end{varwidth}}",
            ],
        )
        del_complete_lines(document.preamble, ["% Added by lyx2lyx", r"\usepackage{varwidth}"])
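
# convert_vcolumns2 above is the inverse operation: inside a detected
# cellvarwidth cell, the ERT sequence "\backslash \backslash" becomes a native
# "\begin_inset Newline newline" inset, "\backslash linebreak" becomes a
# "Newline linebreak" inset, and (in multirow cells) "\backslash endgraf{}" is
# turned back into a paragraph break (\end_layout / \begin_layout Plain Layout).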


frontispiece_def = [
    r"### Inserted by lyx2lyx (frontispiece layout) ###",
    r"Style Frontispiece",
    r"  CopyStyle             Titlehead",
    r"  LatexName             frontispiece",
    r"End",
]


def convert_koma_frontispiece(document):
    """Remove local KOMA frontispiece definition"""
    if document.textclass[:3] != "scr":
        return

    document.del_local_layout(frontispiece_def)


def revert_koma_frontispiece(document):
    """Add local KOMA frontispiece definition"""
    if document.textclass[:3] != "scr":
        return

    if find_token(document.body, "\\begin_layout Frontispiece", 0) != -1:
        document.append_local_layout(frontispiece_def)
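
# Round-trip sketch (assuming a KOMA-Script class such as scrbook): if a
# document contains "\begin_layout Frontispiece", revert_koma_frontispiece
# appends frontispiece_def to the local layout so older LyX versions know the
# style; convert_koma_frontispiece simply deletes that local definition again.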


def revert_spellchecker_ignore(document):
    """Revert document spellchecker dictionary"""
    while True:
        i = find_token(document.header, "\\spellchecker_ignore")
        if i == -1:
            return
        del document.header[i]


def revert_docbook_mathml_prefix(document):
    """Revert the DocBook parameter to choose the prefix for the MathML name space"""
    while True:
        i = find_token(document.header, "\\docbook_mathml_prefix")
        if i == -1:
            return
        del document.header[i]


def revert_document_metadata(document):
    """Revert document metadata"""
    i = 0
    while True:
        i = find_token(document.header, "\\begin_metadata", i)
        if i == -1:
            return
        j = find_end_of(document.header, i, "\\begin_metadata", "\\end_metadata")
        if j == -1:
            # this should not happen
            break
        document.header[i : j + 1] = []
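
# The three header reverts above only delete header lines; for instance,
# revert_document_metadata drops a whole block of the form (sketch)
#   \begin_metadata
#   ...
#   \end_metadata
# from document.header.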


def revert_index_macros(document):
    "Revert inset index macros"

    i = 0
    while True:
        # trailing blank needed here to exclude IndexMacro insets
        i = find_token(document.body, "\\begin_inset Index ", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of index inset at line %d" % i
            )
            continue
        pl = find_token(document.body, "\\begin_layout Plain Layout", i, j)
        if pl == -1:
            document.warning(
                "Malformed LyX document: Can't find plain layout in index inset at line %d" % i
            )
            continue
        # find, store and remove inset params
        pr = find_token(document.body, "range", i, pl)
        prval = get_quoted_value(document.body, "range", pr)
        pagerange = ""
        if prval == "start":
            pagerange = "("
        elif prval == "end":
            pagerange = ")"
        pf = find_token(document.body, "pageformat", i, pl)
        pageformat = get_quoted_value(document.body, "pageformat", pf)
        del document.body[pr : pf + 1]
        # Now re-find (potentially moved) inset end again, and search for subinsets
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of index inset at line %d" % i
            )
            continue
        # We search for all possible subentries in turn, store their
        # content and delete them
        see = []
        seealso = []
        subentry = []
        subentry2 = []
        sortkey = []
        # Two subentries are allowed, thus the duplication
        imacros = ["seealso", "see", "subentry", "subentry", "sortkey"]
        for imacro in imacros:
            iim = find_token(document.body, "\\begin_inset IndexMacro %s" % imacro, i, j)
            if iim == -1:
                continue
            iime = find_end_of_inset(document.body, iim)
            if iime == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of index macro inset at line %d" % i
                )
                continue
            iimpl = find_token(document.body, "\\begin_layout Plain Layout", iim, iime)
            if iimpl == -1:
                document.warning(
                    "Malformed LyX document: Can't find plain layout in index macro inset at line %d"
                    % i
                )
                continue
            iimple = find_end_of_layout(document.body, iimpl)
            if iimple == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of index macro inset plain layout at line %d"
                    % i
                )
                continue
            icont = document.body[iimpl:iimple]
            if imacro == "seealso":
                seealso = icont[1:]
            elif imacro == "see":
                see = icont[1:]
            elif imacro == "subentry":
                # subentries might have their own sortkey!
                xiim = find_token(
                    document.body, "\\begin_inset IndexMacro sortkey", iimpl, iimple
                )
                if xiim != -1:
                    xiime = find_end_of_inset(document.body, xiim)
                    if xiime == -1:
                        document.warning(
                            "Malformed LyX document: Can't find end of index macro inset at line %d"
                            % i
                        )
                    else:
                        xiimpl = find_token(
                            document.body, "\\begin_layout Plain Layout", xiim, xiime
                        )
                        if xiimpl == -1:
                            document.warning(
                                "Malformed LyX document: Can't find plain layout in index macro inset at line %d"
                                % i
                            )
                        else:
                            xiimple = find_end_of_layout(document.body, xiimpl)
                            if xiimple == -1:
                                document.warning(
                                    "Malformed LyX document: Can't find end of index macro inset plain layout at line %d"
                                    % i
                                )
                            else:
                                # the sortkey
                                xicont = document.body[xiimpl + 1 : xiimple]
                                # everything before or after the sortkey
                                xxicont = (
                                    document.body[iimpl + 1 : xiim]
                                    + document.body[xiime + 1 : iimple]
                                )
                                # construct the latex sequence
                                icont = xicont + put_cmd_in_ert("@") + xxicont[1:]
                if len(subentry) > 0:
                    if icont[0] == "\\begin_layout Plain Layout":
                        subentry2 = icont[1:]
                    else:
                        subentry2 = icont
                else:
                    if icont[0] == "\\begin_layout Plain Layout":
                        subentry = icont[1:]
                    else:
                        subentry = icont
            elif imacro == "sortkey":
                sortkey = icont
            # Everything stored. Delete subinset.
            del document.body[iim : iime + 1]
            # Again re-find (potentially moved) index inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of index inset at line %d" % i
                )
                continue
        # Now insert all stuff, starting from the inset end
        pl = find_token(document.body, "\\begin_layout Plain Layout", i, j)
        if pl == -1:
            document.warning(
                "Malformed LyX document: Can't find plain layout in index inset at line %d" % i
            )
            continue
        ple = find_end_of_layout(document.body, pl)
        if ple == -1:
            document.warning(
                "Malformed LyX document: Can't find end of index macro inset plain layout at line %d"
                % i
            )
            continue
        if len(see) > 0:
            document.body[ple:ple] = (
                put_cmd_in_ert("|" + pagerange + "see{") + see + put_cmd_in_ert("}")
            )
        elif len(seealso) > 0:
            document.body[ple:ple] = (
                put_cmd_in_ert("|" + pagerange + "seealso{") + seealso + put_cmd_in_ert("}")
            )
        elif pageformat != "default":
            document.body[ple:ple] = put_cmd_in_ert("|" + pagerange + pageformat)
        if len(subentry2) > 0:
            document.body[ple:ple] = put_cmd_in_ert("!") + subentry2
        if len(subentry) > 0:
            document.body[ple:ple] = put_cmd_in_ert("!") + subentry
        if len(sortkey) > 0:
            document.body[pl : pl + 1] = document.body[pl : pl + 1] + sortkey + put_cmd_in_ert("@")
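
# Worked example for the revert above (entry names are made up): an Index
# inset carrying a sortkey "Foo", one subentry "Bar" and a see macro "Baz"
# is flattened into classic makeindex markup inside the inset, roughly
#   Foo@<entry text>!Bar|see{Baz}
# with the special characters @, ! and |...{} inserted as ERT.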


def revert_starred_refs(document):
    "Revert starred refs"
    i = find_token(document.header, "\\use_hyperref true", 0)
    use_hyperref = i != -1
    i = 0
    in_inset = False
    cmd = ref = ""
    nolink = False
    nolinkline = -1
    while True:
        if not in_inset:
            i = find_token(document.body, "\\begin_inset CommandInset ref", i)
            if i == -1:
                break
            start = i
            end = find_end_of_inset(document.body, i)
            if end == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of inset at line %d" % i
                )
                i += 1
                continue
            # If we are not using hyperref, then we just need to delete the line
            if not use_hyperref:
                k = find_token(document.body, "nolink", i, end)
                if k == -1:
                    i = end
                    continue
                del document.body[k]
                i = end - 1
                continue
            # If we are using hyperref, then we'll need to do more.
            in_inset = True
            i += 1
            continue
        # so we are in an InsetRef
        if i == end:
            in_inset = False
            # If nolink is False, just remove that line
            if nolink == False or cmd == "formatted" or cmd == "labelonly":
                # document.warning("Skipping " + cmd + " " + ref)
                if nolinkline != -1:
                    del document.body[nolinkline]
                    nolinkline = -1
                continue
            # We need to construct a new command and put it in ERT
            newcmd = "\\" + cmd + "*{" + ref + "}"
            # document.warning(newcmd)
            newlines = put_cmd_in_ert(newcmd)
            document.body[start : end + 1] = newlines
            i += len(newlines) - (end - start) + 1
            # reset variables
            cmd = ref = ""
            nolink = False
            nolinkline = -1
            continue
        l = document.body[i]
        if l.startswith("LatexCommand"):
            cmd = l[13:]
        elif l.startswith("reference"):
            ref = l[11:-1]
        elif l.startswith("nolink"):
            tmp = l[8:-1]
            nolink = tmp == "true"
            nolinkline = i
        i += 1
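
# Sketch of the revert above (assuming hyperref is enabled; the label is made
# up): a ref inset with LatexCommand pageref, reference "sec:intro" and
# nolink "true" is replaced by the ERT command  \pageref*{sec:intro}  --
# the starred hyperref variant encodes the suppressed link.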


def convert_starred_refs(document):
    "Convert starred refs"
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            break
        end = find_end_of_inset(document.body, i)
        if end == -1:
            document.warning("Malformed LyX document: Can't find end of inset at line %d" % i)
            i += 1
            continue
        newlineat = end - 1
        document.body.insert(newlineat, 'nolink "false"')
        i = end + 1
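
# Example for the converter above (inset content is made up): inside
#   \begin_inset CommandInset ref
#   LatexCommand ref
#   reference "fig:one"
# the line  nolink "false"  is inserted just before \end_inset, giving every
# pre-2.4 ref inset the new parameter with a link-preserving default.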


def revert_familydefault(document):
    "Revert \\font_default_family for non-TeX fonts"

    if find_token(document.header, "\\use_non_tex_fonts true", 0) == -1:
        return

    i = find_token(document.header, "\\font_default_family", 0)
    if i == -1:
        document.warning("Malformed LyX document: Can't find \\font_default_family header")
        return

    dfamily = get_value(document.header, "\\font_default_family", i)
    if dfamily == "default":
        return

    document.header[i] = "\\font_default_family default"
    add_to_preamble(document, ["\\renewcommand{\\familydefault}{\\" + dfamily + "}"])
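
# Example for the revert above (header value is hypothetical): with non-TeX
# fonts enabled and "\font_default_family sfdefault", the header is reset to
# "default" and the preamble gains
#   \renewcommand{\familydefault}{\sfdefault}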


def convert_hyper_other(document):
    'Classify "run:" links as other'

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset href", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Cannot find end of inset at line " + str(i))
            i += 1
            continue
        k = find_token(document.body, 'type "', i, j)
        if k != -1:
            # not a "Web" type. Continue.
            i = j
            continue
        t = find_token(document.body, "target", i, j)
        if t == -1:
            document.warning("Malformed hyperlink inset at line " + str(i))
            i = j
            continue
        if document.body[t][8:12] == "run:":
            document.body.insert(t, 'type "other"')
        i += 1
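
# Sketch (constructed target): a hyperlink inset without a "type" line whose
# target reads  target "run:./script.sh"  gets  type "other"  inserted above
# the target line. revert_hyper_other below undoes this: for "run:" targets the
# type line is dropped again, while genuine "other" links are written as \href
# ERT.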


def revert_hyper_other(document):
    'Revert other link type to ERT and "run:" to Web'

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset href", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Cannot find end of inset at line " + str(i))
            i += 1
            continue
        k = find_token(document.body, 'type "other"', i, j)
        if k == -1:
            i = j
            continue
        # build command
        n = find_token(document.body, "name", i, j)
        t = find_token(document.body, "target", i, j)
        if n == -1 or t == -1:
            document.warning("Malformed hyperlink inset at line " + str(i))
            i = j
            continue
        name = document.body[n][6:-1]
        target = document.body[t][8:-1]
        if target[:4] == "run:":
            del document.body[k]
        else:
            cmd = r"\href{" + target + "}{" + name + "}"
            ecmd = put_cmd_in_ert(cmd)
            document.body[i : j + 1] = ecmd
        i += 1


ack_layouts_new = {
    "aa": "Acknowledgments",
    "aapaper": "Acknowledgments",
    "aastex": "Acknowledgments",
    "aastex62": "Acknowledgments",
    "achemso": "Acknowledgments",
    "acmart": "Acknowledgments",
    "AEA": "Acknowledgments",
    "apa": "Acknowledgments",
    "copernicus": "Acknowledgments",
    "egs": "Acknowledgments",  # + Acknowledgment
    "elsart": "Acknowledgment",
    "isprs": "Acknowledgments",
    "iucr": "Acknowledgments",
    "kluwer": "Acknowledgments",
    "svglobal3": "Acknowledgments",
    "svglobal": "Acknowledgment",
    "svjog": "Acknowledgment",
    "svmono": "Acknowledgment",
    "svmult": "Acknowledgment",
    "svprobth": "Acknowledgment",
}

ack_layouts_old = {
    "aa": "Acknowledgement",
    "aapaper": "Acknowledgement",
    "aastex": "Acknowledgement",
    "aastex62": "Acknowledgement",
    "achemso": "Acknowledgement",
    "acmart": "Acknowledgements",
    "AEA": "Acknowledgement",
    "apa": "Acknowledgements",
    "copernicus": "Acknowledgements",
    "egs": "Acknowledgements",  # + Acknowledgement
    "elsart": "Acknowledegment",
    "isprs": "Acknowledgements",
    "iucr": "Acknowledgements",
    "kluwer": "Acknowledgements",
    "svglobal3": "Acknowledgements",
    "svglobal": "Acknowledgement",
    "svjog": "Acknowledgement",
    "svmono": "Acknowledgement",
    "svmult": "Acknowledgement",
    "svprobth": "Acknowledgement",
}


def convert_acknowledgment(document):
    "Fix spelling of acknowledgment styles"

    if document.textclass not in list(ack_layouts_old.keys()):
        return

    i = 0
    while True:
        i = find_token(
            document.body, "\\begin_layout " + ack_layouts_old[document.textclass], i
        )
        if i == -1:
            break
        document.body[i] = "\\begin_layout " + ack_layouts_new[document.textclass]
    if document.textclass != "egs":
        return
    # egs has two styles
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout Acknowledgement", i)
        if i == -1:
            break
        document.body[i] = "\\begin_layout Acknowledgment"


def revert_acknowledgment(document):
    "Restore old spelling of acknowledgment styles"

    if document.textclass not in list(ack_layouts_new.keys()):
        return
    i = 0
    while True:
        i = find_token(
            document.body, "\\begin_layout " + ack_layouts_new[document.textclass], i
        )
        if i == -1:
            break
        document.body[i] = "\\begin_layout " + ack_layouts_old[document.textclass]
    if document.textclass != "egs":
        return
    # egs has two styles
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout Acknowledgment", i)
        if i == -1:
            break
        document.body[i] = "\\begin_layout Acknowledgement"
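
# Example from the two tables above: in an acmart document the conversion
# rewrites "\begin_layout Acknowledgements" to "\begin_layout Acknowledgments",
# and revert_acknowledgment maps it back. (The misspelled "Acknowledegment" for
# elsart is deliberate: the entries must match the style names the old layout
# files actually used.)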


ack_theorem_def = [
    r"### Inserted by lyx2lyx (ams extended theorems) ###",
    r"### This requires theorems-ams-extended module to be loaded",
    r"Style Acknowledgement",
    r"  CopyStyle             Remark",
    r"  LatexName             acknowledgement",
    r'  LabelString           "Acknowledgement \thetheorem."',
    r"  Preamble",
    r"    \theoremstyle{remark}",
    r"    \newtheorem{acknowledgement}[thm]{\protect\acknowledgementname}",
    r"  EndPreamble",
    r"  LangPreamble",
    r"    \providecommand{\acknowledgementname}{_(Acknowledgement)}",
    r"  EndLangPreamble",
    r"  BabelPreamble",
    r"    \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
    r"  EndBabelPreamble",
    r"  DocBookTag            para",
    r'  DocBookAttr           role="acknowledgement"',
    r'  DocBookItemTag        ""',
    r"End",
]

ackStar_theorem_def = [
    r"### Inserted by lyx2lyx (ams extended theorems) ###",
    r"### This requires a theorems-ams-extended-* module to be loaded",
    r"Style Acknowledgement*",
    r"  CopyStyle             Remark*",
    r"  LatexName             acknowledgement*",
    r'  LabelString           "Acknowledgement."',
    r"  Preamble",
    r"    \theoremstyle{remark}",
    r"    \newtheorem*{acknowledgement*}{\protect\acknowledgementname}",
    r"  EndPreamble",
    r"  LangPreamble",
    r"    \providecommand{\acknowledgementname}{_(Acknowledgement)}",
    r"  EndLangPreamble",
    r"  BabelPreamble",
    r"    \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
    r"  EndBabelPreamble",
    r"  DocBookTag            para",
    r'  DocBookAttr           role="acknowledgement"',
    r'  DocBookItemTag        ""',
    r"End",
]

ack_bytype_theorem_def = [
    r"### Inserted by lyx2lyx (ams extended theorems) ###",
    r"### This requires theorems-ams-extended-bytype module to be loaded",
    r"Counter acknowledgement",
    r"  GuiName Acknowledgment",
    r"End",
    r"Style Acknowledgement",
    r"  CopyStyle             Remark",
    r"  LatexName             acknowledgement",
    r'  LabelString           "Acknowledgement \theacknowledgement."',
    r"  Preamble",
    r"    \theoremstyle{remark}",
    r"    \newtheorem{acknowledgement}{\protect\acknowledgementname}",
    r"  EndPreamble",
    r"  LangPreamble",
    r"    \providecommand{\acknowledgementname}{_(Acknowledgement)}",
    r"  EndLangPreamble",
    r"  BabelPreamble",
    r"    \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
    r"  EndBabelPreamble",
    r"  DocBookTag            para",
    r'  DocBookAttr           role="acknowledgement"',
    r'  DocBookItemTag        ""',
    r"End",
]

ack_chap_bytype_theorem_def = [
    r"### Inserted by lyx2lyx (ams extended theorems) ###",
    r"### This requires theorems-ams-extended-chap-bytype module to be loaded",
    r"Counter acknowledgement",
    r"  GuiName Acknowledgment",
    r"  Within chapter",
    r"End",
    r"Style Acknowledgement",
    r"  CopyStyle             Remark",
    r"  LatexName             acknowledgement",
    r'  LabelString           "Acknowledgement \theacknowledgement."',
    r"  Preamble",
    r"    \theoremstyle{remark}",
    r"    \ifx\thechapter\undefined",
    r"      \newtheorem{acknowledgement}{\protect\acknowledgementname}",
    r"    \else",
    r"      \newtheorem{acknowledgement}{\protect\acknowledgementname}[chapter]",
    r"    \fi",
    r"  EndPreamble",
    r"  LangPreamble",
    r"    \providecommand{\acknowledgementname}{_(Acknowledgement)}",
    r"  EndLangPreamble",
    r"  BabelPreamble",
    r"    \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
    r"  EndBabelPreamble",
    r"  DocBookTag            para",
    r'  DocBookAttr           role="acknowledgement"',
    r'  DocBookItemTag        ""',
    r"End",
]


def convert_ack_theorems(document):
    """Put removed acknowledgement theorems to local layout"""

    haveAck = False
    haveStarAck = False
    if "theorems-ams-extended-bytype" in document.get_module_list():
        i = 0
        while True:
            if haveAck and haveStarAck:
                break
            i = find_token(document.body, "\\begin_layout Acknowledgement", i)
            if i == -1:
                break
            if document.body[i] == "\\begin_layout Acknowledgement*" and not haveStarAck:
                document.append_local_layout(ackStar_theorem_def)
                haveStarAck = True
            elif not haveAck:
                document.append_local_layout(ack_bytype_theorem_def)
                haveAck = True
            i += 1
    elif "theorems-ams-extended-chap-bytype" in document.get_module_list():
        i = 0
        while True:
            if haveAck and haveStarAck:
                break
            i = find_token(document.body, "\\begin_layout Acknowledgement", i)
            if i == -1:
                break
            if document.body[i] == "\\begin_layout Acknowledgement*" and not haveStarAck:
                document.append_local_layout(ackStar_theorem_def)
                haveStarAck = True
            elif not haveAck:
                document.append_local_layout(ack_chap_bytype_theorem_def)
                haveAck = True
            i += 1
    elif "theorems-ams-extended" in document.get_module_list():
        i = 0
        while True:
            if haveAck and haveStarAck:
                break
            i = find_token(document.body, "\\begin_layout Acknowledgement", i)
            if i == -1:
                break
            if document.body[i] == "\\begin_layout Acknowledgement*" and not haveStarAck:
                document.append_local_layout(ackStar_theorem_def)
                haveStarAck = True
            elif not haveAck:
                document.append_local_layout(ack_theorem_def)
                haveAck = True
            i += 1


def revert_ack_theorems(document):
    """Remove acknowledgement theorems from local layout"""
    if "theorems-ams-extended-bytype" in document.get_module_list():
        document.del_local_layout(ackStar_theorem_def)
        document.del_local_layout(ack_bytype_theorem_def)
    elif "theorems-ams-extended-chap-bytype" in document.get_module_list():
        document.del_local_layout(ackStar_theorem_def)
        document.del_local_layout(ack_chap_bytype_theorem_def)
    elif "theorems-ams-extended" in document.get_module_list():
        document.del_local_layout(ackStar_theorem_def)
        document.del_local_layout(ack_theorem_def)
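
# The four local-layout snippets above are inserted verbatim by
# convert_ack_theorems when a document uses one of the theorems-ams-extended*
# modules and contains an Acknowledgement(*) paragraph. revert_ack_theorems
# deletes exactly the same lists again, so the definitions must stay
# byte-identical between the two functions.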


def revert_empty_macro(document):
    """Remove macros with empty LaTeX part"""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FormulaMacro", i)
        if i == -1:
            break
        cmd = document.body[i + 1]
        if cmd[-3:] != "}{}" and cmd[-3:] != "]{}":
            i += 1
            continue
        j = find_end_of_inset(document.body, i)
        document.body[i : j + 1] = []


def convert_empty_macro(document):
    """In the unlikely event someone defined a macro with empty LaTeX, add {}"""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FormulaMacro", i)
        if i == -1:
            break
        cmd = document.body[i + 1]
        if cmd[-3:] != "}{}" and cmd[-3:] != "]{}":
            i += 1
            continue
        newstr = cmd[:-2] + "{\\{\\}}"
        document.body[i + 1] = newstr
        i += 1
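
# Worked example for the pair above (macro name is made up): the empty macro
#   \newcommand{\foo}{}
# is rewritten by convert_empty_macro to
#   \newcommand{\foo}{\{\}}
# (cmd[:-2] drops the trailing "{}" before "{\{\}}" is appended), while
# revert_empty_macro removes such empty-LaTeX macro insets altogether.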


def convert_cov_options(document):
    """Update examples item argument structure"""

    if "linguistics" not in document.get_module_list():
        return

    layouts = ["Numbered Examples (consecutive)", "Subexample"]

    for layout in layouts:
        i = 0
        while True:
            i = find_token(document.body, "\\begin_layout %s" % layout, i)
            if i == -1:
                break
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of example layout at line %d" % i
                )
                i += 1
                continue
            k = find_token(document.body, "\\begin_inset Argument item:1", i, j)
            if k != -1:
                document.body[k] = "\\begin_inset Argument item:2"
            i += 1
    # Shift gloss arguments
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (2 Lines)", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of gloss inset at line %d" % i
            )
            i += 1
            continue
        k = find_token(document.body, "\\begin_inset Argument post:2", i, j)
        if k != -1:
            document.body[k] = "\\begin_inset Argument post:4"
        k = find_token(document.body, "\\begin_inset Argument post:1", i, j)
        if k != -1:
            document.body[k] = "\\begin_inset Argument post:2"
        i += 1

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (3 Lines)", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of gloss inset at line %d" % i
            )
            i += 1
            continue
        k = find_token(document.body, "\\begin_inset Argument post:3", i, j)
        if k != -1:
            document.body[k] = "\\begin_inset Argument post:6"
        k = find_token(document.body, "\\begin_inset Argument post:2", i, j)
        if k != -1:
            document.body[k] = "\\begin_inset Argument post:4"
        k = find_token(document.body, "\\begin_inset Argument post:1", i, j)
        if k != -1:
            document.body[k] = "\\begin_inset Argument post:2"
        i += 1
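
# Renumbering sketch for the converter above: in the affected example layouts,
# Argument item:1 becomes item:2; in two-line glosses post:2 -> post:4, then
# post:1 -> post:2; in three-line glosses post:3 -> post:6, post:2 -> post:4,
# then post:1 -> post:2. Shifting the highest numbers first ensures no argument
# is renumbered twice.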


def revert_linggloss2(document):
    "Revert gloss with new args to ERT"

    if "linguistics" not in document.get_module_list():
        return

    cov_req = False
    glosses = [
        "\\begin_inset Flex Interlinear Gloss (2 Lines)",
        "\\begin_inset Flex Interlinear Gloss (3 Lines)",
    ]
    for glosse in glosses:
        i = 0
        while True:
            i = find_token(document.body, glosse, i + 1)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Gloss inset")
                continue

            # Check if we have new options
            arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
            if arg == -1:
                arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
                if arg == -1:
                    arg = find_token(document.body, "\\begin_inset Argument post:5", i, j)
                    if arg == -1:
                        # nothing to do
                        continue

            arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            optargcontent = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find optarg plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            marg1content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
            endarg = find_end_of_inset(document.body, arg)
            marg2content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
            endarg = find_end_of_inset(document.body, arg)
            marg3content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
            endarg = find_end_of_inset(document.body, arg)
            marg4content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 4 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg4content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            arg = find_token(document.body, "\\begin_inset Argument post:5", i, j)
            endarg = find_end_of_inset(document.body, arg)
            marg5content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 5 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg5content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            arg = find_token(document.body, "\\begin_inset Argument post:6", i, j)
            endarg = find_end_of_inset(document.body, arg)
            marg6content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 6 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg6content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            cmd = "\\digloss"
            if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
                cmd = "\\trigloss"

            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endInset = find_end_of_inset(document.body, i)
            endPlain = find_end_of_layout(document.body, beginPlain)
            precontent = put_cmd_in_ert(cmd)
            if len(optargcontent) > 0:
                precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
            precontent += put_cmd_in_ert("{")

            postcontent = put_cmd_in_ert("}")
            if len(marg1content) > 0:
                postcontent += put_cmd_in_ert("[") + marg1content + put_cmd_in_ert("]")
            postcontent += put_cmd_in_ert("{") + marg2content + put_cmd_in_ert("}")
            if len(marg3content) > 0:
                postcontent += put_cmd_in_ert("[") + marg3content + put_cmd_in_ert("]")
            postcontent += put_cmd_in_ert("{") + marg4content + put_cmd_in_ert("}")
            if cmd == "\\trigloss":
                if len(marg5content) > 0:
                    postcontent += put_cmd_in_ert("[") + marg5content + put_cmd_in_ert("]")
                postcontent += put_cmd_in_ert("{") + marg6content + put_cmd_in_ert("}")

            document.body[endPlain : endInset + 1] = postcontent
            document.body[beginPlain + 1 : beginPlain] = precontent
            del document.body[i : beginPlain + 1]
            if not cov_req:
                document.append_local_layout("Requires covington")
                cov_req = True
            i = beginPlain
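
# Output sketch for the revert above (argument labels shown, not real content):
# a two-line gloss becomes ERT of the shape
#   \digloss[Argument 1]{<inset content>}[post:1]{post:2}[post:3]{post:4}
# and a three-line gloss uses \trigloss with an extra [post:5]{post:6} pair;
# the bracketed groups are only emitted when the corresponding argument inset
# exists. The first converted gloss also appends "Requires covington" to the
# local layout.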


def revert_exarg2(document):
    "Revert linguistic examples with new arguments to ERT"

    if "linguistics" not in document.get_module_list():
        return

    cov_req = False

    layouts = ["Numbered Example", "Subexample"]

    for layout in layouts:
        i = 0
        while True:
            i = find_token(document.body, "\\begin_layout %s" % layout, i + 1)
            if i == -1:
                break
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of example layout")
                continue
            consecex = document.body[i] == "\\begin_layout Numbered Examples (consecutive)"
            subexpl = document.body[i] == "\\begin_layout Subexample"
            singleex = document.body[i] == "\\begin_layout Numbered Examples (multiline)"
            layouttype = "\\begin_layout Numbered Examples (multiline)"
            if consecex:
                layouttype = "\\begin_layout Numbered Examples (consecutive)"
            elif subexpl:
                layouttype = "\\begin_layout Subexample"
            k = i
            l = j
            while True:
                if singleex:
                    break
                m = find_end_of_layout(document.body, k)
                # check for consecutive layouts
                k = find_token(document.body, "\\begin_layout", m)
                if k == -1 or document.body[k] != layouttype:
                    break
                l = find_end_of_layout(document.body, k)
                if l == -1:
                    document.warning("Malformed LyX document: Can't find end of example layout")
                    continue

            arg = find_token(document.body, "\\begin_inset Argument 1", i, l)
            if (
                arg != -1
                and layouttype
                != "\\begin_layout " + get_containing_layout(document.body, arg)[0]
            ):
                # this is not our argument!
                arg = -1
            if subexpl or arg == -1:
                iarg = find_token(document.body, "\\begin_inset Argument item:1", i, l)
                if iarg == -1:
                    continue

            if arg != -1:
                endarg = find_end_of_inset(document.body, arg)
                optargcontent = ""
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find optarg plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                optargcontent = lyx2latex(
                    document, document.body[argbeginPlain + 1 : argendPlain - 2]
                )
                # This is a verbatim argument
                optargcontent = re.sub(r"textbackslash{}", r"", optargcontent)

            itemarg = ""
            iarg = find_token(document.body, "\\begin_inset Argument item:1", i, j)
            if iarg != -1:
                endiarg = find_end_of_inset(document.body, iarg)
                iargcontent = ""
                iargbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", iarg, endiarg
                )
                if iargbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find optarg plain Layout")
                    continue
                iargendPlain = find_end_of_inset(document.body, iargbeginPlain)
                itemarg = (
                    "<" + lyx2latex(document, document.body[iargbeginPlain:iargendPlain]) + ">"
                )

            iarg2 = find_token(document.body, "\\begin_inset Argument item:2", i, j)
            if iarg2 != -1:
                endiarg2 = find_end_of_inset(document.body, iarg2)
                iarg2content = ""
                iarg2beginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", iarg2, endiarg2
                )
                if iarg2beginPlain == -1:
                    document.warning("Malformed LyX document: Can't find optarg plain Layout")
                    continue
                iarg2endPlain = find_end_of_inset(document.body, iarg2beginPlain)
                itemarg += (
                    "["
                    + lyx2latex(document, document.body[iarg2beginPlain:iarg2endPlain])
                    + "]"
                )

            if itemarg == "":
                itemarg = " "

            # remove Arg insets and paragraph, if it only contains this inset
            if arg != -1:
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]
            if iarg != -1:
                iarg = find_token(document.body, "\\begin_inset Argument item:1", i, j)
                if iarg == -1:
                    document.warning("Unable to re-find item:1 Argument")
                else:
                    endiarg = find_end_of_inset(document.body, iarg)
                    if (
                        document.body[iarg - 1] == "\\begin_layout Plain Layout"
                        and find_end_of_layout(document.body, iarg - 1) == endiarg + 3
                    ):
                        del document.body[iarg - 1 : endiarg + 4]
                    else:
                        del document.body[iarg : endiarg + 1]
            if iarg2 != -1:
                iarg2 = find_token(document.body, "\\begin_inset Argument item:2", i, j)
                if iarg2 == -1:
                    document.warning("Unable to re-find item:2 Argument")
                else:
                    endiarg2 = find_end_of_inset(document.body, iarg2)
                    if (
                        document.body[iarg2 - 1] == "\\begin_layout Plain Layout"
                        and find_end_of_layout(document.body, iarg2 - 1) == endiarg2 + 3
                    ):
                        del document.body[iarg2 - 1 : endiarg2 + 4]
                    else:
                        del document.body[iarg2 : endiarg2 + 1]

            envname = "example"
            if consecex:
                envname = "examples"
            elif subexpl:
                envname = "subexamples"

            cmd = put_cmd_in_ert("\\begin{" + envname + "}[" + optargcontent + "]")

            # re-find end of layout
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Subexample layout")
                continue
            l = j
            while True:
                # check for consecutive layouts
                k = find_token(document.body, "\\begin_layout", l)
                if k == -1 or document.body[k] != layouttype:
                    break
                if not singleex:
                    subitemarg = ""
                    m = find_end_of_layout(document.body, k)
                    iarg = find_token(document.body, "\\begin_inset Argument item:1", k, m)
                    if iarg != -1:
                        endiarg = find_end_of_inset(document.body, iarg)
                        iargcontent = ""
                        iargbeginPlain = find_token(
                            document.body, "\\begin_layout Plain Layout", iarg, endiarg
                        )
                        if iargbeginPlain == -1:
                            document.warning(
                                "Malformed LyX document: Can't find optarg plain Layout"
                            )
                            continue
                        iargendPlain = find_end_of_inset(document.body, iargbeginPlain)
|
|
subitemarg = (
|
|
|
|
|
"<"
|
|
|
|
|
+ lyx2latex(document, document.body[iargbeginPlain:iargendPlain])
|
|
|
|
|
+ ">"
|
|
|
|
|
)
|
2023-07-20 13:09:47 +00:00
|
|
|
|
|
|
|
|
|
iarg2 = find_token(document.body, "\\begin_inset Argument item:2", k, m)
|
|
|
|
|
if iarg2 != -1:
|
|
|
|
|
endiarg2 = find_end_of_inset(document.body, iarg2)
|
|
|
|
|
iarg2content = ""
|
2024-06-15 09:06:06 +00:00
|
|
|
|
iarg2beginPlain = find_token(
|
|
|
|
|
document.body,
|
|
|
|
|
"\\begin_layout Plain Layout",
|
|
|
|
|
iarg2,
|
|
|
|
|
endiarg2,
|
|
|
|
|
)
|
2023-07-20 13:09:47 +00:00
|
|
|
|
if iarg2beginPlain == -1:
|
2024-06-15 09:06:06 +00:00
|
|
|
|
document.warning(
|
|
|
|
|
"Malformed LyX document: Can't find optarg plain Layout"
|
|
|
|
|
)
|
2023-07-20 13:09:47 +00:00
|
|
|
|
continue
|
|
|
|
|
iarg2endPlain = find_end_of_inset(document.body, iarg2beginPlain)
|
2024-06-15 09:06:06 +00:00
|
|
|
|
subitemarg += (
|
|
|
|
|
"["
|
|
|
|
|
+ lyx2latex(document, document.body[iarg2beginPlain:iarg2endPlain])
|
|
|
|
|
+ "]"
|
|
|
|
|
)
|
2023-07-20 13:09:47 +00:00
|
|
|
|
|
|
|
|
|
if subitemarg == "":
|
|
|
|
|
subitemarg = " "
|
2024-06-15 09:06:06 +00:00
|
|
|
|
document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert(
|
|
|
|
|
"\\item" + subitemarg
|
|
|
|
|
)
|
2023-07-20 13:09:47 +00:00
|
|
|
|
# Refind and remove arg insets
|
|
|
|
|
if iarg != -1:
|
|
|
|
|
iarg = find_token(document.body, "\\begin_inset Argument item:1", k, m)
|
|
|
|
|
if iarg == -1:
|
|
|
|
|
document.warning("Unable to re-find item:1 Argument")
|
|
|
|
|
else:
|
|
|
|
|
endiarg = find_end_of_inset(document.body, iarg)
|
2024-06-15 09:06:06 +00:00
|
|
|
|
if (
|
|
|
|
|
document.body[iarg - 1] == "\\begin_layout Plain Layout"
|
|
|
|
|
and find_end_of_layout(document.body, iarg - 1) == endiarg + 3
|
|
|
|
|
):
|
2023-07-20 13:09:47 +00:00
|
|
|
|
del document.body[iarg - 1 : endiarg + 4]
|
|
|
|
|
else:
|
|
|
|
|
del document.body[iarg : endiarg + 1]
|
|
|
|
|
if iarg2 != -1:
|
|
|
|
|
iarg2 = find_token(document.body, "\\begin_inset Argument item:2", k, m)
|
|
|
|
|
if iarg2 == -1:
|
|
|
|
|
document.warning("Unable to re-find item:2 Argument")
|
|
|
|
|
else:
|
|
|
|
|
endiarg2 = find_end_of_inset(document.body, iarg2)
|
2024-06-15 09:06:06 +00:00
|
|
|
|
if (
|
|
|
|
|
document.body[iarg2 - 1] == "\\begin_layout Plain Layout"
|
|
|
|
|
and find_end_of_layout(document.body, iarg2 - 1) == endiarg2 + 3
|
|
|
|
|
):
|
2023-07-20 13:09:47 +00:00
|
|
|
|
del document.body[iarg2 - 1 : endiarg2 + 4]
|
|
|
|
|
else:
|
|
|
|
|
del document.body[iarg2 : endiarg2 + 1]
|
|
|
|
|
else:
|
|
|
|
|
document.body[k : k + 1] = ["\\begin_layout Standard"]
|
|
|
|
|
l = find_end_of_layout(document.body, k)
|
|
|
|
|
if l == -1:
|
2024-06-15 09:06:06 +00:00
|
|
|
|
document.warning("Malformed LyX document: Can't find end of example layout")
|
|
|
|
|
continue
|
2023-07-20 13:09:47 +00:00
|
|
|
|
|
|
|
|
|
endev = put_cmd_in_ert("\\end{" + envname + "}")
|
|
|
|
|
|
2024-06-15 09:06:06 +00:00
|
|
|
|
document.body[l:l] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
|
|
|
|
|
document.body[i : i + 1] = (
|
|
|
|
|
["\\begin_layout Standard"]
|
|
|
|
|
+ cmd
|
|
|
|
|
+ ["\\end_layout", "", "\\begin_layout Standard"]
|
|
|
|
|
+ put_cmd_in_ert("\\item" + itemarg)
|
|
|
|
|
)
|
2023-07-20 13:09:47 +00:00
|
|
|
|
if not cov_req:
|
|
|
|
|
document.append_local_layout("Requires covington")
|
|
|
|
|
cov_req = True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
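
# For orientation, a rough before/after sketch of what the revert above emits
# (abridged: put_cmd_in_ert wraps each command in a full ERT inset, and
# <optarg>, <item:1>, [item:2] stand for the extracted argument contents):
#
#   \begin_layout Numbered Examples (consecutive)   -->  \begin_layout Standard
#   ...                                                  \begin{examples}[<optarg>]  (ERT)
#                                                        \item<item:1>[item:2]       (ERT)
#                                                        ...
#                                                        \end{examples}              (ERT)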


def revert_cov_options(document):
    """Revert examples item argument structure"""

    if "linguistics" not in document.get_module_list():
        return

    layouts = ["Numbered Examples (consecutive)", "Subexample"]

    for layout in layouts:
        i = 0
        while True:
            i = find_token(document.body, "\\begin_layout %s" % layout, i)
            if i == -1:
                break
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of example layout at line %d" % i
                )
                i += 1
                continue
            k = find_token(document.body, "\\begin_inset Argument item:2", i, j)
            if k != -1:
                document.body[k] = "\\begin_inset Argument item:1"
            i += 1

    # Shift gloss arguments
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (2 Lines)", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of gloss inset at line %d" % i
            )
            i += 1
            continue
        k = find_token(document.body, "\\begin_inset Argument post:2", i, j)
        if k != -1:
            document.body[k] = "\\begin_inset Argument post:1"
        k = find_token(document.body, "\\begin_inset Argument post:4", i, j)
        if k != -1:
            document.body[k] = "\\begin_inset Argument post:2"
        i += 1

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (3 Lines)", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of gloss inset at line %d" % i
            )
            i += 1
            continue
        k = find_token(document.body, "\\begin_inset Argument post:2", i, j)
        if k != -1:
            document.body[k] = "\\begin_inset Argument post:1"
        k = find_token(document.body, "\\begin_inset Argument post:4", i, j)
        if k != -1:
            document.body[k] = "\\begin_inset Argument post:2"
        k = find_token(document.body, "\\begin_inset Argument post:6", i, j)
        if k != -1:
            document.body[k] = "\\begin_inset Argument post:3"
        i += 1
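
# Illustration of the renumbering above on a hypothetical body fragment:
# within a Subexample layout, "\begin_inset Argument item:2" becomes
# "\begin_inset Argument item:1"; within a two-line gloss inset,
# post:2 -> post:1 and post:4 -> post:2; within a three-line gloss inset,
# post:2 -> post:1, post:4 -> post:2 and post:6 -> post:3, so the surviving
# arguments are numbered consecutively again for the older format.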


def revert_expreambles(document):
    """Revert covington example preamble flex insets to ERT"""

    revert_flex_inset(document, "Example Preamble", "\\expreamble")
    revert_flex_inset(document, "Subexample Preamble", "\\subexpreamble")
    revert_flex_inset(document, "Example Postamble", "\\expostamble")
    revert_flex_inset(document, "Subexample Postamble", "\\subexpostamble")


def revert_hequotes(document):
    "Revert Hebrew Quotation marks"

    i = find_token(document.header, "\\quotes_style hebrew", 0)
    if i != -1:
        document.header[i] = "\\quotes_style english"

    i = 0
    while True:
        # pass i as the start and advance below, so the scan always moves forward
        i = find_token(document.body, "\\begin_inset Quotes d", i)
        if i == -1:
            return
        if document.body[i] == "\\begin_inset Quotes dld":
            document.body[i] = "\\begin_inset Quotes prd"
        elif document.body[i] == "\\begin_inset Quotes drd":
            document.body[i] = "\\begin_inset Quotes pld"
        elif document.body[i] == "\\begin_inset Quotes dls":
            document.body[i] = "\\begin_inset Quotes prd"
        elif document.body[i] == "\\begin_inset Quotes drs":
            document.body[i] = "\\begin_inset Quotes pld"
        i += 1
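
# Note on the tokens matched above: quote insets are written as
# "\begin_inset Quotes <style><side><level>" (the convention used throughout
# lyx2lyx), so "dld" is presumably Hebrew style (d), left side (l), double
# level (d). The replacements keep such documents renderable in older formats
# that lack the Hebrew quote style.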


def revert_formatted_refs(document):
    """Remove the \\use_formatted_ref header setting"""

    i = find_token(document.header, "\\use_formatted_ref", 0)
    if i != -1:
        del document.header[i]


def revert_box_fcolor(document):
    """Revert the box frame color "default" to black"""

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Box Boxed", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of framed box inset at line %d" % i
            )
            continue
        k = find_token(document.body, 'framecolor "default"', i, j)
        if k != -1:
            document.body[k] = 'framecolor "black"'
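
# Sketch of the change made above, on an abridged box inset:
#
#   \begin_inset Box Boxed          \begin_inset Box Boxed
#   ...                      -->    ...
#   framecolor "default"            framecolor "black"
#
# Older formats have no "default" frame color, so the revert pins the color
# to black (presumably the previously hard-coded frame color).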


##
# Conversion hub
#

supported_versions = ["2.4.0", "2.4"]
convert = [
    [545, [convert_lst_literalparam]],
    [546, []],
    [547, []],
    [548, []],
    [549, []],
    [550, [convert_fontenc]],
    [551, []],
    [552, []],
    [553, []],
    [554, []],
    [555, []],
    [556, []],
    [557, [convert_vcsinfo]],
    [558, [removeFrontMatterStyles]],
    [559, []],
    [560, []],
    [561, [convert_latexFonts]],  # Handle dejavu, ibmplex fonts in GUI
    [562, []],
    [563, []],
    [564, []],
    [565, [convert_AdobeFonts]],  # Handle adobe fonts in GUI
    [566, [convert_hebrew_parentheses]],
    [567, []],
    [568, []],
    [569, []],
    [570, []],
    [571, []],
    [572, [convert_notoFonts]],  # Added options thin, light, extralight for Noto
    [573, [convert_inputencoding_namechange]],
    [574, [convert_ruby_module, convert_utf8_japanese]],
    [575, [convert_lineno, convert_aaencoding]],
    [576, []],
    [577, [convert_linggloss]],
    [578, []],
    [579, []],
    [580, []],
    [581, [convert_osf]],
    [
        582,
        [
            convert_AdobeFonts,
            convert_latexFonts,
            convert_notoFonts,
            convert_CantarellFont,
            convert_FiraFont,
        ],
    ],  # old fonts re-converted due to extra options
    [
        583,
        [
            convert_ChivoFont,
            convert_Semibolds,
            convert_NotoRegulars,
            convert_CrimsonProFont,
        ],
    ],
    [584, []],
    [585, [convert_pagesizes]],
    [586, []],
    [587, [convert_pagesizenames]],
    [588, []],
    [589, [convert_totalheight]],
    [590, [convert_changebars]],
    [591, [convert_postpone_fragile]],
    [592, []],
    [593, [convert_counter_maintenance]],
    [594, []],
    [595, []],
    [596, [convert_parskip]],
    [597, [convert_libertinus_rm_fonts]],
    [598, []],
    [599, []],
    [600, []],
    [601, [convert_math_refs]],
    [602, [convert_branch_colors]],
    [603, []],
    [604, []],
    [605, [convert_vcolumns2]],
    [606, [convert_koma_frontispiece]],
    [607, []],
    [608, []],
    [609, []],
    [610, []],
    [611, []],
    [612, [convert_starred_refs]],
    [613, []],
    [614, [convert_hyper_other]],
    [615, [convert_acknowledgment, convert_ack_theorems]],
    [616, [convert_empty_macro]],
    [617, [convert_cov_options]],
    [618, []],
    [619, []],
    [620, []],
]

revert = [
    [619, [revert_box_fcolor]],
    [618, [revert_formatted_refs]],
    [617, [revert_hequotes]],
    [616, [revert_expreambles, revert_exarg2, revert_linggloss2, revert_cov_options]],
    [615, [revert_empty_macro]],
    [614, [revert_ack_theorems, revert_acknowledgment]],
    [613, [revert_hyper_other]],
    [612, [revert_familydefault]],
    [611, [revert_starred_refs]],
    [610, []],
    [609, [revert_index_macros]],
    [608, [revert_document_metadata]],
    [607, [revert_docbook_mathml_prefix]],
    [606, [revert_spellchecker_ignore]],
    [605, [revert_koma_frontispiece]],
    [604, [revert_vcolumns2]],
    [603, [revert_branch_darkcols]],
    [602, [revert_darkmode_graphics]],
    [601, [revert_branch_colors]],
    [600, []],
    [599, [revert_math_refs]],
    [598, [revert_hrquotes]],
    [598, [revert_nopagebreak]],
    [597, [revert_docbook_table_output]],
    [596, [revert_libertinus_rm_fonts, revert_libertinus_sftt_fonts]],
    [595, [revert_parskip, revert_line_vspaces]],
    [594, [revert_ams_spaces]],
    [593, [revert_counter_inset]],
    [592, [revert_counter_maintenance]],
    [591, [revert_colrow_tracking]],
    [590, [revert_postpone_fragile]],
    [589, [revert_changebars]],
    [588, [revert_totalheight]],
    [587, [revert_memoir_endnotes, revert_enotez, revert_theendnotes]],
    [586, [revert_pagesizenames]],
    [585, [revert_dupqualicites]],
    [584, [revert_pagesizes, revert_komafontsizes]],
    [583, [revert_vcsinfo_rev_abbrev]],
    [582, [revert_ChivoFont, revert_CrimsonProFont]],
    [581, [revert_CantarellFont, revert_FiraFont]],
    [580, [revert_texfontopts, revert_osf]],
    [
        579,
        [
            revert_minionpro,
            revert_plainNotoFonts_xopts,
            revert_notoFonts_xopts,
            revert_IBMFonts_xopts,
            revert_AdobeFonts_xopts,
            revert_font_opts,
        ],
    ],  # keep revert_font_opts last!
    [578, [revert_babelfont]],
    [577, [revert_drs]],
    [576, [revert_linggloss, revert_subexarg]],
    [575, [revert_new_languages]],
    [574, [revert_lineno, revert_aaencoding]],
    [573, [revert_ruby_module, revert_utf8_japanese]],
    [572, [revert_inputencoding_namechange]],
    [571, [revert_notoFonts]],
    [570, [revert_cmidruletrimming]],
    [569, [revert_bibfileencodings]],
    [568, [revert_tablestyle]],
    [567, [revert_soul]],
    [566, [revert_malayalam]],
    [565, [revert_hebrew_parentheses]],
    [564, [revert_AdobeFonts]],
    [563, [revert_lformatinfo]],
    [562, [revert_listpargs]],
    [561, [revert_l7ninfo]],
    [560, [revert_latexFonts]],  # Handle dejavu, ibmplex fonts in user preamble
    [559, [revert_timeinfo, revert_namenoextinfo]],
    [558, [revert_dateinfo]],
    [557, [addFrontMatterStyles]],
    [556, [revert_vcsinfo]],
    [555, [revert_bibencoding]],
    [554, [revert_vcolumns]],
    [553, [revert_stretchcolumn]],
    [552, [revert_tuftecite]],
    [551, [revert_floatpclass, revert_floatalignment]],
    [550, [revert_nospellcheck]],
    [549, [revert_fontenc]],
    [548, []],  # dummy format change
    [547, [revert_lscape]],
    [546, [revert_xcharter]],
    [545, [revert_paratype]],
    [544, [revert_lst_literalparam]],
]


if __name__ == "__main__":
    pass
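
# A minimal sketch (not part of lyx2lyx proper) of how the tables above are
# consumed: each entry pairs a target format number with the functions that
# move a document one step up (convert) or down (revert). A hypothetical
# driver stepping a document through the whole convert chain might look like:
#
#   def upgrade(document):
#       for fmt, routines in convert:
#           for routine in routines:
#               routine(document)
#           document.format = fmt
#
# The real machinery lives in the lyx2lyx driver (LyX.py), which selects only
# the slice of steps between the document's current format and the requested
# one.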