lyx_mirror/lib/lyx2lyx/lyx_2_4.py
2024-09-17 15:53:18 +02:00

6888 lines
250 KiB
Python
Raw Permalink Blame History

This file contains invisible Unicode characters

This file contains invisible Unicode characters that are indistinguishable to humans but may be processed differently by a computer. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

# This file is part of lyx2lyx
# Copyright (C) 2018 The LyX team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""Convert files to the file format generated by lyx 2.4"""
import re
from datetime import date, datetime, time
from lyx2lyx_tools import (
add_to_preamble,
insert_to_preamble,
lyx2latex,
put_cmd_in_ert,
revert_flex_inset,
revert_language,
str2bool,
)
from parser_tools import (
count_pars_in_inset,
del_complete_lines,
del_token,
find_end_of,
find_end_of_inset,
find_end_of_layout,
find_re,
find_token,
find_token_backwards,
find_token_exact,
get_bool_value,
get_containing_inset,
get_containing_layout,
get_option_value,
get_quoted_value,
get_value,
is_in_inset,
)
####################################################################
# Private helper functions
def add_preamble_fonts(document, fontmap):
    """Add the collected font packages, with their options, to the user preamble."""
    for pkg, opts in fontmap.items():
        bracket = "[" + ",".join(opts) + "]" if len(opts) > 0 else ""
        add_to_preamble(document, [f"\\usepackage{bracket}{{{pkg}}}"])
def createkey(pkg, options):
    """Build the canonical 'pkg:opt1-opt2' lookup key; sorts *options* in place."""
    options.sort()
    return f"{pkg}:{'-'.join(options)}"
class fontinfo:
    """Record describing one known LaTeX font and the package providing it."""

    def __init__(self):
        self.fontname = None  # key into font2pkgmap
        self.fonttype = None  # roman,sans,typewriter,math
        self.scaletype = None  # None,sf,tt
        self.scaleopt = None  # None, 'scaled', 'scale'
        self.scaleval = 1  # numeric scale factor found in the preamble
        self.package = None  # LaTeX package that provides this font
        self.options = []  # package options needed to select this font
        self.pkgkey = None  # key into pkg2fontmap
        self.osfopt = None  # None, string
        self.osfdef = "false"  # "false" or "true"

    def addkey(self):
        # Derive the canonical package+options key (see createkey()).
        self.pkgkey = createkey(self.package, self.options)
class fontmapping:
    """Bidirectional map between LyX font names and LaTeX package+options."""

    def __init__(self):
        self.font2pkgmap = dict()  # font name -> fontinfo
        self.pkg2fontmap = dict()  # package key (createkey) -> font name
        self.pkginmap = dict()  # defines, if a map for package exists

    def expandFontMapping(
        self,
        font_list,
        font_type,
        scale_type,
        pkg,
        scaleopt=None,
        osfopt=None,
        osfdef="false",
    ):
        """Expand fontinfo mapping"""
        #
        # fontlist: list of fontnames, each element
        #           may contain a ','-separated list of needed options
        #           like e.g. 'IBMPlexSansCondensed,condensed'
        # font_type: one of 'roman', 'sans', 'typewriter', 'math'
        # scale_type: one of None, 'sf', 'tt'
        # pkg: package defining the font. Defaults to fontname if None
        # scaleopt: one of None, 'scale', 'scaled', or some other string
        #           to be used in scale option (e.g. scaled=0.7)
        # osfopt: None or some other string to be used in osf option
        # osfdef: "true" if osf is default
        for fl in font_list:
            fe = fontinfo()
            fe.fonttype = font_type
            fe.scaletype = scale_type
            flt = fl.split(",")
            font_name = flt[0]
            fe.fontname = font_name
            fe.options = flt[1:]
            fe.scaleopt = scaleopt
            fe.osfopt = osfopt
            fe.osfdef = osfdef
            if pkg == None:
                fe.package = font_name
            else:
                fe.package = pkg
            fe.addkey()
            self.font2pkgmap[font_name] = fe
            if fe.pkgkey in self.pkg2fontmap:
                # Repeated the same entry? Check content
                if self.pkg2fontmap[fe.pkgkey] != font_name:
                    # NOTE(review): `document` is not defined in this scope; if this
                    # branch is ever reached it raises NameError — confirm intent.
                    document.error("Something is wrong in pkgname+options <-> fontname mapping")
            self.pkg2fontmap[fe.pkgkey] = font_name
            self.pkginmap[fe.package] = 1

    def getfontname(self, pkg, options):
        """Return the font name registered for *pkg* with exactly *options*, else None.

        Sorts *options* in place (via createkey).
        """
        options.sort()
        pkgkey = createkey(pkg, options)
        if pkgkey not in self.pkg2fontmap:
            return None
        fontname = self.pkg2fontmap[pkgkey]
        if fontname not in self.font2pkgmap:
            # NOTE(review): `document` is undefined here as well (NameError if hit).
            document.error("Something is wrong in pkgname+options <-> fontname mapping")
            return None
        if pkgkey == self.font2pkgmap[fontname].pkgkey:
            return fontname
        return None
def createFontMapping(fontlist):
    """Build a fontmapping for the font families named in *fontlist*.

    Known family keys: "DejaVu", "IBM", "Adobe", "Noto", "Cantarell",
    "Chivo", "CrimsonPro", "Fira", "libertinus". Unknown keys are ignored.
    """
    # Create info for known fonts for the use in
    #   convert_latexFonts() and
    #   revert_latexFonts()
    #
    # * Would be more handy to parse latexFonts file,
    #   but the path to this file is unknown
    # * For now, add DejaVu and IBMPlex only.
    # * Expand, if desired
    fm = fontmapping()
    for font in fontlist:
        if font == "DejaVu":
            fm.expandFontMapping(["DejaVuSerif", "DejaVuSerifCondensed"], "roman", None, None)
            fm.expandFontMapping(
                ["DejaVuSans", "DejaVuSansCondensed"], "sans", "sf", None, "scaled"
            )
            fm.expandFontMapping(["DejaVuSansMono"], "typewriter", "tt", None, "scaled")
        elif font == "IBM":
            fm.expandFontMapping(
                [
                    "IBMPlexSerif",
                    "IBMPlexSerifThin,thin",
                    "IBMPlexSerifExtraLight,extralight",
                    "IBMPlexSerifLight,light",
                    "IBMPlexSerifSemibold,semibold",
                ],
                "roman",
                None,
                "plex-serif",
            )
            fm.expandFontMapping(
                [
                    "IBMPlexSans",
                    "IBMPlexSansCondensed,condensed",
                    "IBMPlexSansThin,thin",
                    "IBMPlexSansExtraLight,extralight",
                    "IBMPlexSansLight,light",
                    "IBMPlexSansSemibold,semibold",
                ],
                "sans",
                "sf",
                "plex-sans",
                "scale",
            )
            fm.expandFontMapping(
                [
                    "IBMPlexMono",
                    "IBMPlexMonoThin,thin",
                    "IBMPlexMonoExtraLight,extralight",
                    "IBMPlexMonoLight,light",
                    "IBMPlexMonoSemibold,semibold",
                ],
                "typewriter",
                "tt",
                "plex-mono",
                "scale",
            )
        elif font == "Adobe":
            fm.expandFontMapping(
                ["ADOBESourceSerifPro"], "roman", None, "sourceserifpro", None, "osf"
            )
            fm.expandFontMapping(
                ["ADOBESourceSansPro"], "sans", "sf", "sourcesanspro", "scaled", "osf"
            )
            fm.expandFontMapping(
                ["ADOBESourceCodePro"],
                "typewriter",
                "tt",
                "sourcecodepro",
                "scaled",
                "osf",
            )
        elif font == "Noto":
            fm.expandFontMapping(
                [
                    "NotoSerifRegular,regular",
                    "NotoSerifMedium,medium",
                    "NotoSerifThin,thin",
                    "NotoSerifLight,light",
                    "NotoSerifExtralight,extralight",
                ],
                "roman",
                None,
                "noto-serif",
                None,
                "osf",
            )
            fm.expandFontMapping(
                [
                    "NotoSansRegular,regular",
                    "NotoSansMedium,medium",
                    "NotoSansThin,thin",
                    "NotoSansLight,light",
                    "NotoSansExtralight,extralight",
                ],
                "sans",
                "sf",
                "noto-sans",
                "scaled",
            )
            fm.expandFontMapping(
                ["NotoMonoRegular,regular"], "typewriter", "tt", "noto-mono", "scaled"
            )
        elif font == "Cantarell":
            fm.expandFontMapping(
                ["cantarell,defaultsans"],
                "sans",
                "sf",
                "cantarell",
                "scaled",
                "oldstyle",
            )
        elif font == "Chivo":
            fm.expandFontMapping(
                [
                    "ChivoThin,thin",
                    "ChivoLight,light",
                    "Chivo,regular",
                    "ChivoMedium,medium",
                ],
                "sans",
                "sf",
                "Chivo",
                "scale",
                "oldstyle",
            )
        elif font == "CrimsonPro":
            fm.expandFontMapping(
                [
                    "CrimsonPro",
                    "CrimsonProExtraLight,extralight",
                    "CrimsonProLight,light",
                    "CrimsonProMedium,medium",
                ],
                "roman",
                None,
                "CrimsonPro",
                None,
                "lf",
                "true",
            )
        elif font == "Fira":
            fm.expandFontMapping(
                [
                    "FiraSans",
                    "FiraSansBook,book",
                    "FiraSansThin,thin",
                    "FiraSansLight,light",
                    "FiraSansExtralight,extralight",
                    "FiraSansUltralight,ultralight",
                ],
                "sans",
                "sf",
                "FiraSans",
                "scaled",
                "lf",
                "true",
            )
            fm.expandFontMapping(
                ["FiraMono"], "typewriter", "tt", "FiraMono", "scaled", "lf", "true"
            )
        elif font == "libertinus":
            fm.expandFontMapping(["libertinus,serif"], "roman", None, "libertinus", None, "osf")
            fm.expandFontMapping(
                ["libertinusmath"], "math", None, "libertinust1math", None, None
            )
    return fm
def convert_fonts(document, fm, osfoption="osf"):
    """Handle font definition (LaTeX preamble -> native)

    Scans the preamble for \\usepackage lines matching fonts known to the
    fontmapping *fm*, removes them, and sets the corresponding native
    document header entries (font name, scale, osf, font options).
    """
    rpkg = re.compile(r"^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}")
    rscaleopt = re.compile(r"^scaled?=(.*)")

    # Check whether we go beyond font option feature introduction
    haveFontOpts = document.end_format > 580

    i = 0
    while True:
        i = find_re(document.preamble, rpkg, i + 1)
        if i == -1:
            return
        mo = rpkg.search(document.preamble[i])
        if mo == None or mo.group(2) == None:
            options = []
        else:
            options = mo.group(2).replace(" ", "").split(",")
        pkg = mo.group(3)
        o = 0
        oscale = 1
        has_osf = False
        # Pull the osf and scale options out of the option list.
        while o < len(options):
            if options[o] == osfoption:
                has_osf = True
                del options[o]
                continue
            mo = rscaleopt.search(options[o])
            if mo == None:
                o += 1
                continue
            oscale = mo.group(1)
            del options[o]
            continue

        if pkg not in fm.pkginmap:
            continue
        # determine fontname
        fn = None
        if haveFontOpts:
            # Try with name-option combination first
            # (only one default option supported currently)
            o = 0
            while o < len(options):
                opt = options[o]
                fn = fm.getfontname(pkg, [opt])
                if fn != None:
                    del options[o]
                    break
                o += 1
                continue
            if fn == None:
                fn = fm.getfontname(pkg, [])
        else:
            fn = fm.getfontname(pkg, options)
        if fn == None:
            continue
        del document.preamble[i]
        fontinfo = fm.font2pkgmap[fn]
        if fontinfo.scaletype == None:
            fontscale = None
        else:
            fontscale = "\\font_" + fontinfo.scaletype + "_scale"
            fontinfo.scaleval = oscale
        if (has_osf and fontinfo.osfdef == "false") or (
            not has_osf and fontinfo.osfdef == "true"
        ):
            if fontinfo.osfopt == None:
                # NOTE(review): extend() with a string adds single characters,
                # and `options` is discarded by the following `continue` anyway
                # — looks like dead/buggy code; confirm intended behavior.
                options.extend(osfoption)
                continue
            osf = find_token(document.header, "\\font_osf false")
            osftag = "\\font_osf"
            if osf == -1 and fontinfo.fonttype != "math":
                # Try with newer format
                osftag = "\\font_" + fontinfo.fonttype + "_osf"
                osf = find_token(document.header, osftag + " false")
            if osf != -1:
                document.header[osf] = osftag + " true"

        # Drop the marker comment lyx2lyx left above the deleted line.
        if i > 0 and document.preamble[i - 1] == "% Added by lyx2lyx":
            del document.preamble[i - 1]
            i -= 1
        if fontscale != None:
            j = find_token(document.header, fontscale, 0)
            if j != -1:
                val = get_value(document.header, fontscale, j)
                vals = val.split()
                scale = "100"
                if oscale != None:
                    scale = "%03d" % int(float(oscale) * 100)
                document.header[j] = fontscale + " " + scale + " " + vals[1]
        ft = "\\font_" + fontinfo.fonttype
        j = find_token(document.header, ft, 0)
        if j != -1:
            val = get_value(document.header, ft, j)
            words = val.split()  # ! splits also values like '"DejaVu Sans"'
            words[0] = '"' + fn + '"'
            document.header[j] = ft + " " + " ".join(words)
        if haveFontOpts and fontinfo.fonttype != "math":
            fotag = "\\font_" + fontinfo.fonttype + "_opts"
            fo = find_token(document.header, fotag)
            if fo != -1:
                document.header[fo] = fotag + ' "' + ",".join(options) + '"'
            else:
                # Sensible place to insert tag
                fo = find_token(document.header, "\\font_sf_scale")
                if fo == -1:
                    document.warning("Malformed LyX document! Missing \\font_sf_scale")
                else:
                    document.header.insert(fo, fotag + ' "' + ",".join(options) + '"')
def revert_fonts(document, fm, fontmap, OnlyWithXOpts=False, WithXOpts=False):
    """Revert native font definition to LaTeX

    Resets matching \\font_* header entries to "default" and records the
    packages/options to emit in *fontmap* (package -> list of options),
    which the caller passes to add_preamble_fonts(). Returns False when
    OnlyWithXOpts is set and the required _opts header line is absent,
    or when a math font is hit in X-opts mode; True otherwise.
    """
    # fonlist := list of fonts created from the same package
    # Empty package means that the font-name is the same as the package-name
    # fontmap (key = package, val += found options) will be filled
    # and used later in add_preamble_fonts() to be added to user-preamble
    rfontscale = re.compile(r"^\s*(\\font_(roman|sans|typewriter|math))\s+")
    rscales = re.compile(r"^\s*(\d+)\s+(\d+)")
    i = 0
    while i < len(document.header):
        i = find_re(document.header, rfontscale, i + 1)
        if i == -1:
            return True
        mo = rfontscale.search(document.header[i])
        if mo == None:
            continue
        ft = mo.group(1)  # 'roman', 'sans', 'typewriter', 'math'
        val = get_value(document.header, ft, i)
        words = val.split(" ")  # ! splits also values like '"DejaVu Sans"'
        font = words[0].strip('"')  # TeX font name has no whitespace
        if font not in fm.font2pkgmap:
            continue
        fontinfo = fm.font2pkgmap[font]
        val = fontinfo.package
        if val not in fontmap:
            fontmap[val] = []
        x = -1
        if OnlyWithXOpts or WithXOpts:
            if ft == "\\font_math":
                return False
            regexp = re.compile(r"^\s*(\\font_roman_opts)\s+")
            if ft == "\\font_sans":
                regexp = re.compile(r"^\s*(\\font_sans_opts)\s+")
            elif ft == "\\font_typewriter":
                regexp = re.compile(r"^\s*(\\font_typewriter_opts)\s+")
            x = find_re(document.header, regexp, 0)
            if x == -1 and OnlyWithXOpts:
                return False

            if x != -1:
                # We need to use this regex since split() does not handle quote protection
                xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                opts = xopts[1].strip('"').split(",")
                fontmap[val].extend(opts)
                del document.header[x]
        words[0] = '"default"'
        document.header[i] = ft + " " + " ".join(words)
        if fontinfo.scaleopt != None:
            xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
            mo = rscales.search(xval)
            if mo != None:
                xval1 = mo.group(1)
                if xval1 != "100":
                    # set correct scale option
                    fontmap[val].extend(
                        [fontinfo.scaleopt + "=" + format(float(xval1) / 100, ".2f")]
                    )
        if fontinfo.osfopt != None:
            oldval = "true"
            if fontinfo.osfdef == "true":
                oldval = "false"
            osf = find_token(document.header, "\\font_osf " + oldval)
            if osf == -1 and ft != "\\font_math":
                # Try with newer format
                osftag = "\\font_roman_osf " + oldval
                if ft == "\\font_sans":
                    osftag = "\\font_sans_osf " + oldval
                elif ft == "\\font_typewriter":
                    osftag = "\\font_typewriter_osf " + oldval
                osf = find_token(document.header, osftag)
            if osf != -1:
                fontmap[val].extend([fontinfo.osfopt])

        if len(fontinfo.options) > 0:
            fontmap[val].extend(fontinfo.options)
    return True
###############################################################################
###
### Conversion and reversion routines
###
###############################################################################
def convert_inputencoding_namechange(document):
    """Rename the pre-2.4 inputencoding values to their new names."""
    line = find_token(document.header, "\\inputencoding", 0)
    if line == -1:
        return
    renamed = document.header[line].replace("auto", "auto-legacy")
    renamed = renamed.replace("default", "auto-legacy-plain")
    document.header[line] = renamed
def revert_inputencoding_namechange(document):
    """Restore the pre-2.4 inputencoding value names."""
    line = find_token(document.header, "\\inputencoding", 0)
    if line == -1:
        return
    renamed = document.header[line].replace("auto-legacy-plain", "default")
    renamed = renamed.replace("auto-legacy", "auto")
    document.header[line] = renamed
def convert_notoFonts(document):
    """Convert preamble-defined Noto fonts to native settings (TeX fonts only)."""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(["Noto"]))
def revert_notoFonts(document):
    """Revert native Noto font settings to LaTeX preamble code (TeX fonts only)."""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fontmap = {}
    if revert_fonts(document, createFontMapping(["Noto"]), fontmap):
        add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    """Convert DejaVu and IBM Plex preamble fonts to native settings (TeX fonts only)."""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(["DejaVu", "IBM"]))
def revert_latexFonts(document):
    """Revert native DejaVu/IBM Plex font settings to LaTeX preamble code."""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fontmap = {}
    if revert_fonts(document, createFontMapping(["DejaVu", "IBM"]), fontmap):
        add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    """Convert Adobe Source preamble fonts to native settings (TeX fonts only)."""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(["Adobe"]))
def revert_AdobeFonts(document):
    """Revert native Adobe Source font settings to LaTeX preamble code."""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fontmap = {}
    if revert_fonts(document, createFontMapping(["Adobe"]), fontmap):
        add_preamble_fonts(document, fontmap)
def removeFrontMatterStyles(document):
    """Remove styles Begin/EndFrontmatter"""

    layouts = ["BeginFrontmatter", "EndFrontmatter"]
    tokenend = len("\\begin_layout ")
    i = 0
    while True:
        i = find_token_exact(document.body, "\\begin_layout ", i + 1)
        if i == -1:
            return
        layout = document.body[i][tokenend:].strip()
        if layout not in layouts:
            continue
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
            continue
        # Also swallow blank lines directly following the layout.
        while document.body[j + 1].strip() == "":
            j += 1
        # Delete the whole layout in place; the next find restarts at i+1.
        document.body[i : j + 1] = []
def addFrontMatterStyles(document):
    """Use styles Begin/EndFrontmatter for elsarticle"""

    if document.textclass != "elsarticle":
        return

    def insertFrontmatter(prefix, line):
        # Replace the blank-line run around *line* with a (Begin|End)Frontmatter
        # layout containing a "Keep this empty!" note.
        above = line
        while above > 0 and document.body[above - 1].strip() == "":
            above -= 1
        below = line
        while document.body[below].strip() == "":
            below += 1
        document.body[above:below] = [
            "",
            "\\begin_layout " + prefix + "Frontmatter",
            "\\begin_inset Note Note",
            "status open",
            "",
            "\\begin_layout Plain Layout",
            "Keep this empty!",
            "\\end_layout",
            "",
            "\\end_inset",
            "",
            "",
            "\\end_layout",
            "",
        ]

    # Layouts that belong to the elsarticle front matter.
    layouts = [
        "Title",
        "Title footnote",
        "Author",
        "Author footnote",
        "Corresponding author",
        "Address",
        "Email",
        "Abstract",
        "Keywords",
    ]
    tokenend = len("\\begin_layout ")
    first = -1
    i = 0
    while True:
        i = find_token_exact(document.body, "\\begin_layout ", i + 1)
        if i == -1:
            break
        layout = document.body[i][tokenend:].strip()
        if layout not in layouts:
            continue
        k = find_end_of_layout(document.body, i)
        if k == -1:
            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
            continue
        if first == -1:
            first = i
        i = k
    if first == -1:
        return
    # Insert the End marker first so *first* stays a valid index.
    insertFrontmatter("End", k + 1)
    insertFrontmatter("Begin", first)
def convert_lst_literalparam(document):
    """Insert the new 'literal "true"' parameter into every include inset."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset include", pos + 1)
        if pos == -1:
            break
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning(
                "Malformed LyX document: Can't find end of command inset at line %d" % pos
            )
            continue
        # The parameter goes right before the first blank line of the inset.
        while pos < end and document.body[pos].strip():
            pos += 1
        document.body.insert(pos, 'literal "true"')
def revert_lst_literalparam(document):
    """Strip the 'literal' parameter from every include inset."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset include", pos + 1)
        if pos == -1:
            break
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning(
                "Malformed LyX document: Can't find end of include inset at line %d" % pos
            )
            continue
        del_token(document.body, "literal", pos, end)
def revert_paratype(document):
    """Revert ParaType font definitions to LaTeX.

    Resets the PT* native font settings to "default" and emits the
    corresponding \\usepackage lines (paratype, PTSerif, PTSans, PTMono)
    with scale options reconstructed from the header scale values.
    """
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        i1 = find_token(document.header, '\\font_roman "PTSerif-TLF"', 0)
        i2 = find_token(document.header, '\\font_sans "default"', 0)
        i3 = find_token(document.header, '\\font_typewriter "default"', 0)
        j = find_token(document.header, '\\font_sans "PTSans-TLF"', 0)

        sf_scale = 100.0
        sfval = find_token(document.header, "\\font_sf_scale", 0)
        if sfval == -1:
            document.warning("Malformed LyX document: Missing \\font_sf_scale.")
        else:
            sfscale = document.header[sfval].split()
            val = sfscale[1]
            sfscale[1] = "100"
            document.header[sfval] = " ".join(sfscale)
            try:
                # float() can throw
                sf_scale = float(val)
            except ValueError:
                document.warning("Invalid font_sf_scale value: " + val)

        sfoption = ""
        # FIX: compare numbers, not float vs. string (the old test
        # `sf_scale != "100.0"` was always True, so an unscaled font got a
        # spurious "scaled=1.0" option).
        if sf_scale != 100.0:
            sfoption = "scaled=" + str(sf_scale / 100.0)
        k = find_token(document.header, '\\font_typewriter "PTMono-TLF"', 0)
        ttval = get_value(document.header, "\\font_tt_scale", 0)
        # cutoff " 100"
        ttval = ttval[:-4]
        ttoption = ""
        if ttval != "100":
            ttoption = "scaled=" + format(float(ttval) / 100, ".2f")
        if i1 != -1 and i2 != -1 and i3 != -1:
            # All three families are ParaType: load the umbrella package.
            add_to_preamble(document, ["\\usepackage{paratype}"])
        else:
            if i1 != -1:
                add_to_preamble(document, ["\\usepackage{PTSerif}"])
                document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
            if j != -1:
                if sfoption != "":
                    add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
                else:
                    add_to_preamble(document, ["\\usepackage{PTSans}"])
                document.header[j] = document.header[j].replace("PTSans-TLF", "default")
            if k != -1:
                if ttoption != "":
                    add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
                else:
                    add_to_preamble(document, ["\\usepackage{PTMono}"])
                document.header[k] = document.header[k].replace("PTMono-TLF", "default")
def revert_xcharter(document):
    """Replace the native XCharter font setting by LaTeX preamble code."""
    pos = find_token(document.header, '\\font_roman "xcharter"', 0)
    if pos == -1:
        return

    # Replace the unsupported font setting.
    document.header[pos] = document.header[pos].replace("xcharter", "default")
    # System fonts need no preamble code.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    # Transfer the old-style-figures setting to a package option.
    options = ""
    osf = find_token(document.header, "\\font_osf true")
    if osf != -1:
        document.header[osf] = "\\font_osf false"
        options = "[osf]"
    add_to_preamble(document, ["\\usepackage%s{XCharter}" % options])
def revert_lscape(document):
    """Reverts the landscape environment (Landscape module) to TeX-code"""

    if "landscape" not in document.get_module_list():
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Flex Landscape", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Landscape inset")
            continue

        # Replace the inset end first so index i stays valid for the second splice.
        if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
            document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
            add_to_preamble(document, ["\\usepackage{afterpage}"])
        else:
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
            document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
            add_to_preamble(document, ["\\usepackage{pdflscape}"])
    document.del_module("landscape")
def convert_fontenc(document):
    """Rename the default fontenc setting from 'global' to 'auto'."""
    pos = find_token(document.header, "\\fontencoding global", 0)
    if pos != -1:
        document.header[pos] = document.header[pos].replace("global", "auto")
def revert_fontenc(document):
    """Rename the default fontenc setting from 'auto' back to 'global'."""
    pos = find_token(document.header, "\\fontencoding auto", 0)
    if pos != -1:
        document.header[pos] = document.header[pos].replace("auto", "global")
def revert_nospellcheck(document):
    """Delete every \\nospellcheck font-info parameter from the body."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\nospellcheck", pos)
        if pos == -1:
            return
        # Deleting shifts the following lines up, so re-search from pos.
        del document.body[pos]
def revert_floatpclass(document):
    """Remove float placement params 'document' and 'class'"""
    del_token(document.header, "\\float_placement class")

    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Float", pos + 1)
        if pos == -1:
            break
        end = find_end_of_inset(document.body, pos)
        # Drop whichever placement parameter this float carries (at most one).
        for param in ("placement class", "placement document"):
            k = find_token(document.body, param, pos, end)
            if k != -1:
                del document.body[k]
                break
def revert_floatalignment(document):
    """Remove float alignment params"""

    # Global default alignment (header line is deleted here).
    galignment = get_value(document.header, "\\float_alignment", delete=True)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Float", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of inset at line " + str(i)
            )
            continue
        k = find_token(document.body, "alignment", i, j)
        if k == -1:
            i = j
            continue
        alignment = get_value(document.body, "alignment", k)
        if alignment == "document":
            alignment = galignment
        del document.body[k]
        l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
        if l == -1:
            document.warning("Can't find float layout!")
            continue
        alcmd = []
        if alignment == "left":
            alcmd = put_cmd_in_ert("\\raggedright{}")
        elif alignment == "center":
            alcmd = put_cmd_in_ert("\\centering{}")
        elif alignment == "right":
            alcmd = put_cmd_in_ert("\\raggedleft{}")
        if len(alcmd) > 0:
            document.body[l + 1 : l + 1] = alcmd
        # There might be subfloats, so we do not want to move past
        # the end of the inset.
        i += 1
def revert_tuftecite(document):
    r"""Revert \cite commands in tufte classes"""

    tufte = ["tufte-book", "tufte-handout"]
    if document.textclass not in tufte:
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset citation", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" % (i))
            continue
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" % (i))
            i = j
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        if cmd != "cite":
            i = j
            continue
        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" % (i))
            key = "???"
        # Replace command with ERT
        res = "\\cite"
        if pre:
            res += "[" + pre + "]"
        if post:
            res += "[" + post + "]"
        elif pre:
            # A prenote without a postnote still needs the empty bracket pair.
            res += "[]"
        res += "{" + key + "}"
        document.body[i : j + 1] = put_cmd_in_ert([res])
        i = j
def revert_stretchcolumn(document):
    """We remove the column varwidth flags or everything else will become a mess."""
    varwidth_col = re.compile('^<column.*varwidth="[^"]+".*>$')
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Tabular", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            continue
        for line in range(pos, end):
            if varwidth_col.search(document.body[line]):
                document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
                document.body[line] = document.body[line].replace(' varwidth="true"', "")
def revert_vcolumns(document):
    """Revert standard columns with line breaks etc."""
    i = 0
    needvarwidth = False
    needarray = False
    # try/finally so the collected preamble requirements are emitted even if
    # a malformed table raises while parsing.
    try:
        while True:
            i = find_token(document.body, "\\begin_inset Tabular", i + 1)
            if i == -1:
                return
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Could not find end of tabular.")
                continue

            # Collect necessary column information
            m = i + 1
            nrows = int(document.body[i + 1].split('"')[3])
            ncols = int(document.body[i + 1].split('"')[5])
            col_info = []
            for k in range(ncols):
                m = find_token(document.body, "<column", m)
                width = get_option_value(document.body[m], "width")
                varwidth = get_option_value(document.body[m], "varwidth")
                alignment = get_option_value(document.body[m], "alignment")
                special = get_option_value(document.body[m], "special")
                col_info.append([width, varwidth, alignment, special, m])

            # Now parse cells
            m = i + 1
            lines = []
            for row in range(nrows):
                for col in range(ncols):
                    m = find_token(document.body, "<cell", m)
                    multicolumn = get_option_value(document.body[m], "multicolumn")
                    multirow = get_option_value(document.body[m], "multirow")
                    width = get_option_value(document.body[m], "width")
                    rotate = get_option_value(document.body[m], "rotate")
                    # Check for: linebreaks, multipars, non-standard environments
                    begcell = m
                    endcell = find_token(document.body, "</cell>", begcell)
                    vcand = False
                    if (
                        find_token(document.body, "\\begin_inset Newline", begcell, endcell)
                        != -1
                    ):
                        vcand = True
                    elif count_pars_in_inset(document.body, begcell + 2) > 1:
                        vcand = True
                    elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
                        vcand = True
                    if (
                        vcand
                        and rotate == ""
                        and ((multicolumn == "" and multirow == "") or width == "")
                    ):
                        if (
                            col_info[col][0] == ""
                            and col_info[col][1] == ""
                            and col_info[col][3] == ""
                        ):
                            needvarwidth = True
                            alignment = col_info[col][2]
                            col_line = col_info[col][4]
                            vval = ""
                            if alignment == "center":
                                vval = ">{\\centering}"
                            elif alignment == "left":
                                vval = ">{\\raggedright}"
                            elif alignment == "right":
                                vval = ">{\\raggedleft}"
                            if vval != "":
                                needarray = True
                            vval += "V{\\linewidth}"

                            document.body[col_line] = (
                                document.body[col_line][:-1] + ' special="' + vval + '">'
                            )
                            # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
                            # with newlines, and we do not want that)
                            while True:
                                endcell = find_token(document.body, "</cell>", begcell)
                                linebreak = False
                                nl = find_token(
                                    document.body,
                                    "\\begin_inset Newline newline",
                                    begcell,
                                    endcell,
                                )
                                if nl == -1:
                                    nl = find_token(
                                        document.body,
                                        "\\begin_inset Newline linebreak",
                                        begcell,
                                        endcell,
                                    )
                                    if nl == -1:
                                        break
                                    linebreak = True
                                nle = find_end_of_inset(document.body, nl)
                                del document.body[nle : nle + 1]
                                if linebreak:
                                    document.body[nl : nl + 1] = put_cmd_in_ert("\\linebreak{}")
                                else:
                                    document.body[nl : nl + 1] = put_cmd_in_ert("\\\\")
                    m += 1

            i = j

    finally:
        if needarray == True:
            add_to_preamble(document, ["\\usepackage{array}"])
        if needvarwidth == True:
            add_to_preamble(document, ["\\usepackage{varwidth}"])
def revert_bibencoding(document):
    """Revert bibliography encoding"""

    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # Check if biblatex
    biblatex = False
    if engine in ["biblatex", "biblatex-natbib"]:
        biblatex = True

    # Map lyx to latex encoding names
    encodings = {
        "utf8": "utf8",
        "utf8x": "utf8x",
        "armscii8": "armscii8",
        "iso8859-1": "latin1",
        "iso8859-2": "latin2",
        "iso8859-3": "latin3",
        "iso8859-4": "latin4",
        "iso8859-5": "iso88595",
        "iso8859-6": "8859-6",
        "iso8859-7": "iso-8859-7",
        "iso8859-8": "8859-8",
        "iso8859-9": "latin5",
        "iso8859-13": "latin7",
        "iso8859-15": "latin9",
        "iso8859-16": "latin10",
        "applemac": "applemac",
        "cp437": "cp437",
        "cp437de": "cp437de",
        "cp850": "cp850",
        "cp852": "cp852",
        "cp855": "cp855",
        "cp858": "cp858",
        "cp862": "cp862",
        "cp865": "cp865",
        "cp866": "cp866",
        "cp1250": "cp1250",
        "cp1251": "cp1251",
        "cp1252": "cp1252",
        "cp1255": "cp1255",
        "cp1256": "cp1256",
        "cp1257": "cp1257",
        "koi8-r": "koi8-r",
        "koi8-u": "koi8-u",
        "pt154": "pt154",
        "utf8-platex": "utf8",
        "ascii": "ascii",
    }

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of bibtex inset at line %d!!" % (i))
            continue
        encoding = get_quoted_value(document.body, "encoding", i, j)
        if not encoding:
            continue
        # remove encoding line
        k = find_token(document.body, "encoding", i, j)
        if k != -1:
            del document.body[k]
        if encoding == "default":
            continue
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        if biblatex:
            # biblatex takes the encoding as a package option.
            biblio_options = ""
            h = find_token(document.header, "\\biblio_options", 0)
            if h != -1:
                biblio_options = get_value(document.header, "\\biblio_options", h)
                if "bibencoding" not in biblio_options:
                    document.header[h] += ",bibencoding=%s" % encodings[encoding]
            else:
                bs = find_token(document.header, "\\biblatex_bibstyle", 0)
                if bs == -1:
                    # this should not happen
                    document.warning(
                        "Malformed LyX document! No \\biblatex_bibstyle header found!"
                    )
                else:
                    document.header[bs - 1 : bs - 1] = [
                        "\\biblio_options bibencoding=" + encodings[encoding]
                    ]
        else:
            # BibTeX: wrap the inset in an \inputencoding group via ERT.
            document.body[j + 1 : j + 1] = put_cmd_in_ert("\\egroup")
            document.body[i:i] = put_cmd_in_ert(
                "\\bgroup\\inputencoding{" + encodings[encoding] + "}"
            )

        i = j
def convert_vcsinfo(document):
    """Separate vcs Info inset from buffer Info inset."""
    vcs_args = {
        "vcs-revision": "revision",
        "vcs-tree-revision": "tree-revision",
        "vcs-author": "author",
        "vcs-time": "time",
        "vcs-date": "date",
    }
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Info", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tpline = find_token(document.body, "type", pos, end)
        if get_quoted_value(document.body, "type", tpline) != "buffer":
            continue
        argline = find_token(document.body, "arg", pos, end)
        argval = get_quoted_value(document.body, "arg", argline)
        if argval not in vcs_args:
            continue
        document.body[tpline] = 'type "vcs"'
        document.body[argline] = f'arg "{vcs_args[argval]}"'
def revert_vcsinfo(document):
    """Merge vcs Info inset to buffer Info inset.

    Inverse of convert_vcsinfo(): type "vcs" insets become type "buffer"
    insets with a "vcs-" prefix on the argument.
    """
    known_args = ("revision", "tree-revision", "author", "time", "date")
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Info", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tline = find_token(document.body, "type", pos, end)
        if get_quoted_value(document.body, "type", tline) != "vcs":
            continue
        aline = find_token(document.body, "arg", pos, end)
        aval = get_quoted_value(document.body, "arg", aline)
        if aval not in known_args:
            document.warning("Malformed Info inset. Invalid vcs arg.")
            continue
        document.body[tline] = 'type "buffer"'
        document.body[aline] = 'arg "vcs-' + aval + '"'
def revert_vcsinfo_rev_abbrev(document):
    "Convert abbreviated revisions to regular revisions."
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Info", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tline = find_token(document.body, "type", pos, end)
        if get_quoted_value(document.body, "type", tline) != "vcs":
            continue
        # downgrade the (new) abbreviated revision to a plain revision
        aline = find_token(document.body, "arg", pos, end)
        if get_quoted_value(document.body, "arg", aline) == "revision-abbrev":
            document.body[aline] = 'arg "revision"'
def revert_dateinfo(document):
    """Revert date info insets to static text.

    The inset is replaced by today's date (or, for "fixdate", the date
    encoded in the inset argument) rendered with the main language's
    date format.
    """
    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # The date formats for each language using strftime syntax:
    # long, short, loclong, locmedium, locshort
    dateformats = {
        "afrikaans": ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "albanian": ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "american": ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "amharic": ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ancientgreek": [
            "%A, %d %B %Y",
            "%d %b %Y",
            "%d %B %Y",
            "%d %b %Y",
            "%d/%m/%Y",
        ],
        "arabic_arabi": [
            "%%d %%Y",
            "%d/%m/%Y",
            "%d %%Y",
            "%d/%m/%Y",
            "%d/%m/%Y",
        ],
        "arabic_arabtex": [
            "%%d %%Y",
            "%d/%m/%Y",
            "%d %%Y",
            "%d/%m/%Y",
            "%d/%m/%Y",
        ],
        "armenian": [
            "%Y թ. %B %d, %A",
            "%d.%m.%y",
            "%d %%Y",
            "%d %%Y",
            "%d/%m/%Y",
        ],
        "asturian": [
            "%A, %d %B de %Y",
            "%d/%m/%y",
            "%d de %B de %Y",
            "%d %b %Y",
            "%d/%m/%Y",
        ],
        "australian": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "austrian": ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "bahasa": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bahasam": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "basque": ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
        "belarusian": [
            "%A, %d %B %Y г.",
            "%d.%m.%y",
            "%d %B %Y",
            "%d %b %Y",
            "%d.%m.%Y",
        ],
        "bosnian": [
            "%A, %d. %B %Y.",
            "%d.%m.%y.",
            "%d. %B %Y",
            "%d. %b %Y",
            "%Y-%m-%d",
        ],
        "brazilian": [
            "%A, %d de %B de %Y",
            "%d/%m/%Y",
            "%d de %B de %Y",
            "%d de %b de %Y",
            "%d/%m/%Y",
        ],
        "breton": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "british": ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bulgarian": [
            "%A, %d %B %Y г.",
            "%d.%m.%y г.",
            "%d %B %Y",
            "%d %b %Y",
            "%Y-%m-%d",
        ],
        "canadian": ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadien": ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "catalan": [
            "%A, %d %B de %Y",
            "%d/%m/%y",
            "%d / %B / %Y",
            "%d / %b / %Y",
            "%d/%m/%Y",
        ],
        "chinese-simplified": [
            "%Y年%m月%d%A",
            "%Y/%m/%d",
            "%Y年%m月%d",
            "%Y-%m-%d",
            "%y-%m-%d",
        ],
        "chinese-traditional": [
            "%Y年%m月%d%A",
            "%Y/%m/%d",
            "%Y年%m月%d",
            "%Y年%m月%d",
            "%y年%m月%d",
        ],
        "coptic": ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "croatian": [
            "%A, %d. %B %Y.",
            "%d. %m. %Y.",
            "%d. %B %Y.",
            "%d. %b. %Y.",
            "%d.%m.%Y.",
        ],
        "czech": ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
        "danish": [
            "%A den %d. %B %Y",
            "%d/%m/%Y",
            "%d. %B %Y",
            "%d. %b %Y",
            "%d/%m/%Y",
        ],
        "divehi": ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
        "dutch": ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "english": ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "esperanto": [
            "%A, %d %B %Y",
            "%d %b %Y",
            "la %d de %B %Y",
            "la %d de %b %Y",
            "%m/%d/%Y",
        ],
        "estonian": ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "farsi": ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "finnish": ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "french": ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "friulan": [
            "%A %d di %B dal %Y",
            "%d/%m/%y",
            "%d di %B dal %Y",
            "%d di %b dal %Y",
            "%d/%m/%Y",
        ],
        "galician": [
            "%A, %d de %B de %Y",
            "%d/%m/%y",
            "%d de %B de %Y",
            "%d de %b de %Y",
            "%d/%m/%Y",
        ],
        "georgian": ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "german": ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch": [
            "%A, %d. %B %Y",
            "%d.%m.%y",
            "%d. %B %Y",
            "%d. %b %Y",
            "%d.%m.%Y",
        ],
        "german-ch-old": [
            "%A, %d. %B %Y",
            "%d.%m.%y",
            "%d. %B %Y",
            "%d. %b %Y",
            "%d.%m.%Y",
        ],
        "greek": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hebrew": ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hindi": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "icelandic": [
            "%A, %d. %B %Y",
            "%d.%m.%Y",
            "%d. %B %Y",
            "%d. %b %Y",
            "%d.%m.%Y",
        ],
        "interlingua": [
            "%Y %B %d, %A",
            "%Y-%m-%d",
            "le %d de %B %Y",
            "le %d de %b %Y",
            "%Y-%m-%d",
        ],
        "irish": ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "italian": ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
        "japanese": [
            "%Y年%m月%d%A",
            "%Y/%m/%d",
            "%Y年%m月%d",
            "%Y/%m/%d",
            "%y/%m/%d",
        ],
        "japanese-cjk": [
            "%Y年%m月%d%A",
            "%Y/%m/%d",
            "%Y年%m月%d",
            "%Y/%m/%d",
            "%y/%m/%d",
        ],
        "kannada": ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
        "kazakh": ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
        "khmer": ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "korean": [
            "%Y년 %m월 %d%A",
            "%y. %m. %d.",
            "%Y년 %m월 %d",
            "%Y. %m. %d.",
            "%y. %m. %d.",
        ],
        "kurmanji": ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
        "lao": ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "latin": ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "latvian": [
            "%A, %Y. gada %d. %B",
            "%d.%m.%y",
            "%Y. gada %d. %B",
            "%Y. gada %d. %b",
            "%d.%m.%Y",
        ],
        "lithuanian": [
            "%Y m. %B %d d., %A",
            "%Y-%m-%d",
            "%Y m. %B %d d.",
            "%Y m. %B %d d.",
            "%Y-%m-%d",
        ],
        "lowersorbian": [
            "%A, %d. %B %Y",
            "%d.%m.%y",
            "%d %B %Y",
            "%d %b %Y",
            "%d.%m.%Y",
        ],
        "macedonian": ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "magyar": [
            "%Y. %B %d., %A",
            "%Y. %m. %d.",
            "%Y. %B %d.",
            "%Y. %b %d.",
            "%Y.%m.%d.",
        ],
        "malayalam": ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "marathi": ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "mongolian": [
            "%A, %Y оны %m сарын %d",
            "%Y-%m-%d",
            "%Y оны %m сарын %d",
            "%d-%m-%Y",
            "%d-%m-%Y",
        ],
        "naustrian": [
            "%A, %d. %B %Y",
            "%d.%m.%y",
            "%d. %B %Y",
            "%d. %b %Y",
            "%d.%m.%Y",
        ],
        "newzealand": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ngerman": ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "norsk": ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "nynorsk": ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "occitan": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "piedmontese": [
            "%A, %d %B %Y",
            "%d %b %Y",
            "%B %d, %Y",
            "%b %d, %Y",
            "%m/%d/%Y",
        ],
        "polish": ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "polutonikogreek": [
            "%A, %d %B %Y",
            "%d/%m/%y",
            "%d %B %Y",
            "%d %b %Y",
            "%d/%m/%Y",
        ],
        "portuguese": [
            "%A, %d de %B de %Y",
            "%d/%m/%y",
            "%d de %B de %Y",
            "%d de %b de %Y",
            "%Y/%m/%d",
        ],
        "romanian": ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "romansh": [
            "%A, ils %d da %B %Y",
            "%d-%m-%y",
            "%d %B %Y",
            "%d %b %Y",
            "%d.%m.%Y",
        ],
        "russian": [
            "%A, %d %B %Y г.",
            "%d.%m.%Y",
            "%d %B %Y г.",
            "%d %b %Y г.",
            "%d.%m.%Y",
        ],
        "samin": [
            "%Y %B %d, %A",
            "%Y-%m-%d",
            "%B %d. b. %Y",
            "%b %d. b. %Y",
            "%d.%m.%Y",
        ],
        "sanskrit": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "scottish": ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "serbian": [
            "%A, %d. %B %Y.",
            "%d.%m.%y.",
            "%d. %B %Y",
            "%d. %b %Y",
            "%d.%m.%Y",
        ],
        "serbian-latin": [
            "%A, %d. %B %Y.",
            "%d.%m.%y.",
            "%d. %B %Y",
            "%d. %b %Y",
            "%d.%m.%Y",
        ],
        "slovak": ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovene": [
            "%A, %d. %B %Y",
            "%d. %m. %y",
            "%d. %B %Y",
            "%d. %b %Y",
            "%d.%m.%Y",
        ],
        "spanish": [
            "%A, %d de %B de %Y",
            "%d/%m/%y",
            "%d de %B %de %Y",
            "%d %b %Y",
            "%d/%m/%Y",
        ],
        "spanish-mexico": [
            "%A, %d de %B %de %Y",
            "%d/%m/%y",
            "%d de %B de %Y",
            "%d %b %Y",
            "%d/%m/%Y",
        ],
        "swedish": ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "syriac": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tamil": ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "telugu": ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "thai": ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tibetan": [
            "%Y %Bའི་ཚེས་%d, %A",
            "%Y-%m-%d",
            "%B %d, %Y",
            "%b %d, %Y",
            "%m/%d/%Y",
        ],
        "turkish": ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
        "turkmen": [
            "%d %B %Y %A",
            "%d.%m.%Y",
            "%Y ý. %B %d",
            "%d.%m.%Y ý.",
            "%d.%m.%y ý.",
        ],
        "ukrainian": [
            "%A, %d %B %Y р.",
            "%d.%m.%y",
            "%d %B %Y",
            "%d %m %Y",
            "%d.%m.%Y",
        ],
        "uppersorbian": [
            "%A, %d. %B %Y",
            "%d.%m.%y",
            "%d %B %Y",
            "%d %b %Y",
            "%d.%m.%Y",
        ],
        "urdu": ["%%d %%Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
        "vietnamese": [
            "%A, %d %B, %Y",
            "%d/%m/%Y",
            "%d tháng %B %Y",
            "%d-%m-%Y",
            "%d/%m/%Y",
        ],
        "welsh": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    }
    types = ["date", "fixdate", "moddate"]
    lang = get_value(document.header, "\\language")
    if lang == "":
        document.warning("Malformed LyX document! No \\language header found!")
        return
    if lang not in dateformats:
        # Fall back to English formats rather than raising KeyError below.
        document.warning("Unknown language `%s'! Using English date formats." % lang)
        lang = "english"
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, "type", i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, "arg", i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isodate = ""
        dte = date.today()
        if tpv == "fixdate":
            # fixdate args have the form "<format>@<ISO date>"
            datecomps = argv.split("@")
            if len(datecomps) > 1:
                argv = datecomps[0]
                isodate = datecomps[1]
                m = re.search(r"(\d\d\d\d)-(\d\d)-(\d\d)", isodate)
                if m:
                    dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #    dte = date.fromtimestamp(os.path.getmtime(document.dir))
        result = ""
        if argv == "ISO":
            # date objects have no isodate(); isoformat() yields YYYY-MM-DD
            result = dte.isoformat()
        elif argv == "long":
            result = dte.strftime(dateformats[lang][0])
        elif argv == "short":
            result = dte.strftime(dateformats[lang][1])
        elif argv == "loclong":
            result = dte.strftime(dateformats[lang][2])
        elif argv == "locmedium":
            result = dte.strftime(dateformats[lang][3])
        elif argv == "locshort":
            result = dte.strftime(dateformats[lang][4])
        else:
            # Custom format: translate Qt-style format tokens to strftime
            fmt = (
                argv.replace("MMMM", "%b")
                .replace("MMM", "%b")
                .replace("MM", "%m")
                .replace("M", "%m")
            )
            fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
            fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
            fmt = re.sub("[^'%]d", "%d", fmt)
            fmt = fmt.replace("'", "")
            result = dte.strftime(fmt)
        document.body[i : j + 1] = [result]
def revert_timeinfo(document):
    """Revert time info insets to static text.

    The inset is replaced by the current time (or, for "fixtime", the time
    encoded in the inset argument) rendered with the main language's
    time format.
    """
    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
    # be empty).
    # The time formats for each language using strftime syntax:
    # long, short
    timeformats = {
        "afrikaans": ["%H:%M:%S %Z", "%H:%M"],
        "albanian": ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek": ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian": ["%H:%M:%S %Z", "%H:%M"],
        "asturian": ["%H:%M:%S %Z", "%H:%M"],
        "australian": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian": ["%H:%M:%S %Z", "%H:%M"],
        "bahasa": ["%H.%M.%S %Z", "%H.%M"],
        "bahasam": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque": ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian": ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian": ["%H:%M:%S %Z", "%H:%M"],
        "brazilian": ["%H:%M:%S %Z", "%H:%M"],
        "breton": ["%H:%M:%S %Z", "%H:%M"],
        "british": ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian": ["%H:%M:%S %Z", "%H:%M"],
        "canadian": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien": ["%H:%M:%S %Z", "%H h %M"],
        "catalan": ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified": ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional": ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic": ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian": ["%H:%M:%S (%Z)", "%H:%M"],
        "czech": ["%H:%M:%S %Z", "%H:%M"],
        "danish": ["%H.%M.%S %Z", "%H.%M"],
        "divehi": ["%H:%M:%S %Z", "%H:%M"],
        "dutch": ["%H:%M:%S %Z", "%H:%M"],
        "english": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto": ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian": ["%H:%M:%S %Z", "%H:%M"],
        "farsi": ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish": ["%H.%M.%S %Z", "%H.%M"],
        "french": ["%H:%M:%S %Z", "%H:%M"],
        "friulan": ["%H:%M:%S %Z", "%H:%M"],
        "galician": ["%H:%M:%S %Z", "%H:%M"],
        "georgian": ["%H:%M:%S %Z", "%H:%M"],
        "german": ["%H:%M:%S %Z", "%H:%M"],
        "german-ch": ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old": ["%H:%M:%S %Z", "%H:%M"],
        "greek": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew": ["%H:%M:%S %Z", "%H:%M"],
        "hindi": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic": ["%H:%M:%S %Z", "%H:%M"],
        "interlingua": ["%H:%M:%S %Z", "%H:%M"],
        "irish": ["%H:%M:%S %Z", "%H:%M"],
        "italian": ["%H:%M:%S %Z", "%H:%M"],
        "japanese": ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk": ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh": ["%H:%M:%S %Z", "%H:%M"],
        "khmer": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean": ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji": ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao": ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin": ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian": ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian": ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian": ["%H:%M:%S %Z", "%H:%M"],
        "macedonian": ["%H:%M:%S %Z", "%H:%M"],
        "magyar": ["%H:%M:%S %Z", "%H:%M"],
        "malayalam": ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian": ["%H:%M:%S %Z", "%H:%M"],
        "naustrian": ["%H:%M:%S %Z", "%H:%M"],
        "newzealand": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman": ["%H:%M:%S %Z", "%H:%M"],
        "norsk": ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk": ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan": ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese": ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish": ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese": ["%H:%M:%S %Z", "%H:%M"],
        "romanian": ["%H:%M:%S %Z", "%H:%M"],
        "romansh": ["%H:%M:%S %Z", "%H:%M"],
        "russian": ["%H:%M:%S %Z", "%H:%M"],
        "samin": ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit": ["%H:%M:%S %Z", "%H:%M"],
        "scottish": ["%H:%M:%S %Z", "%H:%M"],
        "serbian": ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin": ["%H:%M:%S %Z", "%H:%M"],
        "slovak": ["%H:%M:%S %Z", "%H:%M"],
        "slovene": ["%H:%M:%S %Z", "%H:%M"],
        "spanish": ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico": ["%H:%M:%S %Z", "%H:%M"],
        "swedish": ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac": ["%H:%M:%S %Z", "%H:%M"],
        "tamil": ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai": ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish": ["%H:%M:%S %Z", "%H:%M"],
        "turkmen": ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian": ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian": ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu": ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese": ["%H:%M:%S %Z", "%H:%M"],
        "welsh": ["%H:%M:%S %Z", "%H:%M"],
    }
    types = ["time", "fixtime", "modtime"]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)
    if lang not in timeformats:
        # Fall back to English formats rather than raising KeyError below.
        document.warning("Unknown language `%s'! Using English time formats." % lang)
        lang = "english"
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, "type", i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, "arg", i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isotime = ""
        dtme = datetime.now()
        tme = dtme.time()
        if tpv == "fixtime":
            # fixtime args have the form "<format>@<ISO time>"
            timecomps = argv.split("@")
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
                m = re.search(r"(\d\d):(\d\d):(\d\d)", isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search(r"(\d\d):(\d\d)", isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #    dte = date.fromtimestamp(os.path.getmtime(document.dir))
        result = ""
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Custom format: translate Qt-style format tokens to strftime
            fmt = (
                argv.replace("HH", "%H")
                .replace("H", "%H")
                .replace("hh", "%I")
                .replace("h", "%I")
            )
            fmt = (
                fmt.replace("mm", "%M")
                .replace("m", "%M")
                .replace("ss", "%S")
                .replace("s", "%S")
            )
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = (
                fmt.replace("AP", "%p")
                .replace("ap", "%p")
                .replace("A", "%p")
                .replace("a", "%p")
            )
            fmt = fmt.replace("'", "")
            # was dte.strftime(fmt): dte is undefined here; the time object is tme
            result = tme.strftime(fmt)
        # replace the inset with a single body line (as in revert_dateinfo)
        document.body[i : j + 1] = [result]
def revert_namenoextinfo(document):
    """Merge buffer Info inset type name-noext to name."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Info", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tline = find_token(document.body, "type", pos, end)
        if get_quoted_value(document.body, "type", tline) != "buffer":
            continue
        aline = find_token(document.body, "arg", pos, end)
        if get_quoted_value(document.body, "arg", aline) == "name-noext":
            document.body[aline] = 'arg "name"'
def revert_l7ninfo(document):
    """Revert l7n Info inset to text."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Info", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tline = find_token(document.body, "type", pos, end)
        if get_quoted_value(document.body, "type", tline) != "l7n":
            continue
        aline = find_token(document.body, "arg", pos, end)
        text = get_quoted_value(document.body, "arg", aline)
        # Strip trailing colons and the menu accelerator suffix (|...), then
        # drop qt accelerator markers (&) while protecting a literal " & ".
        text = text.rstrip(":").split("|")[0]
        text = text.replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # Assigning a str splices it character by character; LyX joins
        # adjacent body lines, so the rendered text is unchanged.
        document.body[pos : end + 1] = text
def revert_listpargs(document):
    """Reverts listpreamble arguments to TeX-code"""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Argument listpreamble:", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos)
        # Locate the paragraph layout holding this argument inset
        parent = get_containing_layout(document.body, pos)
        if parent is False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            continue
        parbeg = parent[3]
        plain_beg = find_token(document.body, "\\begin_layout Plain Layout", pos)
        plain_end = find_end_of_layout(document.body, plain_beg)
        inner = document.body[plain_beg + 1 : plain_end]
        # Drop the argument inset and re-insert its content as braced ERT
        # at the start of the paragraph.
        del document.body[pos : end + 1]
        ert = [
            "\\begin_inset ERT",
            "status collapsed",
            "",
            "\\begin_layout Plain Layout",
            "{",
        ]
        ert.extend(inner)
        ert.extend(["}", "\\end_layout", "", "\\end_inset", ""])
        document.body[parbeg:parbeg] = ert
def revert_lformatinfo(document):
    """Revert layout format Info inset to text."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Info", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tline = find_token(document.body, "type", pos, end)
        if get_quoted_value(document.body, "type", tline) != "lyxinfo":
            continue
        aline = find_token(document.body, "arg", pos, end)
        if get_quoted_value(document.body, "arg", aline) != "layoutformat":
            continue
        # hardcoded for now (the str splices as ["6", "9"]; LyX joins
        # adjacent body lines when reading)
        document.body[pos : end + 1] = "69"
def convert_hebrew_parentheses(document):
    """Swap opening/closing parentheses in Hebrew text.
    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.

    The body is scanned once while tracking the current language (one
    stack entry per open layout) and the stack of open insets, so the
    swap is applied only to Hebrew text and skipped in verbatim-like
    insets and pass-thru inset/layout arguments.
    """
    current_languages = [document.language]  # language stack, one entry per open layout
    current_layouts = []  # stack of open layout names
    current_insets = []  # stack of open (non-Argument) inset names
    # pass thru argument insets: maps layout/inset name -> argument ids to skip
    skip_layouts_arguments = {}
    skip_insets_arguments = {}
    # pass thru insets
    skip_insets = ["Formula", "ERT", "listings", "Flex URL"]
    # pass thru insets per document class
    if document.textclass in [
        "beamer",
        "scrarticle-beamer",
        "beamerposter",
        "article-beamer",
    ]:
        skip_layouts_arguments.update(
            {
                "Itemize": ["1", "item:2"],
                "Enumerate": ["1", "item:2"],
                "Description": ["1", "item:1"],
                "Part": ["1"],
                "Section": ["1"],
                "Section*": ["1"],
                "Subsection": ["1"],
                "Subsection*": ["1"],
                "Subsubsection": ["1"],
                "Subsubsection*": ["1"],
                "Frame": ["1", "2"],
                "AgainFrame": ["1", "2"],
                "PlainFrame": ["1", "2"],
                "FragileFrame": ["1", "2"],
                "FrameTitle": ["1"],
                "FrameSubtitle": ["1"],
                "Overprint": ["item:1"],
                "Uncover": ["1"],
                "Only": ["1"],
                "Block": ["1"],
                "ExampleBlock": ["1"],
                "AlertBlock": ["1"],
                "Quotation": ["1"],
                "Quote": ["1"],
                "Verse": ["1"],
                "Corollary": ["1"],
                # NOTE(review): "Corollary" is listed twice; the duplicate
                # key is harmless (same value) but redundant.
                "Corollary": ["1"],
                "Definition": ["1"],
                "Definitions": ["1"],
                "Example": ["1"],
                "Examples": ["1"],
                "Fact": ["1"],
                "Lemma": ["1"],
                "proof": ["1"],
                "Theorem": ["1"],
                "NoteItem": ["1"],
            }
        )
        skip_insets_arguments.update(
            {
                "Flex Bold": ["1"],
                "Flex Emphasize": ["1"],
                "Flex Alert": ["1"],
                "Flex Structure": ["1"],
                "Flex Only": ["1"],
                "Flex Uncover": ["1"],
                "Flex Visible": ["1"],
                "Flex Invisible": ["1"],
                "Flex Alternative": ["1"],
                "Flex Beamer Note": ["1"],
            }
        )
    elif document.textclass == "europecv":
        skip_layouts_arguments.update({"Picture": ["1"], "Item": ["1"], "MotherTongue": ["1"]})
    elif document.textclass in ["acmsiggraph", "acmsiggraph-0-92"]:
        skip_insets_arguments.update({"Flex CRcat": ["1", "2", "3"]})
    elif document.textclass in ["aastex", "aastex6", "aastex62"]:
        skip_layouts_arguments.update(
            {
                "Altaffilation": ["1"],
            }
        )
    elif document.textclass == "jss":
        skip_insets.append("Flex Code Chunk")
    elif document.textclass == "moderncv":
        skip_layouts_arguments.update(
            {
                "Photo": ["1", "2"],
            }
        )
        skip_insets_arguments.update({"Flex Column": ["1"]})
    elif document.textclass == "agutex":
        skip_layouts_arguments.update({"Author affiliation": ["1"]})
    elif document.textclass in ["ijmpd", "ijmpc"]:
        skip_layouts_arguments.update({"RomanList": ["1"]})
    elif document.textclass in ["jlreq-book", "jlreq-report", "jlreq-article"]:
        skip_insets.append("Flex Warichu*")
    # pass thru insets per module
    if "hpstatement" in document.get_module_list():
        skip_insets.append("Flex H-P number")
    if "tcolorbox" in document.get_module_list():
        skip_layouts_arguments.update({"New Color Box Type": ["3"]})
    if "sweave" in document.get_module_list():
        skip_insets.extend(
            [
                "Flex Sweave Options",
                "Flex S/R expression",
                "Flex Sweave Input File",
                "Flex Chunk",
            ]
        )
    if "knitr" in document.get_module_list():
        skip_insets.extend(["Flex Sweave Options", "Flex S/R expression", "Flex Chunk"])
    if "linguistics" in document.get_module_list():
        skip_layouts_arguments.update(
            {
                "Numbered Example (multiline)": ["1"],
                "Numbered Examples (consecutive)": ["1"],
                "Subexample": ["1"],
            }
        )
    if "chessboard" in document.get_module_list():
        skip_insets.append("Flex Mainline")
        skip_layouts_arguments.update({"NewChessGame": ["1"]})
        skip_insets_arguments.update({"Flex ChessBoard": ["1"]})
    if "lilypond" in document.get_module_list():
        skip_insets.append("Flex LilyPond")
    if "noweb" in document.get_module_list():
        skip_insets.append("Flex Chunk")
    if "multicol" in document.get_module_list():
        skip_insets_arguments.update({"Flex Multiple Columns": ["1"]})
    i = 0
    inset_is_arg = False  # True while (possibly nested) inside an Argument inset
    while i < len(document.body):
        line = document.body[i]
        if line.startswith("\\lang "):
            tokenend = len("\\lang ")
            lang = line[tokenend:].strip()
            # a language switch holds until the end of the current layout
            current_languages[-1] = lang
        elif line.startswith("\\begin_layout "):
            # new layout starts in the language of the enclosing one
            current_languages.append(current_languages[-1])
            tokenend = len("\\begin_layout ")
            layout = line[tokenend:].strip()
            current_layouts.append(layout)
        elif line.startswith("\\end_layout"):
            current_languages.pop()
            current_layouts.pop()
        elif line.startswith("\\begin_inset Argument "):
            tokenend = len("\\begin_inset Argument ")
            Argument = line[tokenend:].strip()
            # all listpreamble:1 arguments are pass thru
            listpreamble = Argument == "listpreamble:1"
            layout_arg = current_layouts and Argument in skip_layouts_arguments.get(
                current_layouts[-1], []
            )
            inset_arg = current_insets and Argument in skip_insets_arguments.get(
                current_insets[-1], []
            )
            if layout_arg or inset_arg or listpreamble:
                # In these arguments, parentheses must not be changed
                # (jump past the whole inset including its \end_inset line)
                i = find_end_of_inset(document.body, i) + 1
                continue
            else:
                inset_is_arg = True
        elif line.startswith("\\begin_inset "):
            tokenend = len("\\begin_inset ")
            inset = line[tokenend:].strip()
            current_insets.append(inset)
            if inset in skip_insets:
                # In these insets, parentheses must not be changed
                # (i lands on the \end_inset line, which pops the name above)
                i = find_end_of_inset(document.body, i)
                continue
        elif line.startswith("\\end_inset"):
            if inset_is_arg:
                # Argument insets were never pushed; just check whether we
                # are still inside one.
                inset_is_arg = is_in_inset(document.body, i, "\\begin_inset Argument")[0] != -1
            else:
                current_insets.pop()
        elif current_languages[-1] == "hebrew" and not line.startswith("\\"):
            # swap "(" and ")" via a NUL placeholder so the second replace
            # does not clobber the first
            document.body[i] = line.replace("(", "\x00").replace(")", "(").replace("\x00", ")")
        i += 1
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed"""
    # Swapping parentheses is its own inverse, so reverting simply runs
    # the conversion again; the wrapper exists only to keep the
    # convert/revert naming convention.
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output"""
    # Delegate to the generic language-reversion helper.
    revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
    """Revert soul module flex insets to ERT"""
    soul_flexes = ("Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize")
    # The soul package is needed as soon as any of these insets occurs.
    for name in soul_flexes:
        if find_token(document.body, "\\begin_inset Flex %s" % name, 0) != -1:
            add_to_preamble(document, ["\\usepackage{soul}"])
            break
    # Highlighting additionally relies on the color package.
    if find_token(document.body, "\\begin_inset Flex Highlight", 0) != -1:
        add_to_preamble(document, ["\\usepackage{color}"])
    for name, macro in (
        ("Spaceletters", "\\so"),
        ("Strikethrough", "\\st"),
        ("Underline", "\\ul"),
        ("Highlight", "\\hl"),
        ("Capitalize", "\\caps"),
    ):
        revert_flex_inset(document, name, macro)
def revert_tablestyle(document):
    """Remove tablestyle params"""
    pos = find_token(document.header, "\\tablestyle")
    if pos != -1:
        document.header.pop(pos)
def revert_bibfileencodings(document):
    """Revert individual Biblatex bibliography encodings.

    Per-file encodings ("file_encodings" in bibtex insets) are not supported
    before this format.  With biblatex, they are emulated by emitting
    explicit \\addbibresource[bibencoding=...] preamble lines, inserting an
    ERT \\printbibliography and commenting out the inset; with plain BibTeX
    the setting is silently dropped.
    """
    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)
    # Check if biblatex
    biblatex = engine in ["biblatex", "biblatex-natbib"]
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of bibtex inset at line %d!!" % (i))
            continue
        # tab-separated list of "<bibfile> <encoding>" pairs
        file_encodings = get_quoted_value(document.body, "file_encodings", i, j)
        if not file_encodings:
            i = j
            continue
        bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        if len(bibfiles) == 0:
            document.warning("Bibtex inset at line %d does not have a bibfile!" % (i))
        # remove the no longer supported encoding line
        k = find_token(document.body, "file_encodings", i, j)
        if k != -1:
            del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        if biblatex:
            # Build {bibfile: encoding} from the stored pairs
            encmap = dict()
            for pp in file_encodings.split("\t"):
                ppp = pp.split(" ", 1)
                if len(ppp) == 2:
                    encmap[ppp[0]] = ppp[1]
            for bib in bibfiles:
                pr = "\\addbibresource"
                if bib in encmap.keys():
                    pr += "[bibencoding=" + encmap[bib] + "]"
                pr += "{" + bib + "}"
                add_to_preamble(document, [pr])
            # Insert ERT \\printbibliography and wrap bibtex inset to a Note
            pcmd = "printbibliography"
            if opts:
                pcmd += "[" + opts + "]"
            repl = [
                "\\begin_inset ERT",
                "status open",
                "",
                "\\begin_layout Plain Layout",
                "",
                "",
                "\\backslash",
                pcmd,
                "\\end_layout",
                "",
                "\\end_inset",
                "",
                "",
                "\\end_layout",
                "",
                "\\begin_layout Standard",
                "\\begin_inset Note Note",
                "status open",
                "",
                "\\begin_layout Plain Layout",
            ]
            repl += document.body[i : j + 1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i : j + 1] = repl
            # skip past the inserted material before searching again
            j += 27
        i = j
def revert_cmidruletrimming(document):
    """Remove \\cmidrule trimming"""
    # FIXME: Revert to TeX code?
    trim_rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
    pos = 0
    while True:
        # first, let's find out if we need to do anything
        pos = find_token(document.body, "<cell ", pos + 1)
        if pos == -1:
            return
        if 'trim="' not in document.body[pos]:
            continue
        # remove trim option
        document.body[pos] = trim_rgx.sub("", document.body[pos])
ruby_inset_def = [
r"### Inserted by lyx2lyx (ruby inset) ###",
r"InsetLayout Flex:Ruby",
r" LyxType charstyle",
r" LatexType command",
r" LatexName ruby",
r" HTMLTag ruby",
r' HTMLAttr ""',
r" HTMLInnerTag rb",
r' HTMLInnerAttr ""',
r" BgColor none",
r' LabelString "Ruby"',
r" Decoration Conglomerate",
r" Preamble",
r" \ifdefined\kanjiskip",
r" \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}",
r" \else \ifdefined\luatexversion",
r" \usepackage{luatexja-ruby}",
r" \else \ifdefined\XeTeXversion",
r" \usepackage{ruby}%",
r" \fi\fi\fi",
r" \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}",
r" EndPreamble",
r" Argument post:1",
r' LabelString "ruby text"',
r' MenuString "Ruby Text|R"',
r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
r" Decoration Conglomerate",
r" Font",
r" Size tiny",
r" EndFont",
r" LabelFont",
r" Size tiny",
r" EndFont",
r" Mandatory 1",
r" EndArgument",
r"End",
]
def convert_ruby_module(document):
    """Use ruby module instead of local module definition"""
    # Only activate the module when the verbatim local layout was present.
    if not document.del_local_layout(ruby_inset_def):
        return
    document.add_module("ruby")
def revert_ruby_module(document):
    """Replace ruby module with local module definition"""
    # Re-insert the local layout only if the module was actually in use.
    if not document.del_module("ruby"):
        return
    document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    """Use generic utf8 with Japanese documents."""
    lang = get_value(document.header, "\\language")
    if not lang.startswith("japanese"):
        return
    # Japanese-specific utf8 flavour expected for each language variant.
    variant_enc = {"japanese": "utf8-platex", "japanese-cjk": "utf8-cjk"}
    inputenc = get_value(document.header, "\\inputencoding")
    if variant_enc.get(lang) == inputenc:
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    """Use Japanese utf8 variants with Japanese documents."""
    if get_value(document.header, "\\inputencoding") != "utf8":
        return
    # Map each Japanese language variant back to its specific utf8 flavour.
    variant_enc = {"japanese": "utf8-platex", "japanese-cjk": "utf8-cjk"}
    newenc = variant_enc.get(get_value(document.header, "\\language"))
    if newenc is not None:
        document.set_parameter("inputencoding", newenc)
def revert_lineno(document):
    "Replace lineno setting with user-preamble code."
    # Consume both header entries unconditionally (delete=True), even if
    # line numbering turns out to be disabled.
    opts = get_quoted_value(document.header, "\\lineno_options", delete=True)
    if not get_bool_value(document.header, "\\use_lineno", delete=True):
        return
    bracket = "[%s]" % opts if opts else ""
    add_to_preamble(document, ["\\usepackage%s{lineno}" % bracket, "\\linenumbers"])
def convert_lineno(document):
    "Replace user-preamble code with native lineno support."
    use_lineno = 0
    options = ""
    # Look for "\linenumbers" preceded by "\usepackage[...]{lineno}".
    # Start at 1 so index i-1 below is always valid.
    i = find_token(document.preamble, "\\linenumbers", 1)
    if i > 0:
        usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i - 1])
        if usepkg:
            use_lineno = 1
            options = usepkg.group(1).strip("[]")
            # Remove both the \usepackage and the \linenumbers line.
            del document.preamble[i - 1 : i + 1]
            if i > 1:
                # Also remove the marker comment lyx2lyx left behind.
                del_token(document.preamble, "% Added by lyx2lyx", i - 2, i - 1)
    # Insert the native header settings just before the "\index" entry.
    k = find_token(document.header, "\\index ")
    if options == "":
        document.header[k:k] = ["\\use_lineno %d" % use_lineno]
    else:
        document.header[k:k] = [
            "\\use_lineno %d" % use_lineno,
            "\\lineno_options %s" % options,
        ]
def convert_aaencoding(document):
    "Convert default document option due to encoding change in aa class."
    if document.textclass != "aa":
        return
    i = find_token(document.header, "\\use_default_options true")
    if i == -1:
        return
    inputenc = get_value(document.header, "\\inputencoding")
    if not inputenc:
        document.warning("Malformed LyX Document! Missing '\\inputencoding' header.")
        return
    if inputenc not in ("auto-legacy", "latin9"):
        return
    # The class default encoding changed: request latin9 explicitly.
    document.header[i] = "\\use_default_options false"
    k = find_token(document.header, "\\options")
    if k == -1:
        document.header.insert(i, "\\options latin9")
    else:
        document.header[k] += ",latin9"
def revert_aaencoding(document):
    "Revert default document option due to encoding change in aa class."
    if document.textclass != "aa":
        return
    i = find_token(document.header, "\\use_default_options true")
    if i == -1:
        return
    inputenc = get_value(document.header, "\\inputencoding")
    if not inputenc:
        document.warning("Malformed LyX Document! Missing \\inputencoding header.")
        return
    if inputenc != "utf8":
        return
    # Make utf8 an explicit class option again.
    document.header[i] = "\\use_default_options false"
    k = find_token(document.header, "\\options", 0)
    if k == -1:
        document.header.insert(i, "\\options utf8")
    else:
        document.header[k] += ",utf8"
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography)."""
    # lyxname: (babelname, polyglossianame)
    new_languages = {
        "azerbaijani": ("azerbaijani", ""),
        "bengali": ("", "bengali"),
        "churchslavonic": ("", "churchslavonic"),
        "oldrussian": ("", "russian"),
        "korean": ("", "korean"),
    }
    # Collect which of the new languages the document actually uses:
    # the document language plus any \lang switch in the body.
    used_languages = set()
    if document.language in new_languages:
        used_languages.add(document.language)
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang", pos + 1)
        if pos == -1:
            break
        lang = get_value(document.body, "\\lang", pos)
        if lang in new_languages:
            used_languages.add(lang)
    # Korean is already supported via CJK, so leave as-is for Babel
    babel_in_use = (
        not get_bool_value(document.header, "\\use_non_tex_fonts")
        or get_value(document.header, "\\language_package") == "babel"
    )
    if babel_in_use:
        used_languages.discard("korean")
    for lang in used_languages:
        revert_language(document, lang, *new_languages[lang])
gloss_inset_def = [
r"### Inserted by lyx2lyx (deprecated ling glosses) ###",
r"InsetLayout Flex:Glosse",
r" LyXType custom",
r' LabelString "Gloss (old version)"',
r' MenuString "Gloss (old version)"',
r" LatexType environment",
r" LatexName linggloss",
r" Decoration minimalistic",
r" LabelFont",
r" Size Small",
r" EndFont",
r" MultiPar true",
r" CustomPars false",
r" ForcePlain true",
r" ParbreakIsNewline true",
r" FreeSpacing true",
r" Requires covington",
r" Preamble",
r" \def\glosstr{}",
r" \@ifundefined{linggloss}{%",
r" \newenvironment{linggloss}[2][]{",
r" \def\glosstr{\glt #1}%",
r" \gll #2}",
r" {\glosstr\glend}}{}",
r" EndPreamble",
r" InToc true",
r" ResetsFont true",
r" Argument 1",
r" Decoration conglomerate",
r' LabelString "Translation"',
r' MenuString "Glosse Translation|s"',
r' Tooltip "Add a translation for the glosse"',
r" EndArgument",
r"End",
]
glosss_inset_def = [
r"### Inserted by lyx2lyx (deprecated ling glosses) ###",
r"InsetLayout Flex:Tri-Glosse",
r" LyXType custom",
r' LabelString "Tri-Gloss (old version)"',
r' MenuString "Tri-Gloss (old version)"',
r" LatexType environment",
r" LatexName lingglosss",
r" Decoration minimalistic",
r" LabelFont",
r" Size Small",
r" EndFont",
r" MultiPar true",
r" CustomPars false",
r" ForcePlain true",
r" ParbreakIsNewline true",
r" FreeSpacing true",
r" InToc true",
r" Requires covington",
r" Preamble",
r" \def\glosstr{}",
r" \@ifundefined{lingglosss}{%",
r" \newenvironment{lingglosss}[2][]{",
r" \def\glosstr{\glt #1}%",
r" \glll #2}",
r" {\glosstr\glend}}{}",
r" EndPreamble",
r" ResetsFont true",
r" Argument 1",
r" Decoration conglomerate",
r' LabelString "Translation"',
r' MenuString "Glosse Translation|s"',
r' Tooltip "Add a translation for the glosse"',
r" EndArgument",
r"End",
]
def convert_linggloss(document):
    "Move old ling glosses to local layout"
    # Add the emulation layout for each deprecated gloss inset that occurs.
    for flex, layout in (
        ("\\begin_inset Flex Glosse", gloss_inset_def),
        ("\\begin_inset Flex Tri-Glosse", glosss_inset_def),
    ):
        if find_token(document.body, flex, 0) != -1:
            document.append_local_layout(layout)
def revert_linggloss(document):
    "Revert to old ling gloss definitions"
    if "linguistics" not in document.get_module_list():
        return
    # Drop the emulation layouts if a previous conversion added them.
    document.del_local_layout(gloss_inset_def)
    document.del_local_layout(glosss_inset_def)
    cov_req = False  # whether "Requires covington" has been added yet
    glosses = [
        "\\begin_inset Flex Interlinear Gloss (2 Lines)",
        "\\begin_inset Flex Interlinear Gloss (3 Lines)",
    ]
    for glosse in glosses:
        i = 0
        while True:
            i = find_token(document.body, glosse, i + 1)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Gloss inset")
                continue
            # Optional translation argument.
            arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            optargcontent = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find optarg plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]
            # First gloss line (Argument post:1).
            arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            marg1content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]
            # Second gloss line (Argument post:2).
            arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
            endarg = find_end_of_inset(document.body, arg)
            marg2content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]
            # Third gloss line (Argument post:3, tri-glosses only).
            arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
            endarg = find_end_of_inset(document.body, arg)
            marg3content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]
            cmd = "\\digloss"
            if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
                cmd = "\\trigloss"
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endInset = find_end_of_inset(document.body, i)
            endPlain = find_end_of_layout(document.body, beginPlain)
            # Rebuild the gloss as ERT: \digloss[opt]{...}{...} or
            # \trigloss[opt]{...}{...}{...}.
            precontent = put_cmd_in_ert(cmd)
            if len(optargcontent) > 0:
                precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
            precontent += put_cmd_in_ert("{")
            postcontent = (
                put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
            )
            if cmd == "\\trigloss":
                postcontent += put_cmd_in_ert("}{") + marg3content
            postcontent += put_cmd_in_ert("}")
            # Splice back-to-front so earlier indices stay valid.
            document.body[endPlain : endInset + 1] = postcontent
            document.body[beginPlain + 1 : beginPlain] = precontent
            del document.body[i : beginPlain + 1]
            if not cov_req:
                document.append_local_layout("Requires covington")
                cov_req = True
            i = beginPlain
def revert_subexarg(document):
    "Revert linguistic subexamples with argument to ERT"
    if "linguistics" not in document.get_module_list():
        return
    cov_req = False  # whether "Requires covington" has been added yet
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout Subexample", i + 1)
        if i == -1:
            break
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Subexample layout")
            continue
        # Extend j over a whole run of consecutive Subexample paragraphs.
        while True:
            # check for consecutive layouts
            k = find_token(document.body, "\\begin_layout", j)
            if k == -1 or document.body[k] != "\\begin_layout Subexample":
                break
            j = find_end_of_layout(document.body, k)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Subexample layout")
                continue
        # Only Subexamples with an optional argument need reverting.
        arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
        if arg == -1:
            continue
        endarg = find_end_of_inset(document.body, arg)
        optargcontent = ""
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find optarg plain Layout")
            continue
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
        # remove Arg insets and paragraph, if it only contains this inset
        if (
            document.body[arg - 1] == "\\begin_layout Plain Layout"
            and find_end_of_layout(document.body, arg - 1) == endarg + 3
        ):
            del document.body[arg - 1 : endarg + 4]
        else:
            del document.body[arg : endarg + 1]
        cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
        # re-find end of layout
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Subexample layout")
            continue
        # Turn each paragraph of the run into Standard + ERT "\item".
        while True:
            # check for consecutive layouts
            k = find_token(document.body, "\\begin_layout", j)
            if k == -1 or document.body[k] != "\\begin_layout Subexample":
                break
            document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
            j = find_end_of_layout(document.body, k)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Subexample layout")
                continue
        # Close the environment after the last paragraph ...
        endev = put_cmd_in_ert("\\end{subexamples}")
        document.body[j:j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
        # ... and open it (with the first \item) in place of the first one.
        document.body[i : i + 1] = (
            ["\\begin_layout Standard"]
            + cmd
            + ["\\end_layout", "", "\\begin_layout Standard"]
            + put_cmd_in_ert("\\item ")
        )
        if not cov_req:
            document.append_local_layout("Requires covington")
            cov_req = True
def revert_drs(document):
    "Revert DRS insets (linguistics) to ERT"
    if "linguistics" not in document.get_module_list():
        return
    cov_req = False  # whether the covington preamble code has been added yet
    drses = [
        "\\begin_inset Flex DRS",
        "\\begin_inset Flex DRS*",
        "\\begin_inset Flex IfThen-DRS",
        "\\begin_inset Flex Cond-DRS",
        "\\begin_inset Flex QDRS",
        "\\begin_inset Flex NegDRS",
        "\\begin_inset Flex SDRS",
    ]
    for drs in drses:
        i = 0
        while True:
            i = find_token(document.body, drs, i + 1)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue
            # Check for arguments
            arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            prearg1content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning(
                        "Malformed LyX document: Can't find Argument 1 plain Layout"
                    )
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]
            # re-find inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue
            arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
            endarg = find_end_of_inset(document.body, arg)
            prearg2content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning(
                        "Malformed LyX document: Can't find Argument 2 plain Layout"
                    )
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]
            # re-find inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue
            arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg1content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning(
                        "Malformed LyX document: Can't find Argument post:1 plain Layout"
                    )
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]
            # re-find inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue
            arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg2content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning(
                        "Malformed LyX document: Can't find Argument post:2 plain Layout"
                    )
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]
            # re-find inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue
            arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg3content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning(
                        "Malformed LyX document: Can't find Argument post:3 plain Layout"
                    )
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]
            # re-find inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue
            arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg4content = []
            if arg != -1:
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning(
                        "Malformed LyX document: Can't find Argument post:4 plain Layout"
                    )
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]
            # The respective LaTeX command
            cmd = "\\drs"
            if drs == "\\begin_inset Flex DRS*":
                cmd = "\\drs*"
            elif drs == "\\begin_inset Flex IfThen-DRS":
                cmd = "\\ifdrs"
            elif drs == "\\begin_inset Flex Cond-DRS":
                cmd = "\\condrs"
            elif drs == "\\begin_inset Flex QDRS":
                cmd = "\\qdrs"
            elif drs == "\\begin_inset Flex NegDRS":
                cmd = "\\negdrs"
            elif drs == "\\begin_inset Flex SDRS":
                cmd = "\\sdrs"
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endInset = find_end_of_inset(document.body, i)
            endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
            # Rebuild the inset as ERT around its remaining main content.
            precontent = put_cmd_in_ert(cmd)
            precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
            if drs == "\\begin_inset Flex SDRS":
                # \sdrs takes a second pre-argument
                precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
            precontent += put_cmd_in_ert("{")
            postcontent = []
            if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
                postcontent = (
                    put_cmd_in_ert("}{")
                    + postarg1content
                    + put_cmd_in_ert("}{")
                    + postarg2content
                    + put_cmd_in_ert("}")
                )
                if cmd == "\\condrs" or cmd == "\\qdrs":
                    postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
                if cmd == "\\qdrs":
                    postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
            else:
                postcontent = put_cmd_in_ert("}")
            # Splice back-to-front so earlier indices stay valid.
            document.body[endPlain : endInset + 1] = postcontent
            document.body[beginPlain + 1 : beginPlain] = precontent
            del document.body[i : beginPlain + 1]
            if not cov_req:
                document.append_local_layout("Provides covington 1")
                add_to_preamble(document, ["\\usepackage{drs,covington}"])
                cov_req = True
            i = beginPlain
def revert_babelfont(document):
    """Reverts the use of \\babelfont to user preamble.

    With non-TeX fonts and the babel language package, move the roman/sans/
    typewriter font choices (plus OSF and scale settings) from the header
    into \\babelfont / \\defaultfontfeatures preamble code, resetting the
    header values to their defaults.
    """
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    i = find_token(document.header, "\\language_package", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\language_package.")
        return
    if get_value(document.header, "\\language_package", 0) != "babel":
        return

    # check font settings
    # defaults
    roman = sans = typew = "default"
    osf = False
    sf_scale = tt_scale = 100.0

    j = find_token(document.header, "\\font_roman", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        roman = romanfont[2].strip('"')
        romanfont[2] = '"default"'
        document.header[j] = " ".join(romanfont)

    j = find_token(document.header, "\\font_sans", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        sans = sansfont[2].strip('"')
        sansfont[2] = '"default"'
        document.header[j] = " ".join(sansfont)

    j = find_token(document.header, "\\font_typewriter", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        typew = ttfont[2].strip('"')
        ttfont[2] = '"default"'
        document.header[j] = " ".join(ttfont)

    i = find_token(document.header, "\\font_osf", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_osf.")
    else:
        osf = str2bool(get_value(document.header, "\\font_osf", i))

    j = find_token(document.header, "\\font_sf_scale", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing \\font_sf_scale.")
    else:
        sfscale = document.header[j].split()
        val = sfscale[2]
        sfscale[2] = "100"
        document.header[j] = " ".join(sfscale)
        try:
            # float() raises ValueError on a malformed number; do not use a
            # bare except here, which would also swallow KeyboardInterrupt.
            sf_scale = float(val)
        except ValueError:
            document.warning("Invalid font_sf_scale value: " + val)

    j = find_token(document.header, "\\font_tt_scale", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing \\font_tt_scale.")
    else:
        ttscale = document.header[j].split()
        val = ttscale[2]
        ttscale[2] = "100"
        document.header[j] = " ".join(ttscale)
        try:
            # see above: catch only the error float() can actually raise
            tt_scale = float(val)
        except ValueError:
            document.warning("Invalid font_tt_scale value: " + val)

    # set preamble stuff
    pretext = ["%% This document must be processed with xelatex or lualatex!"]
    pretext.append("\\AtBeginDocument{%")
    have_append = False
    if roman != "default":
        pretext.append("\\babelfont{rm}[Mapping=tex-text]{" + roman + "}")
        have_append = True
    if sans != "default":
        sf = "\\babelfont{sf}["
        if sf_scale != 100.0:
            sf += "Scale=" + str(sf_scale / 100.0) + ","
        sf += "Mapping=tex-text]{" + sans + "}"
        pretext.append(sf)
        have_append = True
    if typew != "default":
        tw = "\\babelfont{tt}"
        if tt_scale != 100.0:
            tw += "[Scale=" + str(tt_scale / 100.0) + "]"
        tw += "{" + typew + "}"
        pretext.append(tw)
        have_append = True
    if osf:
        pretext.append("\\defaultfontfeatures{Numbers=OldStyle}")
        have_append = True
    if have_append:
        pretext.append("}")
    insert_to_preamble(document, pretext)
def revert_minionpro(document):
    "Revert native MinionPro font definition (with extra options) to LaTeX"
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    x = find_re(document.header, re.compile(r"(\\font_roman_opts)"), 0)
    if x == -1:
        return
    # re.findall is used because split() does not respect quote protection.
    opts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])[1].strip('"')

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
        return
    romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
    if romanfont[1].strip('"') != "minionpro":
        return
    romanfont[1] = '"default"'
    document.header[i] = " ".join(romanfont)

    j = find_token(document.header, "\\font_osf true", 0)
    osf = j != -1
    preamble = "\\usepackage["
    if osf:
        document.header[j] = "\\font_osf false"
    else:
        # without OSF, request lining figures explicitly
        preamble += "lf,"
    preamble += opts + "]{MinionPro}"
    add_to_preamble(document, [preamble])
    del document.header[x]
def revert_font_opts(document):
    """Revert font options by outputting \\setxxxfont or \\babelfont to the preamble.

    For each of the roman/sans/typewriter families: if a \\font_xxx_opts
    header entry exists, delete it and — when non-TeX fonts are in use —
    reset the corresponding \\font_xxx value to "default", emitting an
    equivalent fontspec command (or \\babelfont with babel) instead.
    """
    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
    Babel = get_value(document.header, "\\language_package") == "babel"

    # 1. Roman
    regexp = re.compile(r"(\\font_roman_opts)")
    i = find_re(document.header, regexp, 0)
    if i != -1:
        # We need to use this regex since split() does not handle quote protection
        romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        opts = romanopts[1].strip('"')
        del document.header[i]
        if NonTeXFonts:
            regexp = re.compile(r"(\\font_roman)")
            i = find_re(document.header, regexp, 0)
            if i != -1:
                # We need to use this regex since split() does not handle quote protection
                romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
                font = romanfont[2].strip('"')
                romanfont[2] = '"default"'
                document.header[i] = " ".join(romanfont)
                if font != "default":
                    if Babel:
                        preamble = "\\babelfont{rm}["
                    else:
                        preamble = "\\setmainfont["
                    preamble += opts
                    preamble += ","
                    preamble += "Mapping=tex-text]{"
                    preamble += font
                    preamble += "}"
                    add_to_preamble(document, [preamble])

    # 2. Sans
    regexp = re.compile(r"(\\font_sans_opts)")
    i = find_re(document.header, regexp, 0)
    if i != -1:
        # Scale values are percentages kept as *strings*: keep the default as
        # a string too, so the "no scaling" comparison below cannot silently
        # fail (the previous int/str comparison emitted a bogus Scale=0.100
        # for documents scaled at exactly 100%).
        scaleval = "100"
        # We need to use this regex since split() does not handle quote protection
        sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        opts = sfopts[1].strip('"')
        del document.header[i]
        if NonTeXFonts:
            regexp = re.compile(r"(\\font_sf_scale)")
            i = find_re(document.header, regexp, 0)
            if i != -1:
                # second value is the scaling for non-TeX fonts
                scaleval = get_value(document.header, "\\font_sf_scale", i).split()[1]
            regexp = re.compile(r"(\\font_sans)")
            i = find_re(document.header, regexp, 0)
            if i != -1:
                # We need to use this regex since split() does not handle quote protection
                sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
                font = sffont[2].strip('"')
                sffont[2] = '"default"'
                document.header[i] = " ".join(sffont)
                if font != "default":
                    if Babel:
                        preamble = "\\babelfont{sf}["
                    else:
                        preamble = "\\setsansfont["
                    preamble += opts
                    preamble += ","
                    if scaleval != "100":
                        # FIXME: "0." + value is only correct for two-digit
                        # percentages below 100
                        preamble += "Scale=0."
                        preamble += scaleval
                        preamble += ","
                    preamble += "Mapping=tex-text]{"
                    preamble += font
                    preamble += "}"
                    add_to_preamble(document, [preamble])

    # 3. Typewriter
    regexp = re.compile(r"(\\font_typewriter_opts)")
    i = find_re(document.header, regexp, 0)
    if i != -1:
        # keep the scale as a string (see comment in the sans branch)
        scaleval = "100"
        # We need to use this regex since split() does not handle quote protection
        ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        opts = ttopts[1].strip('"')
        del document.header[i]
        if NonTeXFonts:
            regexp = re.compile(r"(\\font_tt_scale)")
            i = find_re(document.header, regexp, 0)
            if i != -1:
                # second value is the scaling for non-TeX fonts
                scaleval = get_value(document.header, "\\font_tt_scale", i).split()[1]
            regexp = re.compile(r"(\\font_typewriter)")
            i = find_re(document.header, regexp, 0)
            if i != -1:
                # We need to use this regex since split() does not handle quote protection
                ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
                font = ttfont[2].strip('"')
                ttfont[2] = '"default"'
                document.header[i] = " ".join(ttfont)
                if font != "default":
                    if Babel:
                        preamble = "\\babelfont{tt}["
                    else:
                        preamble = "\\setmonofont["
                    preamble += opts
                    preamble += ","
                    if scaleval != "100":
                        # FIXME: see the corresponding note in the sans branch
                        preamble += "Scale=0."
                        preamble += scaleval
                        preamble += ","
                    preamble += "Mapping=tex-text]{"
                    preamble += font
                    preamble += "}"
                    add_to_preamble(document, [preamble])
def revert_plainNotoFonts_xopts(document):
    "Revert native (straight) Noto font definition (with extra options) to LaTeX"
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    y = find_token(document.header, "\\font_osf true", 0)
    osf = y != -1

    x = find_re(document.header, re.compile(r"(\\font_roman_opts)"), 0)
    if x == -1 and not osf:
        return

    # Collect the package options (re.findall respects quote protection,
    # unlike split()).
    opts = ""
    if x != -1:
        opts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])[1].strip('"')
    if osf:
        opts = opts + ", osf" if opts != "" else "osf"

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        return
    romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
    if romanfont[1].strip('"') != "NotoSerif-TLF":
        return

    # noto only acts as a "complete font" when sans and typewriter are left
    # at their defaults.
    for token in ("\\font_sans", "\\font_typewriter"):
        j = find_token(document.header, token, 0)
        if j == -1:
            return
        family = re.findall(r'[^"\s]\S*|".+?"', document.header[j])[1].strip('"')
        if family != "default":
            return

    romanfont[1] = '"default"'
    document.header[i] = " ".join(romanfont)
    add_to_preamble(document, ["\\usepackage[" + opts + "]{noto}"])
    if osf:
        document.header[y] = "\\font_osf false"
    if x != -1:
        del document.header[x]
def revert_notoFonts_xopts(document):
    "Revert native (extended) Noto font definition (with extra options) to LaTeX"
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fontmap = {}
    # revert_fonts() fills fontmap with the preamble code to add.
    if revert_fonts(document, createFontMapping(["Noto"]), fontmap, True):
        add_preamble_fonts(document, fontmap)
def revert_IBMFonts_xopts(document):
    "Revert native IBM font definition (with extra options) to LaTeX"
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fontmap = {}
    # revert_fonts() fills fontmap with the preamble code to add.
    if revert_fonts(document, createFontMapping(["IBM"]), fontmap, True):
        add_preamble_fonts(document, fontmap)
def revert_AdobeFonts_xopts(document):
    "Revert native Adobe font definition (with extra options) to LaTeX"
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fontmap = {}
    # revert_fonts() fills fontmap with the preamble code to add.
    if revert_fonts(document, createFontMapping(["Adobe"]), fontmap, True):
        add_preamble_fonts(document, fontmap)
def convert_osf(document):
    "Convert \\font_osf param to new format"
    # The single \font_osf boolean is split into per-family params:
    # it is renamed to \font_roman_osf, and \font_sans_osf /
    # \font_typewriter_osf are inserted alongside it.
    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
    i = find_token(document.header, "\\font_osf", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_osf.")
        return
    # TeX sans/typewriter fonts whose packages honored the old \font_osf flag
    osfsf = [
        "biolinum",
        "ADOBESourceSansPro",
        "NotoSansRegular",
        "NotoSansMedium",
        "NotoSansThin",
        "NotoSansLight",
        "NotoSansExtralight",
    ]
    osftt = ["ADOBESourceCodePro", "NotoMonoRegular"]
    osfval = str2bool(get_value(document.header, "\\font_osf", i))
    document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
    if NonTeXFonts:
        # With non-TeX fonts the old flag only ever applied to the roman font.
        document.header.insert(i, "\\font_sans_osf false")
        document.header.insert(i + 1, "\\font_typewriter_osf false")
        return
    if osfval:
        # TeX fonts: the sans/typewriter families inherited OSF only if the
        # selected font is one of the known OSF-capable packages.
        x = find_token(document.header, "\\font_sans", 0)
        if x == -1:
            document.warning("Malformed LyX document: Missing \\font_sans.")
        else:
            # We need to use this regex since split() does not handle quote protection
            sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            sf = sffont[1].strip('"')
            if sf in osfsf:
                document.header.insert(i, "\\font_sans_osf true")
            else:
                document.header.insert(i, "\\font_sans_osf false")
        x = find_token(document.header, "\\font_typewriter", 0)
        if x == -1:
            document.warning("Malformed LyX document: Missing \\font_typewriter.")
        else:
            # We need to use this regex since split() does not handle quote protection
            ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            tt = ttfont[1].strip('"')
            if tt in osftt:
                document.header.insert(i + 1, "\\font_typewriter_osf true")
            else:
                document.header.insert(i + 1, "\\font_typewriter_osf false")
    else:
        document.header.insert(i, "\\font_sans_osf false")
        document.header.insert(i + 1, "\\font_typewriter_osf false")
def revert_osf(document):
    """Revert the per-family \\font_*_osf params to the old single param.

    \\font_roman_osf is renamed back to \\font_osf; the sans and
    typewriter params are deleted.  The resulting \\font_osf is true if
    ANY of the three family flags was true.
    """
    i = find_token(document.header, "\\font_roman_osf", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman_osf.")
        return
    osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
    document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
    i = find_token(document.header, "\\font_sans_osf", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans_osf.")
        return
    # Bug fix: accumulate with |= (as done for typewriter below) instead of
    # overwriting, so a true roman OSF flag survives a false sans flag.
    osfval |= str2bool(get_value(document.header, "\\font_sans_osf", i))
    del document.header[i]
    i = find_token(document.header, "\\font_typewriter_osf", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
        return
    osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
    del document.header[i]
    if osfval:
        i = find_token(document.header, "\\font_osf", 0)
        if i == -1:
            document.warning("Malformed LyX document: Missing \\font_osf.")
            return
        document.header[i] = "\\font_osf true"
def revert_texfontopts(document):
    "Revert native TeX font definitions (with extra options) to LaTeX"
    # Moves \font_sans_opts / \font_roman_opts (plus OSF/SC/scale settings)
    # into explicit \usepackage preamble lines and resets the affected
    # header params to their defaults.  TeX fonts only.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    # Roman TeX fonts that are loaded via a plain \usepackage (possibly
    # under a different package name, see the mapping further down)
    rmfonts = [
        "ccfonts",
        "cochineal",
        "utopia",
        "garamondx",
        "libertine",
        "lmodern",
        "palatino",
        "times",
        "xcharter",
    ]
    # First the sf (biolinum only)
    regexp = re.compile(r"(\\font_sans_opts)")
    x = find_re(document.header, regexp, 0)
    if x != -1:
        # We need to use this regex since split() does not handle quote protection
        sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        opts = sfopts[1].strip('"')
        i = find_token(document.header, "\\font_sans", 0)
        if i == -1:
            document.warning("Malformed LyX document: Missing \\font_sans.")
        else:
            # We need to use this regex since split() does not handle quote protection
            sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
            sans = sffont[1].strip('"')
            if sans == "biolinum":
                sf_scale = 100.0
                sffont[1] = '"default"'
                document.header[i] = " ".join(sffont)
                osf = False
                j = find_token(document.header, "\\font_sans_osf true", 0)
                if j != -1:
                    osf = True
                k = find_token(document.header, "\\font_sf_scale", 0)
                if k == -1:
                    document.warning("Malformed LyX document: Missing \\font_sf_scale.")
                else:
                    # Reset the scale in the header and carry it to the package option
                    sfscale = document.header[k].split()
                    val = sfscale[1]
                    sfscale[1] = "100"
                    document.header[k] = " ".join(sfscale)
                    try:
                        # float() can throw
                        sf_scale = float(val)
                    except:
                        document.warning("Invalid font_sf_scale value: " + val)
                preamble = "\\usepackage["
                if osf:
                    document.header[j] = "\\font_sans_osf false"
                    preamble += "osf,"
                if sf_scale != 100.0:
                    preamble += "scaled=" + str(sf_scale / 100.0) + ","
                preamble += opts
                preamble += "]{biolinum}"
                add_to_preamble(document, [preamble])
        # The opts header line is consumed in any case
        del document.header[x]
    regexp = re.compile(r"(\\font_roman_opts)")
    x = find_re(document.header, regexp, 0)
    if x == -1:
        return
    # We need to use this regex since split() does not handle quote protection
    romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
    opts = romanopts[1].strip('"')
    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
        return
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman not in rmfonts:
            return
        romanfont[1] = '"default"'
        document.header[i] = " ".join(romanfont)
        # Some LyX font names map to a differently-named LaTeX package
        package = roman
        if roman == "utopia":
            package = "fourier"
        elif roman == "palatino":
            package = "mathpazo"
        elif roman == "times":
            package = "mathptmx"
        elif roman == "xcharter":
            package = "XCharter"
        # Translate the OSF flag to the package-specific option name
        osf = ""
        j = find_token(document.header, "\\font_roman_osf true", 0)
        if j != -1:
            if roman == "cochineal":
                osf = "proportional,osf,"
            elif roman == "utopia":
                osf = "oldstyle,"
            elif roman == "garamondx":
                osf = "osfI,"
            elif roman == "libertine":
                osf = "osf,"
            elif roman == "palatino":
                osf = "osf,"
            elif roman == "xcharter":
                osf = "osf,"
            document.header[j] = "\\font_roman_osf false"
        # True small caps likewise become a package option where supported
        k = find_token(document.header, "\\font_sc true", 0)
        if k != -1:
            if roman == "utopia":
                osf += "expert,"
            if roman == "palatino" and osf == "":
                osf = "sc,"
            document.header[k] = "\\font_sc false"
        preamble = "\\usepackage["
        preamble += osf
        preamble += opts
        preamble += "]{" + package + "}"
        add_to_preamble(document, [preamble])
        del document.header[x]
def convert_CantarellFont(document):
    "Handle Cantarell font definition to LaTeX"
    # Relevant for TeX fonts only.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(["Cantarell"]), "oldstyle")
def revert_CantarellFont(document):
    "Revert native Cantarell font definition to LaTeX"
    # Relevant for TeX fonts only.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    collected = {}
    if revert_fonts(document, createFontMapping(["Cantarell"]), collected, False, True):
        add_preamble_fonts(document, collected)
def convert_ChivoFont(document):
    "Handle Chivo font definition to LaTeX"
    # Relevant for TeX fonts only.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(["Chivo"]), "oldstyle")
def revert_ChivoFont(document):
    "Revert native Chivo font definition to LaTeX"
    # Relevant for TeX fonts only.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    collected = {}
    if revert_fonts(document, createFontMapping(["Chivo"]), collected, False, True):
        add_preamble_fonts(document, collected)
def convert_FiraFont(document):
    "Handle Fira font definition to LaTeX"
    # Relevant for TeX fonts only.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(["Fira"]), "lf")
def revert_FiraFont(document):
    "Revert native Fira font definition to LaTeX"
    # Relevant for TeX fonts only.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    collected = {}
    if revert_fonts(document, createFontMapping(["Fira"]), collected, False, True):
        add_preamble_fonts(document, collected)
def convert_Semibolds(document):
    "Move semibold options to extraopts"
    # The IBMPlex*Semibold pseudo-fonts are replaced by the regular IBMPlex
    # family name; for TeX fonts the semibold weight is preserved as a
    # "semibold" entry in the respective \font_*_opts header param.
    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman == "IBMPlexSerifSemibold":
            romanfont[1] = '"IBMPlexSerif"'
            document.header[i] = " ".join(romanfont)
            if NonTeXFonts == False:
                regexp = re.compile(r"(\\font_roman_opts)")
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_sf_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_sf_scale")
                    else:
                        document.header.insert(fo, '\\font_roman_opts "semibold"')
                else:
                    # Prepend to the existing options
                    # We need to use this regex since split() does not handle quote protection
                    romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = (
                        '\\font_roman_opts "semibold, ' + romanopts[1].strip('"') + '"'
                    )
    i = find_token(document.header, "\\font_sans", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        sf = sffont[1].strip('"')
        if sf == "IBMPlexSansSemibold":
            sffont[1] = '"IBMPlexSans"'
            document.header[i] = " ".join(sffont)
            if NonTeXFonts == False:
                regexp = re.compile(r"(\\font_sans_opts)")
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_sf_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_sf_scale")
                    else:
                        document.header.insert(fo, '\\font_sans_opts "semibold"')
                else:
                    # Prepend to the existing options
                    # We need to use this regex since split() does not handle quote protection
                    sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = (
                        '\\font_sans_opts "semibold, ' + sfopts[1].strip('"') + '"'
                    )
    i = find_token(document.header, "\\font_typewriter", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        tt = ttfont[1].strip('"')
        if tt == "IBMPlexMonoSemibold":
            ttfont[1] = '"IBMPlexMono"'
            document.header[i] = " ".join(ttfont)
            if NonTeXFonts == False:
                regexp = re.compile(r"(\\font_typewriter_opts)")
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_tt_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_tt_scale")
                    else:
                        document.header.insert(fo, '\\font_typewriter_opts "semibold"')
                else:
                    # Prepend to the existing options
                    # We need to use this regex since split() does not handle quote protection
                    ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = (
                        '\\font_typewriter_opts "semibold, ' + ttopts[1].strip('"') + '"'
                    )
def convert_NotoRegulars(document):
    "Merge diverse noto regular fonts"
    # The *-TLF names are folded into the corresponding *Regular names,
    # one header token per font family.
    renames = (
        ("\\font_roman", "NotoSerif-TLF", "NotoSerifRegular"),
        ("\\font_sans", "NotoSans-TLF", "NotoSansRegular"),
        ("\\font_typewriter", "NotoMono-TLF", "NotoMonoRegular"),
    )
    for token, oldname, newname in renames:
        pos = find_token(document.header, token, 0)
        if pos == -1:
            document.warning("Malformed LyX document: Missing %s." % token)
            continue
        # We need to use this regex since split() does not handle quote protection
        fields = re.findall(r'[^"\s]\S*|".+?"', document.header[pos])
        if fields[1].strip('"') == oldname:
            fields[1] = '"' + newname + '"'
            document.header[pos] = " ".join(fields)
def convert_CrimsonProFont(document):
    "Handle CrimsonPro font definition to LaTeX"
    # Relevant for TeX fonts only.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(["CrimsonPro"]), "lf")
def revert_CrimsonProFont(document):
    "Revert native CrimsonPro font definition to LaTeX"
    # Relevant for TeX fonts only.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    collected = {}
    if revert_fonts(document, createFontMapping(["CrimsonPro"]), collected, False, True):
        add_preamble_fonts(document, collected)
def revert_pagesizes(document):
    """Revert new page sizes in memoir and KOMA to options.

    Non-standard \\papersize values are reset to "default" and moved into
    the \\options class option string instead (memoir/scr* classes only,
    and only when geometry is not in charge of the paper size).
    """
    # Bug fix: compare against the first THREE characters ("scr"); the old
    # two-char slice could never equal "scr", so the function was a no-op.
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        # geometry sets the paper size itself; nothing to revert
        return
    # Sizes that LyX 2.3 already knows natively
    defsizes = [
        "default",
        "custom",
        "letterpaper",
        "legalpaper",
        "executivepaper",
        "a4paper",
        "a5paper",
        "b5paper",
    ]
    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return
    document.header[i] = "\\papersize default"
    i = find_token(document.header, "\\options", 0)
    if i == -1:
        # No options yet: create the header line just before \textclass
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
        return
    document.header[i] = document.header[i] + "," + val
def convert_pagesizes(document):
    """Convert to new page sizes in memoir and KOMA to options.

    When a memoir/scr* document uses a paper size the old format could only
    express via geometry, switch \\use_geometry on so the size keeps
    taking effect.
    """
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        # geometry already active; nothing to do
        return
    # Sizes that need no special handling
    defsizes = [
        "default",
        "custom",
        "letterpaper",
        "legalpaper",
        "executivepaper",
        "a4paper",
        "a5paper",
        "b5paper",
    ]
    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return
    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry
        # Bug fix: write to the line just found (index i), not header[1]
        document.header[i] = "\\use_geometry true"
def revert_komafontsizes(document):
    "Revert new font sizes in KOMA to options"
    # Only the KOMA classes (scr*) accept arbitrary fontsize= class options.
    if not document.textclass.startswith("scr"):
        return
    pos = find_token(document.header, "\\paperfontsize", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\paperfontsize header.")
        return
    val = get_value(document.header, "\\paperfontsize", pos)
    if val in ("default", "10", "11", "12"):
        # standard sizes are supported natively; nothing to do
        return
    document.header[pos] = "\\paperfontsize default"
    fsize = "fontsize=" + val
    opt = find_token(document.header, "\\options", 0)
    if opt != -1:
        # Append to the existing class options
        document.header[opt] = document.header[opt] + "," + fsize
        return
    # No options line yet: insert one just before \textclass
    tclass = find_token(document.header, "\\textclass", 0)
    if tclass == -1:
        document.warning("Malformed LyX document! Missing \\textclass header.")
        return
    document.header.insert(tclass, "\\options " + fsize)
def revert_dupqualicites(document):
    "Revert qualified citation list commands with duplicate keys to ERT"
    # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
    # we need to revert those with multiple uses of the same key.
    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)
    # Qualified lists only exist with the biblatex engines
    if engine not in ["biblatex", "biblatex-natbib"]:
        return
    # Citation insets that support qualified lists, with their LaTeX code
    ql_citations = {
        "cite": "cites",
        "Cite": "Cites",
        "citet": "textcites",
        "Citet": "Textcites",
        "citep": "parencites",
        "Citep": "Parencites",
        "Footcite": "Smartcites",
        "footcite": "smartcites",
        "Autocite": "Autocites",
        "autocite": "autocites",
    }
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" % (i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" % (i))
            i = j + 1
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        if cmd not in list(ql_citations.keys()):
            i = j + 1
            continue
        pres = find_token(document.body, "pretextlist", i, j)
        posts = find_token(document.body, "posttextlist", i, j)
        if pres == -1 and posts == -1:
            # nothing to do.
            i = j + 1
            continue
        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" % (i))
            i = j + 1
            continue
        keys = key.split(",")
        ukeys = list(set(keys))
        if len(keys) == len(ukeys):
            # no duplicates.
            i = j + 1
            continue
        pretexts = get_quoted_value(document.body, "pretextlist", pres)
        posttexts = get_quoted_value(document.body, "posttextlist", posts)
        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        # pretextlist/posttextlist store tab-separated "<key> <text>" pairs.
        # Build key -> tab-joined-texts maps so each occurrence of a
        # duplicated key can consume its own pre/post text in order.
        prelist = pretexts.split("\t")
        premap = dict()
        for pp in prelist:
            ppp = pp.split(" ", 1)
            val = ""
            if len(ppp) > 1:
                val = ppp[1]
            else:
                val = ""
            if ppp[0] in premap:
                premap[ppp[0]] = premap[ppp[0]] + "\t" + val
            else:
                premap[ppp[0]] = val
        postlist = posttexts.split("\t")
        postmap = dict()
        for pp in postlist:
            ppp = pp.split(" ", 1)
            val = ""
            if len(ppp) > 1:
                val = ppp[1]
            else:
                val = ""
            if ppp[0] in postmap:
                postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
            else:
                postmap[ppp[0]] = val
        # Replace known new commands with ERT
        # Global pre/post notes containing parens must be brace-protected
        if "(" in pre or ")" in pre:
            pre = "{" + pre + "}"
        if "(" in post or ")" in post:
            post = "{" + post + "}"
        res = "\\" + ql_citations[cmd]
        if pre:
            res += "(" + pre + ")"
        if post:
            res += "(" + post + ")"
        elif pre:
            # an empty post note is still needed to disambiguate a lone pre note
            res += "()"
        for kk in keys:
            # emit [prenote][postnote]{key} per occurrence, consuming one
            # stored text per duplicate use
            if premap.get(kk, "") != "":
                akeys = premap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    premap[kk] = "\t".join(akeys[1:])
                else:
                    premap[kk] = ""
            if postmap.get(kk, "") != "":
                akeys = postmap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    postmap[kk] = "\t".join(akeys[1:])
                else:
                    postmap[kk] = ""
            elif premap.get(kk, "") != "":
                # a pre note without post note needs an empty [] placeholder
                res += "[]"
            res += "{" + kk + "}"
        document.body[i : j + 1] = put_cmd_in_ert([res])
def convert_pagesizenames(document):
    "Convert LyX page sizes names"
    pos = find_token(document.header, "\\papersize", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    # Legacy names carry a "paper" suffix: letterpaper, a0paper..a6paper,
    # b0paper..b6paper, c0paper..c6paper, etc.
    legacy_names = {"letterpaper", "legalpaper", "executivepaper"} | {
        f"{series}{size}paper" for series in "abc" for size in range(7)
    }
    val = get_value(document.header, "\\papersize", pos)
    if val in legacy_names:
        document.header[pos] = "\\papersize " + val.replace("paper", "")
def revert_pagesizenames(document):
    "Revert LyX page sizes names"
    pos = find_token(document.header, "\\papersize", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    # New names lack the "paper" suffix: letter, a0..a6, b0..b6, c0..c6, etc.
    new_names = {"letter", "legal", "executive"} | {
        f"{series}{size}" for series in "abc" for size in range(7)
    }
    val = get_value(document.header, "\\papersize", pos)
    if val in new_names:
        document.header[pos] = "\\papersize " + val + "paper"
def revert_theendnotes(document):
    "Reverts native support of \\theendnotes to TeX-code"
    modules = document.get_module_list()
    if "endnotes" not in modules and "foottoend" not in modules:
        return
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset FloatList endnote", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue
        # Replace the whole inset with the raw LaTeX command
        document.body[pos : end + 1] = put_cmd_in_ert("\\theendnotes")
def revert_enotez(document):
    "Reverts native support of enotez package to TeX-code"
    modules = document.get_module_list()
    if "enotez" not in modules and "foottoenotez" not in modules:
        return
    # Track whether any enotez construct was actually present
    used = find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1
    revert_flex_inset(document, "Endnote", "\\endnote")
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset FloatList endnote", pos + 1)
        if pos == -1:
            break
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue
        used = True
        document.body[pos : end + 1] = put_cmd_in_ert("\\printendnotes")
    if used:
        add_to_preamble(document, ["\\usepackage{enotez}"])
    document.del_module("enotez")
    document.del_module("foottoenotez")
def revert_memoir_endnotes(document):
    "Reverts native support of memoir endnotes to TeX-code"
    if document.textclass != "memoir":
        return
    # With one of the endnote modules loaded the insets output \endnote,
    # otherwise memoir's own \pagenote.
    endnote_modules = {"enotez", "foottoenotez", "endnotes", "foottoend"}
    if endnote_modules.intersection(document.get_module_list()):
        encommand = "\\endnote"
    else:
        encommand = "\\pagenote"
    revert_flex_inset(document, "Endnote", encommand)
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset FloatList pagenote", pos + 1)
        if pos == -1:
            break
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue
        # Starred list variant maps to \printpagenotes*
        if document.body[pos] == "\\begin_inset FloatList pagenote*":
            document.body[pos : end + 1] = put_cmd_in_ert("\\printpagenotes*")
        else:
            document.body[pos : end + 1] = put_cmd_in_ert("\\printpagenotes")
    add_to_preamble(document, ["\\makepagenote"])
def revert_totalheight(document):
    "Reverts graphics height parameter from totalheight to height"
    # Map LaTeX relative lengths back to LyX percent units.
    # NOTE(review): "\\baselineskip " carries a trailing space — looks
    # intentional to match the stored value; confirm before changing.
    relative_heights = {
        "\\textwidth": "text%",
        "\\columnwidth": "col%",
        "\\paperwidth": "page%",
        "\\linewidth": "line%",
        "\\textheight": "theight%",
        "\\paperheight": "pheight%",
        "\\baselineskip ": "baselineskip%",
    }
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" % (i))
            i += 1
            continue
        rx = re.compile(r"\s*special\s*(\S+)$")
        rxx = re.compile(r"(\d*\.*\d+)(\S+)$")
        k = find_re(document.body, rx, i, j)
        special = ""
        oldheight = ""
        if k != -1:
            # Extract a height= entry from the "special" parameter, if any
            m = rx.match(document.body[k])
            if m:
                special = m.group(1)
                mspecial = special.split(",")
                for spc in mspecial:
                    if spc.startswith("height="):
                        oldheight = spc.split("=")[1]
                        # Convert relative lengths to LyX's percent notation
                        ms = rxx.search(oldheight)
                        if ms:
                            oldunit = ms.group(2)
                            if oldunit in list(relative_heights.keys()):
                                oldval = str(float(ms.group(1)) * 100)
                                oldunit = relative_heights[oldunit]
                                oldheight = oldval + oldunit
                        mspecial.remove(spc)
                        break
                if len(mspecial) > 0:
                    special = ",".join(mspecial)
                else:
                    special = ""
        rx = re.compile(r"(\s*height\s*)(\S+)$")
        kk = find_re(document.body, rx, i, j)
        if kk != -1:
            # A height parameter exists: move its value into
            # special "totalheight=...", and put the extracted old height
            # (if any) in its place, else drop the height line.
            m = rx.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
                if k != -1:
                    if special != "":
                        val = val + "," + special
                    document.body[k] = "\tspecial " + "totalheight=" + val
                else:
                    document.body.insert(kk, "\tspecial totalheight=" + val)
                if oldheight != "":
                    document.body[kk] = m.group(1) + oldheight
                else:
                    del document.body[kk]
        elif oldheight != "":
            # No height parameter: re-create one from the special entry
            if special != "":
                document.body[k] = "\tspecial " + special
                document.body.insert(k, "\theight " + oldheight)
            else:
                document.body[k] = "\theight " + oldheight
        i = j + 1
def convert_totalheight(document):
    "Converts graphics height parameter from totalheight to height"
    # Map LyX percent units to the corresponding LaTeX relative lengths
    relative_heights = {
        "text%": "\\textwidth",
        "col%": "\\columnwidth",
        "page%": "\\paperwidth",
        "line%": "\\linewidth",
        "theight%": "\\textheight",
        "pheight%": "\\paperheight",
        "baselineskip%": "\\baselineskip",
    }
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" % (i))
            i += 1
            continue
        rx = re.compile(r"\s*special\s*(\S+)$")
        k = find_re(document.body, rx, i, j)
        special = ""
        newheight = ""
        if k != -1:
            # Extract a totalheight= entry from the "special" parameter, if any
            m = rx.match(document.body[k])
            if m:
                special = m.group(1)
                mspecial = special.split(",")
                for spc in mspecial:
                    if spc[:12] == "totalheight=":
                        newheight = spc.split("=")[1]
                        mspecial.remove(spc)
                        break
                if len(mspecial) > 0:
                    special = ",".join(mspecial)
                else:
                    special = ""
        rx = re.compile(r"(\s*height\s*)(\d+\.?\d*)(\S+)$")
        kk = find_re(document.body, rx, i, j)
        if kk != -1:
            # A height parameter exists: move its value into
            # special "height=..." and replace it with the extracted
            # totalheight (if any), else drop the height line.
            m = rx.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
                unit = m.group(3)
                if unit in list(relative_heights.keys()):
                    val = str(float(val) / 100)
                    unit = relative_heights[unit]
                if k != -1:
                    if special != "":
                        val = val + unit + "," + special
                    document.body[k] = "\tspecial " + "height=" + val
                else:
                    document.body.insert(kk + 1, "\tspecial height=" + val + unit)
                if newheight != "":
                    document.body[kk] = m.group(1) + newheight
                else:
                    del document.body[kk]
        elif newheight != "":
            # No height parameter: create one from the totalheight value
            document.body.insert(k, "\theight " + newheight)
        i = j + 1
def convert_changebars(document):
    "Converts the changebars module to native solution"
    if "changebars" not in document.get_module_list():
        return
    anchor = find_token(document.header, "\\output_changes", 0)
    if anchor == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
    else:
        document.header.insert(anchor, "\\change_bars true")
    # The module is dropped in either case
    document.del_module("changebars")
def revert_changebars(document):
    "Converts native changebar param to module"
    pos = find_token(document.header, "\\change_bars", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\change_bars header.")
        return
    if get_value(document.header, "\\change_bars", pos) == "true":
        document.add_module("changebars")
    # The header param is removed regardless of its value
    del document.header[pos]
def convert_postpone_fragile(document):
    "Adds false \\postpone_fragile_content buffer param"
    anchor = find_token(document.header, "\\output_changes", 0)
    if anchor == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        return
    # Set this to false for old documents (see #2154)
    document.header.insert(anchor, "\\postpone_fragile_content false")
def revert_postpone_fragile(document):
    "Remove \\postpone_fragile_content buffer param"
    pos = find_token(document.header, "\\postpone_fragile_content", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
        return
    del document.header[pos]
def revert_colrow_tracking(document):
    "Remove change tag from tabular columns/rows"
    # Compile once; both patterns capture the change attribute's value.
    tag_res = (
        re.compile('^<column.*change="([^"]+)".*>$'),
        re.compile('^<row.*change="([^"]+)".*>$'),
    )
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Tabular", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            continue
        for line in range(pos, end):
            for rx in tag_res:
                m = rx.search(document.body[line])
                if m:
                    document.body[line] = document.body[line].replace(
                        ' change="' + m.group(1) + '"', ""
                    )
def convert_counter_maintenance(document):
    "Convert \\maintain_unincluded_children buffer param from boolean value to tristate"
    pos = find_token(document.header, "\\maintain_unincluded_children", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return
    # true -> strict, anything else -> no
    old = get_value(document.header, "\\maintain_unincluded_children", pos)
    newval = "strict" if old == "true" else "no"
    document.header[pos] = "\\maintain_unincluded_children " + newval
def revert_counter_maintenance(document):
    "Revert \\maintain_unincluded_children buffer param to previous boolean value"
    pos = find_token(document.header, "\\maintain_unincluded_children", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return
    # no -> false, any other tristate value -> true
    old = get_value(document.header, "\\maintain_unincluded_children", pos)
    newval = "false" if old == "no" else "true"
    document.header[pos] = "\\maintain_unincluded_children " + newval
def revert_counter_inset(document):
    """Revert counter inset to ERT, where possible.

    Each counter inset is replaced by the equivalent \\setcounter /
    \\addtocounter LaTeX code; save/restore commands get auxiliary
    LyXSave<counter> counters declared in the preamble.
    """
    i = 0
    needed_counters = {}
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset counter", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of counter inset at line %d!" % i)
            i += 1
            continue
        lyx = get_quoted_value(document.body, "lyxonly", i, j)
        if lyx == "true":
            # there is nothing we can do to affect the LyX counters
            document.body[i : j + 1] = []
            # Bug fix: the inset lines were just deleted, so following
            # content now starts at i; do not skip ahead to j + 1.
            continue
        cnt = get_quoted_value(document.body, "counter", i, j)
        if not cnt:
            document.warning("No counter given for inset at line %d!" % i)
            i = j + 1
            continue
        cmd = get_quoted_value(document.body, "LatexCommand", i, j)
        ert = ""
        if cmd == "set":
            val = get_quoted_value(document.body, "value", i, j)
            if not val:
                document.warning("Can't convert counter inset at line %d!" % i)
            else:
                ert = put_cmd_in_ert(f"\\setcounter{{{cnt}}}{{{val}}}")
        elif cmd == "addto":
            val = get_quoted_value(document.body, "value", i, j)
            if not val:
                document.warning("Can't convert counter inset at line %d!" % i)
            else:
                ert = put_cmd_in_ert(f"\\addtocounter{{{cnt}}}{{{val}}}")
        elif cmd == "reset":
            ert = put_cmd_in_ert("\\setcounter{%s}{0}" % (cnt))
        elif cmd == "save":
            needed_counters[cnt] = 1
            savecnt = "LyXSave" + cnt
            ert = put_cmd_in_ert(f"\\setcounter{{{savecnt}}}{{\\value{{{cnt}}}}}")
        elif cmd == "restore":
            needed_counters[cnt] = 1
            savecnt = "LyXSave" + cnt
            ert = put_cmd_in_ert(f"\\setcounter{{{cnt}}}{{\\value{{{savecnt}}}}}")
        else:
            # Bug fix: report the unknown command, not the counter name
            document.warning("Unknown counter command `%s' in inset at line %d!" % (cmd, i))
        if ert:
            document.body[i : j + 1] = ert
        i += 1
        continue
    # Declare the auxiliary save counters used above
    pretext = []
    for cnt in needed_counters:
        pretext.append("\\newcounter{LyXSave%s}" % (cnt))
    if pretext:
        add_to_preamble(document, pretext)
def revert_ams_spaces(document):
    "Revert InsetSpace medspace and thickspace into their TeX-code counterparts"
    reverted = False
    for cmd in ("\\medspace{}", "\\thickspace{}"):
        pos = find_token(document.body, "\\begin_inset space " + cmd, 0)
        if pos == -1:
            continue
        end = find_end_of_inset(document.body, pos)
        document.body[pos : end + 1] = put_cmd_in_ert(cmd)
        reverted = True
    if reverted:
        # load amsmath in the preamble if not already loaded
        if find_token(document.header, "\\use_package amsmath 2", 0) == -1:
            add_to_preamble(document, ["\\@ifundefined{thickspace}{\\usepackage{amsmath}}{}"])
def convert_parskip(document):
    "Move old parskip settings to preamble"
    sep = find_token(document.header, "\\paragraph_separation skip", 0)
    if sep == -1:
        return
    skip = find_token(document.header, "\\defskip", 0)
    if skip == -1:
        document.warning("Malformed LyX document! Missing \\defskip.")
        return
    val = get_value(document.header, "\\defskip", skip)
    # Named skips map to the corresponding LaTeX length; everything else
    # is taken verbatim.
    if val in ("smallskip", "medskip", "bigskip"):
        skipval = "\\" + val + "amount"
    else:
        skipval = val
    add_to_preamble(
        document,
        ["\\setlength{\\parskip}{" + skipval + "}", "\\setlength{\\parindent}{0pt}"],
    )
    document.header[sep] = "\\paragraph_separation indent"
    document.header[skip] = "\\paragraph_indentation default"
def revert_parskip(document):
    "Revert new parskip settings to preamble"
    sep = find_token(document.header, "\\paragraph_separation skip", 0)
    if sep == -1:
        return
    skip = find_token(document.header, "\\defskip", 0)
    if skip == -1:
        document.warning("Malformed LyX document! Missing \\defskip.")
        return
    val = get_value(document.header, "\\defskip", skip)
    # halfline is the parskip package default and needs no option
    if val in ("smallskip", "medskip", "bigskip"):
        skipval = "[skip=\\" + val + "amount]"
    elif val == "fullline":
        skipval = "[skip=\\baselineskip]"
    elif val == "halfline":
        skipval = ""
    else:
        skipval = "[skip={" + val + "}]"
    add_to_preamble(document, ["\\usepackage" + skipval + "{parskip}"])
    document.header[sep] = "\\paragraph_separation indent"
    document.header[skip] = "\\paragraph_indentation default"
def revert_line_vspaces(document):
    """Revert fullline and halfline vspaces to TeX code.

    Bug fix: the old code called find_token only once per variant, so
    only the *first* vspace of each kind was reverted; all occurrences
    are handled now. A missing inset end is also reported instead of
    risking an endless loop.
    """
    # Starred variants must come first: find_token matches by prefix, so
    # searching "... fullline" would also hit "... fullline*" lines.
    insets = {
        "fullline*": "\\vspace*{\\baselineskip}",
        "fullline": "\\vspace{\\baselineskip}",
        "halfline*": "\\vspace*{0.5\\baselineskip}",
        "halfline": "\\vspace{0.5\\baselineskip}",
    }
    for inset, cmd in insets.items():
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset VSpace " + inset, i)
            if i == -1:
                break
            end = find_end_of_inset(document.body, i)
            if end == -1:
                document.warning("Malformed LyX document: Could not find end of VSpace inset.")
                break
            document.body[i : end + 1] = put_cmd_in_ert(cmd)
def convert_libertinus_rm_fonts(document):
    """Handle Libertinus serif fonts definition to LaTeX"""
    # Only relevant for TeX fonts; non-TeX font setups are left alone.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    # NOTE(review): the mapping key case differs from
    # revert_libertinus_rm_fonts ("Libertinus" vs. "libertinus") — one of
    # the two is probably not recognized by createFontMapping; confirm.
    mapping = createFontMapping(["Libertinus"])
    convert_fonts(document, mapping)
def revert_libertinus_rm_fonts(document):
    """Revert Libertinus serif font definition to LaTeX"""
    # Only relevant for TeX fonts; non-TeX font setups are left alone.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    collected = dict()
    mapping = createFontMapping(["libertinus"])
    if revert_fonts(document, mapping, collected):
        add_preamble_fonts(document, collected)
def revert_libertinus_sftt_fonts(document):
    """Revert Libertinus sans and tt font definitions to LaTeX.

    Bug fix: the scale values are floats but were compared against the
    *string* "100.0" (always unequal), so the @scale preamble code was
    emitted even for unscaled fonts. The bare excepts are narrowed to
    ValueError, which is what float() raises on a malformed value.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    # first sf font
    i = find_token(document.header, '\\font_sans "LibertinusSans-LF"', 0)
    if i != -1:
        j = find_token(document.header, "\\font_sans_osf true", 0)
        if j != -1:
            # old-style figures: use the OsF family and reset the flag
            add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-OsF}"])
            document.header[j] = "\\font_sans_osf false"
        else:
            add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-LF}"])
        document.header[i] = document.header[i].replace("LibertinusSans-LF", "default")
        sf_scale = 100.0
        sfval = find_token(document.header, "\\font_sf_scale", 0)
        if sfval == -1:
            document.warning("Malformed LyX document: Missing \\font_sf_scale.")
        else:
            sfscale = document.header[sfval].split()
            val = sfscale[1]
            # reset the header scale; the scaling moves to the preamble
            sfscale[1] = "100"
            document.header[sfval] = " ".join(sfscale)
            try:
                sf_scale = float(val)
            except ValueError:
                document.warning("Invalid font_sf_scale value: " + val)
        if sf_scale != 100.0:
            add_to_preamble(
                document,
                ["\\renewcommand*{\\LibertinusSans@scale}{" + str(sf_scale / 100.0) + "}"],
            )
    # now tt font
    i = find_token(document.header, '\\font_typewriter "LibertinusMono-TLF"', 0)
    if i != -1:
        add_to_preamble(document, ["\\renewcommand{\\ttdefault}{LibertinusMono-TLF}"])
        document.header[i] = document.header[i].replace("LibertinusMono-TLF", "default")
        tt_scale = 100.0
        ttval = find_token(document.header, "\\font_tt_scale", 0)
        if ttval == -1:
            document.warning("Malformed LyX document: Missing \\font_tt_scale.")
        else:
            ttscale = document.header[ttval].split()
            val = ttscale[1]
            ttscale[1] = "100"
            document.header[ttval] = " ".join(ttscale)
            try:
                tt_scale = float(val)
            except ValueError:
                document.warning("Invalid font_tt_scale value: " + val)
        if tt_scale != 100.0:
            add_to_preamble(
                document,
                ["\\renewcommand*{\\LibertinusMono@scale}{" + str(tt_scale / 100.0) + "}"],
            )
def revert_docbook_table_output(document):
    """Drop the \\docbook_table_output header line (unknown to older LyX)."""
    pos = find_token(document.header, "\\docbook_table_output")
    if pos == -1:
        return
    del document.header[pos]
def revert_nopagebreak(document):
    """Revert Newpage nopagebreak insets to \\nopagebreak TeX code.

    Bug fixes: find_end_of_inset signals failure with -1 (the old code
    tested ``end == 1``), and a malformed inset now aborts instead of
    looping forever (the old ``continue`` made no progress and re-found
    the same inset).
    """
    while True:
        i = find_token(document.body, "\\begin_inset Newpage nopagebreak")
        if i == -1:
            return
        end = find_end_of_inset(document.body, i)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Newpage inset.")
            return
        document.body[i : end + 1] = put_cmd_in_ert("\\nopagebreak{}")
def revert_hrquotes(document):
    """Revert Hungarian quotation marks to their closest older equivalents."""
    pos = find_token(document.header, "\\quotes_style hungarian", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style polish"
    # Map each Hungarian quote inset to its replacement.
    replacements = {
        "\\begin_inset Quotes hld": "\\begin_inset Quotes pld",
        "\\begin_inset Quotes hrd": "\\begin_inset Quotes prd",
        "\\begin_inset Quotes hls": "\\begin_inset Quotes ald",
        "\\begin_inset Quotes hrs": "\\begin_inset Quotes ard",
    }
    while True:
        pos = find_token(document.body, "\\begin_inset Quotes h")
        if pos == -1:
            return
        line = document.body[pos]
        if line in replacements:
            document.body[pos] = replacements[line]
def convert_math_refs(document):
    """Rename \\prettyref to \\formatted inside all Formula insets."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Formula", pos)
        if pos == -1:
            break
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Can't find end of inset at line %d of body!" % pos)
            pos += 1
            continue
        for k in range(pos, end):
            document.body[k] = document.body[k].replace("\\prettyref", "\\formatted")
        pos = end
def revert_math_refs(document):
    """Rename \\formatted back to \\prettyref and strip \\labelonly wrappers."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Formula", pos)
        if pos == -1:
            break
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Can't find end of inset at line %d of body!" % pos)
            pos += 1
            continue
        for k in range(pos, end):
            line = document.body[k].replace("\\formatted", "\\prettyref")
            if "\\labelonly" in line:
                # \labelonly has no older equivalent: keep only the label
                line = re.sub(r"\\labelonly{([^}]+?)}", r"\1", line)
            document.body[k] = line
        pos = end
def convert_branch_colors(document):
    """Convert branch colors to semantic values."""
    pos = 0
    while True:
        pos = find_token(document.header, "\\branch", pos)
        if pos == -1:
            break
        branch_end = find_token(document.header, "\\end_branch", pos)
        if branch_end == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        # We only support the standard LyX background for now
        col = find_token(document.header, "\\color #faf0e6", pos, branch_end)
        if col != -1:
            document.header[col] = "\\color background"
        pos += 1
def revert_branch_colors(document):
    """Revert semantic branch colors.

    Bug fix: a hex color carries "#" at index 0, not index 1; the old
    test ``bcolor[1] != "#"`` also reset custom hex colors to "none"
    (and raised IndexError on values shorter than two characters).
    """
    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        k = find_token(document.header, "\\color", i, j)
        if k != -1:
            bcolor = get_value(document.header, "\\color", k)
            if not bcolor.startswith("#"):
                # a semantic value; this will be read as background by LyX 2.3
                document.header[k] = "\\color none"
        i += 1
def revert_darkmode_graphics(document):
    """Remove the darkModeSensitive parameter from all Graphics insets."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Graphics", pos)
        if pos == -1:
            break
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Can't find end of graphics inset at line %d!!" % (pos))
            pos += 1
            continue
        param = find_token(document.body, "\tdarkModeSensitive", pos, end)
        if param != -1:
            del document.body[param]
        pos += 1
def revert_branch_darkcols(document):
    """Revert dark branch colors, keeping only the light variant."""
    pos = 0
    while True:
        pos = find_token(document.header, "\\branch", pos)
        if pos == -1:
            break
        branch_end = find_token(document.header, "\\end_branch", pos)
        if branch_end == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        col = find_token(document.header, "\\color", pos, branch_end)
        if col != -1:
            # "\color <light> <dark>" -> "\color <light>"
            match = re.search(r"\\color (\S+) (\S+)", document.header[col])
            if match:
                document.header[col] = "\\color " + match.group(1)
        pos += 1
def revert_vcolumns2(document):
    """Revert varwidth columns with line breaks etc.

    Cells whose content cannot be represented in a fixed-width column
    (line breaks, multiple paragraphs, non-standard layouts) are either
    given a V{\\linewidth} special column or wrapped in a custom
    ``cellvarwidth`` TeX environment emitted as ERT, since LyX < 2.4 has
    no native support for variable-width table columns.
    """
    i = 0
    # Preamble requirements collected while scanning; written in `finally`.
    needvarwidth = False
    needarray = False
    needcellvarwidth = False
    try:
        while True:
            i = find_token(document.body, "\\begin_inset Tabular", i + 1)
            if i == -1:
                return
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Could not find end of tabular.")
                continue
            # Collect necessary column information
            m = i + 1
            # <lyxtabular ... rows="R" columns="C">: splitting on double
            # quotes puts the counts at fixed positions 3 and 5.
            nrows = int(document.body[i + 1].split('"')[3])
            ncols = int(document.body[i + 1].split('"')[5])
            col_info = []
            for k in range(ncols):
                m = find_token(document.body, "<column", m)
                width = get_option_value(document.body[m], "width")
                varwidth = get_option_value(document.body[m], "varwidth")
                alignment = get_option_value(document.body[m], "alignment")
                valignment = get_option_value(document.body[m], "valignment")
                special = get_option_value(document.body[m], "special")
                col_info.append([width, varwidth, alignment, valignment, special, m])
                m += 1
            # Now parse cells
            m = i + 1
            for row in range(nrows):
                for col in range(ncols):
                    m = find_token(document.body, "<cell", m)
                    multicolumn = get_option_value(document.body[m], "multicolumn") != ""
                    multirow = get_option_value(document.body[m], "multirow") != ""
                    fixedwidth = get_option_value(document.body[m], "width") != ""
                    rotate = get_option_value(document.body[m], "rotate")
                    cellalign = get_option_value(document.body[m], "alignment")
                    cellvalign = get_option_value(document.body[m], "valignment")
                    # Check for: linebreaks, multipars, non-standard environments
                    begcell = m
                    endcell = find_token(document.body, "</cell>", begcell)
                    vcand = False
                    if (
                        find_token(document.body, "\\begin_inset Newline", begcell, endcell)
                        != -1
                    ):
                        vcand = not fixedwidth
                    elif count_pars_in_inset(document.body, begcell + 2) > 1:
                        vcand = not fixedwidth
                    elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
                        vcand = not fixedwidth
                    colalignment = col_info[col][2]
                    colvalignment = col_info[col][3]
                    if vcand:
                        # Top-left aligned, unrotated cells can simply use a
                        # varwidth V{} column special ...
                        if rotate == "" and (
                            (colalignment == "left" and colvalignment == "top")
                            or (
                                multicolumn == True
                                and cellalign == "left"
                                and cellvalign == "top"
                            )
                        ):
                            # ... but only if the column has no width, varwidth
                            # or special argument already.
                            if (
                                col_info[col][0] == ""
                                and col_info[col][1] == ""
                                and col_info[col][4] == ""
                            ):
                                needvarwidth = True
                                col_line = col_info[col][5]
                                needarray = True
                                vval = "V{\\linewidth}"
                                if multicolumn:
                                    document.body[m] = (
                                        document.body[m][:-1] + ' special="' + vval + '">'
                                    )
                                else:
                                    document.body[col_line] = (
                                        document.body[col_line][:-1]
                                        + ' special="'
                                        + vval
                                        + '">'
                                    )
                        else:
                            # everything else is wrapped in a cellvarwidth
                            # environment; [m]/[b] pass the vertical alignment
                            alarg = ""
                            if multicolumn or multirow:
                                if cellvalign == "middle":
                                    alarg = "[m]"
                                elif cellvalign == "bottom":
                                    alarg = "[b]"
                            else:
                                if colvalignment == "middle":
                                    alarg = "[m]"
                                elif colvalignment == "bottom":
                                    alarg = "[b]"
                            flt = find_token(document.body, "\\begin_layout", begcell, endcell)
                            elt = find_token_backwards(document.body, "\\end_layout", endcell)
                            if flt != -1 and elt != -1:
                                extralines = []
                                # we need to reset character layouts if necessary
                                el = find_token(document.body, "\\emph on", flt, elt)
                                if el != -1:
                                    extralines.append("\\emph default")
                                el = find_token(document.body, "\\noun on", flt, elt)
                                if el != -1:
                                    extralines.append("\\noun default")
                                el = find_token(document.body, "\\series", flt, elt)
                                if el != -1:
                                    extralines.append("\\series default")
                                el = find_token(document.body, "\\family", flt, elt)
                                if el != -1:
                                    extralines.append("\\family default")
                                el = find_token(document.body, "\\shape", flt, elt)
                                if el != -1:
                                    extralines.append("\\shape default")
                                el = find_token(document.body, "\\color", flt, elt)
                                if el != -1:
                                    extralines.append("\\color inherit")
                                el = find_token(document.body, "\\size", flt, elt)
                                if el != -1:
                                    extralines.append("\\size default")
                                el = find_token(document.body, "\\bar under", flt, elt)
                                if el != -1:
                                    extralines.append("\\bar default")
                                el = find_token(document.body, "\\uuline on", flt, elt)
                                if el != -1:
                                    extralines.append("\\uuline default")
                                el = find_token(document.body, "\\uwave on", flt, elt)
                                if el != -1:
                                    extralines.append("\\uwave default")
                                el = find_token(document.body, "\\strikeout on", flt, elt)
                                if el != -1:
                                    extralines.append("\\strikeout default")
                                # close the environment just before \end_layout
                                document.body[elt : elt + 1] = (
                                    extralines
                                    + put_cmd_in_ert("\\end{cellvarwidth}")
                                    + [r"\end_layout"]
                                )
                                # open it after a leading \lang line, if any
                                parlang = -1
                                for q in range(flt, elt):
                                    if document.body[q] != "" and document.body[q][0] != "\\":
                                        break
                                    if document.body[q][:5] == "\\lang":
                                        parlang = q
                                        break
                                if parlang != -1:
                                    document.body[parlang + 1 : parlang + 1] = put_cmd_in_ert(
                                        "\\begin{cellvarwidth}" + alarg
                                    )
                                else:
                                    document.body[flt + 1 : flt + 1] = put_cmd_in_ert(
                                        "\\begin{cellvarwidth}" + alarg
                                    )
                                needcellvarwidth = True
                                needvarwidth = True
                        # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
                        # with newlines, and we do not want that)
                        while True:
                            endcell = find_token(document.body, "</cell>", begcell)
                            linebreak = False
                            nl = find_token(
                                document.body,
                                "\\begin_inset Newline newline",
                                begcell,
                                endcell,
                            )
                            if nl == -1:
                                nl = find_token(
                                    document.body,
                                    "\\begin_inset Newline linebreak",
                                    begcell,
                                    endcell,
                                )
                                if nl == -1:
                                    break
                                linebreak = True
                            nle = find_end_of_inset(document.body, nl)
                            del document.body[nle : nle + 1]
                            if linebreak:
                                document.body[nl : nl + 1] = put_cmd_in_ert("\\linebreak{}")
                            else:
                                document.body[nl : nl + 1] = put_cmd_in_ert("\\\\")
                        # Replace parbreaks in multirow with \\endgraf
                        if multirow == True:
                            flt = find_token(document.body, "\\begin_layout", begcell, endcell)
                            if flt != -1:
                                while True:
                                    elt = find_end_of_layout(document.body, flt)
                                    if elt == -1:
                                        document.warning(
                                            "Malformed LyX document! Missing layout end."
                                        )
                                        break
                                    endcell = find_token(document.body, "</cell>", begcell)
                                    flt = find_token(
                                        document.body, "\\begin_layout", elt, endcell
                                    )
                                    if flt == -1:
                                        break
                                    document.body[elt : flt + 1] = put_cmd_in_ert("\\endgraf{}")
                    m += 1
            i = j
    finally:
        # Emit the collected preamble requirements even on early return.
        if needarray == True:
            add_to_preamble(document, ["\\usepackage{array}"])
        if needcellvarwidth == True:
            add_to_preamble(
                document,
                [
                    "%% Variable width box for table cells",
                    "\\newenvironment{cellvarwidth}[1][t]",
                    "  {\\begin{varwidth}[#1]{\\linewidth}}",
                    "  {\\@finalstrut\\@arstrutbox\\end{varwidth}}",
                ],
            )
        if needvarwidth == True:
            add_to_preamble(document, ["\\usepackage{varwidth}"])
def convert_vcolumns2(document):
    """Convert varwidth ERT to native.

    Detects the ``cellvarwidth`` ERT wrappers produced by
    revert_vcolumns2, removes them, and turns the accompanying ERT
    newlines/linebreaks/\\endgraf back into native insets; the matching
    preamble snippets are removed in the ``finally`` clause.
    """
    i = 0
    try:
        while True:
            i = find_token(document.body, "\\begin_inset Tabular", i + 1)
            if i == -1:
                return
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Could not find end of tabular.")
                continue
            # Parse cells
            # <lyxtabular ... rows="R" columns="C">: counts sit at split
            # positions 3 and 5.
            nrows = int(document.body[i + 1].split('"')[3])
            ncols = int(document.body[i + 1].split('"')[5])
            m = i + 1
            lines = []  # NOTE(review): unused — left over from an earlier version?
            for row in range(nrows):
                for col in range(ncols):
                    m = find_token(document.body, "<cell", m)
                    multirow = get_option_value(document.body[m], "multirow") != ""
                    begcell = m
                    endcell = find_token(document.body, "</cell>", begcell)
                    vcand = False
                    cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
                    if cvw != -1:
                        # candidate only if the command really lives in ERT
                        # (preceded by an ERT "\backslash" line)
                        vcand = (
                            document.body[cvw - 1] == "\\backslash"
                            and get_containing_inset(document.body, cvw)[0] == "ERT"
                        )
                    if vcand:
                        # Remove ERTs with cellvarwidth env
                        ecvw = find_token(document.body, "end{cellvarwidth}", begcell, endcell)
                        if ecvw != -1:
                            if document.body[ecvw - 1] == "\\backslash":
                                eertins = get_containing_inset(document.body, ecvw)
                                if eertins and eertins[0] == "ERT":
                                    del document.body[eertins[1] : eertins[2] + 1]
                        cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
                        ertins = get_containing_inset(document.body, cvw)
                        if ertins and ertins[0] == "ERT":
                            del document.body[ertins[1] : ertins[2] + 1]
                        # Convert ERT newlines (as cellvarwidth detection relies on that)
                        while True:
                            endcell = find_token(document.body, "</cell>", begcell)
                            nl = find_token(document.body, "\\backslash", begcell, endcell)
                            if nl == -1 or document.body[nl + 2] != "\\backslash":
                                break
                            ertins = get_containing_inset(document.body, nl)
                            if ertins and ertins[0] == "ERT":
                                document.body[ertins[1] : ertins[2] + 1] = [
                                    "\\begin_inset Newline newline",
                                    "",
                                    "\\end_inset",
                                ]
                        # Same for linebreaks
                        while True:
                            endcell = find_token(document.body, "</cell>", begcell)
                            nl = find_token(document.body, "linebreak", begcell, endcell)
                            if nl == -1 or document.body[nl - 1] != "\\backslash":
                                break
                            ertins = get_containing_inset(document.body, nl)
                            if ertins and ertins[0] == "ERT":
                                document.body[ertins[1] : ertins[2] + 1] = [
                                    "\\begin_inset Newline linebreak",
                                    "",
                                    "\\end_inset",
                                ]
                        # And \\endgraf
                        if multirow == True:
                            endcell = find_token(document.body, "</cell>", begcell)
                            nl = find_token(document.body, "endgraf{}", begcell, endcell)
                            if nl == -1 or document.body[nl - 1] != "\\backslash":
                                # NOTE(review): this `break` leaves the column
                                # loop, not just this cell — looks suspicious;
                                # confirm it is intended.
                                break
                            ertins = get_containing_inset(document.body, nl)
                            if ertins and ertins[0] == "ERT":
                                document.body[ertins[1] : ertins[2] + 1] = [
                                    "\\end_layout",
                                    "",
                                    "\\begin_layout Plain Layout",
                                ]
                    m += 1
            i += 1
    finally:
        # Remove the preamble snippets added by revert_vcolumns2, if present.
        del_complete_lines(
            document.preamble,
            [
                "% Added by lyx2lyx",
                "%% Variable width box for table cells",
                r"\newenvironment{cellvarwidth}[1][t]",
                r"  {\begin{varwidth}[#1]{\linewidth}}",
                r"  {\@finalstrut\@arstrutbox\end{varwidth}}",
            ],
        )
        del_complete_lines(document.preamble, ["% Added by lyx2lyx", r"\usepackage{varwidth}"])
frontispiece_def = [
r"### Inserted by lyx2lyx (frontispiece layout) ###",
r"Style Frontispiece",
r" CopyStyle Titlehead",
r" LatexName frontispiece",
r"End",
]
def convert_koma_frontispiece(document):
    """Remove local KOMA frontispiece definition.

    LyX 2.4 provides the Frontispiece style natively for the KOMA-Script
    ("scr*") classes, so a matching local layout copy can simply be
    dropped. Bug fix: the old code additionally loaded the unrelated
    "ruby" module when the local layout was found — a copy-paste slip
    from the ruby-module conversion.
    """
    if document.textclass[:3] != "scr":
        return
    document.del_local_layout(frontispiece_def)
def revert_koma_frontispiece(document):
    """Add local KOMA frontispiece definition"""
    if document.textclass[:3] != "scr":
        return
    # Only needed when the style is actually used in the body.
    if find_token(document.body, "\\begin_layout Frontispiece", 0) == -1:
        return
    document.append_local_layout(frontispiece_def)
def revert_spellchecker_ignore(document):
    """Remove all \\spellchecker_ignore header lines (unknown to older LyX)."""
    pos = find_token(document.header, "\\spellchecker_ignore")
    while pos != -1:
        del document.header[pos]
        pos = find_token(document.header, "\\spellchecker_ignore")
def revert_docbook_mathml_prefix(document):
    """Revert the DocBook parameter to choose the prefix for the MathML name space"""
    pos = find_token(document.header, "\\docbook_mathml_prefix")
    while pos != -1:
        del document.header[pos]
        pos = find_token(document.header, "\\docbook_mathml_prefix")
def revert_document_metadata(document):
    """Remove all \\begin_metadata ... \\end_metadata header sections."""
    pos = 0
    while True:
        pos = find_token(document.header, "\\begin_metadata", pos)
        if pos == -1:
            return
        end = find_end_of(document.header, pos, "\\begin_metadata", "\\end_metadata")
        if end == -1:
            # this should not happen
            break
        del document.header[pos : end + 1]
def revert_index_macros(document):
    """Revert inset index macros.

    Translates the 2.4 IndexMacro subinsets (see, seealso, subentry,
    sortkey) and the range/pageformat parameters of Index insets into
    the classic makeindex control sequences ("!", "@", "|see{...}",
    "(", ")", ...) emitted as ERT inside the index inset.
    """
    i = 0
    while True:
        # trailing blank needed here to exclude IndexMacro insets
        i = find_token(document.body, "\\begin_inset Index ", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of index inset at line %d" % i
            )
            continue
        pl = find_token(document.body, "\\begin_layout Plain Layout", i, j)
        if pl == -1:
            document.warning(
                "Malformed LyX document: Can't find plain layout in index inset at line %d" % i
            )
            continue
        # find, store and remove inset params
        pr = find_token(document.body, "range", i, pl)
        prval = get_quoted_value(document.body, "range", pr)
        # page ranges are opened with "(" and closed with ")"
        pagerange = ""
        if prval == "start":
            pagerange = "("
        elif prval == "end":
            pagerange = ")"
        pf = find_token(document.body, "pageformat", i, pl)
        pageformat = get_quoted_value(document.body, "pageformat", pf)
        del document.body[pr : pf + 1]
        # Now re-find (potentially moved) inset end again, and search for subinsets
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of index inset at line %d" % i
            )
            continue
        # We search for all possible subentries in turn, store their
        # content and delete them
        see = []
        seealso = []
        subentry = []
        subentry2 = []
        sortkey = []
        # Two subentries are allowed, thus the duplication
        imacros = ["seealso", "see", "subentry", "subentry", "sortkey"]
        for imacro in imacros:
            iim = find_token(document.body, "\\begin_inset IndexMacro %s" % imacro, i, j)
            if iim == -1:
                continue
            iime = find_end_of_inset(document.body, iim)
            if iime == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of index macro inset at line %d" % i
                )
                continue
            iimpl = find_token(document.body, "\\begin_layout Plain Layout", iim, iime)
            if iimpl == -1:
                document.warning(
                    "Malformed LyX document: Can't find plain layout in index macro inset at line %d"
                    % i
                )
                continue
            iimple = find_end_of_layout(document.body, iimpl)
            if iimple == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of index macro inset plain layout at line %d"
                    % i
                )
                continue
            icont = document.body[iimpl:iimple]
            if imacro == "seealso":
                seealso = icont[1:]
            elif imacro == "see":
                see = icont[1:]
            elif imacro == "subentry":
                # subentries might have their own sortkey!
                xiim = find_token(
                    document.body, "\\begin_inset IndexMacro sortkey", iimpl, iimple
                )
                if xiim != -1:
                    xiime = find_end_of_inset(document.body, xiim)
                    if xiime == -1:
                        document.warning(
                            "Malformed LyX document: Can't find end of index macro inset at line %d"
                            % i
                        )
                    else:
                        xiimpl = find_token(
                            document.body, "\\begin_layout Plain Layout", xiim, xiime
                        )
                        if xiimpl == -1:
                            document.warning(
                                "Malformed LyX document: Can't find plain layout in index macro inset at line %d"
                                % i
                            )
                        else:
                            xiimple = find_end_of_layout(document.body, xiimpl)
                            if xiimple == -1:
                                document.warning(
                                    "Malformed LyX document: Can't find end of index macro inset plain layout at line %d"
                                    % i
                                )
                            else:
                                # the sortkey
                                xicont = document.body[xiimpl + 1 : xiimple]
                                # everything before ................... or after
                                xxicont = (
                                    document.body[iimpl + 1 : xiim]
                                    + document.body[xiime + 1 : iimple]
                                )
                                # construct the latex sequence
                                icont = xicont + put_cmd_in_ert("@") + xxicont[1:]
                if len(subentry) > 0:
                    if icont[0] == "\\begin_layout Plain Layout":
                        subentry2 = icont[1:]
                    else:
                        subentry2 = icont
                else:
                    if icont[0] == "\\begin_layout Plain Layout":
                        subentry = icont[1:]
                    else:
                        subentry = icont
            elif imacro == "sortkey":
                sortkey = icont
            # Everything stored. Delete subinset.
            del document.body[iim : iime + 1]
            # Again re-find (potentially moved) index inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of index inset at line %d" % i
                )
                continue
        # Now insert all stuff, starting from the inset end
        pl = find_token(document.body, "\\begin_layout Plain Layout", i, j)
        if pl == -1:
            document.warning(
                "Malformed LyX document: Can't find plain layout in index inset at line %d" % i
            )
            continue
        ple = find_end_of_layout(document.body, pl)
        if ple == -1:
            document.warning(
                "Malformed LyX document: Can't find end of index macro inset plain layout at line %d"
                % i
            )
            continue
        if len(see) > 0:
            document.body[ple:ple] = (
                put_cmd_in_ert("|" + pagerange + "see{") + see + put_cmd_in_ert("}")
            )
        elif len(seealso) > 0:
            document.body[ple:ple] = (
                put_cmd_in_ert("|" + pagerange + "seealso{") + seealso + put_cmd_in_ert("}")
            )
        elif pageformat != "default":
            document.body[ple:ple] = put_cmd_in_ert("|" + pagerange + pageformat)
        # inserting at `ple` in reverse order: subentry comes before subentry2
        if len(subentry2) > 0:
            document.body[ple:ple] = put_cmd_in_ert("!") + subentry2
        if len(subentry) > 0:
            document.body[ple:ple] = put_cmd_in_ert("!") + subentry
        if len(sortkey) > 0:
            document.body[pl : pl + 1] = document.body[pl:pl] + sortkey + put_cmd_in_ert("@")
def revert_starred_refs(document):
    """Revert starred refs.

    With hyperref enabled, a ref inset whose ``nolink`` parameter is
    true is replaced by the starred command variant (e.g. \\ref*{...})
    in ERT; without hyperref, the ``nolink`` line is simply dropped.
    The inset contents are scanned line by line with a small state
    machine (``in_inset``).
    """
    i = find_token(document.header, "\\use_hyperref true", 0)
    use_hyperref = i != -1
    i = 0
    in_inset = False
    cmd = ref = ""
    nolink = False
    nolinkline = -1
    while True:
        if not in_inset:
            i = find_token(document.body, "\\begin_inset CommandInset ref", i)
            if i == -1:
                break
            start = i
            end = find_end_of_inset(document.body, i)
            if end == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of inset at line %d" % i
                )
                i += 1
                continue
            # If we are not using hyperref, then we just need to delete the line
            if not use_hyperref:
                k = find_token(document.body, "nolink", i, end)
                if k == -1:
                    i = end
                    continue
                del document.body[k]
                i = end - 1
                continue
            # If we are using hyperref, then we'll need to do more.
            in_inset = True
            i += 1
            continue
        # so we are in an InsetRef
        if i == end:
            in_inset = False
            # If nolink is False, just remove that line
            if nolink == False or cmd == "formatted" or cmd == "labelonly":
                # document.warning("Skipping " + cmd + " " + ref)
                if nolinkline != -1:
                    del document.body[nolinkline]
                nolinkline = -1
                continue
            # We need to construct a new command and put it in ERT
            newcmd = "\\" + cmd + "*{" + ref + "}"
            # document.warning(newcmd)
            newlines = put_cmd_in_ert(newcmd)
            document.body[start : end + 1] = newlines
            # adjust position for the size difference of the replacement
            i += len(newlines) - (end - start) + 1
            # reset variables
            cmd = ref = ""
            nolink = False
            nolinkline = -1
            continue
        l = document.body[i]
        if l.startswith("LatexCommand"):
            # strip 'LatexCommand ' (13 chars)
            cmd = l[13:]
        elif l.startswith("reference"):
            # strip 'reference "' and the closing quote
            ref = l[11:-1]
        elif l.startswith("nolink"):
            # strip 'nolink "' and the closing quote
            tmp = l[8:-1]
            nolink = tmp == "true"
            nolinkline = i
        i += 1
def convert_starred_refs(document):
    """Add the default nolink parameter to every ref inset."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset ref", pos)
        if pos == -1:
            break
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed LyX document: Can't find end of inset at line %d" % pos)
            pos += 1
            continue
        # place the new parameter just before the closing \end_inset
        document.body.insert(end - 1, 'nolink "false"')
        pos = end + 1
def revert_familydefault(document):
    "Revert \\font_default_family for non-TeX fonts"
    if find_token(document.header, "\\use_non_tex_fonts true", 0) == -1:
        return
    pos = find_token(document.header, "\\font_default_family", 0)
    if pos == -1:
        document.warning("Malformed LyX document: Can't find \\font_default_family header")
        return
    family = get_value(document.header, "\\font_default_family", pos)
    if family == "default":
        return
    # move the non-default family choice into the preamble
    document.header[pos] = "\\font_default_family default"
    add_to_preamble(document, ["\\renewcommand{\\familydefault}{\\" + family + "}"])
def convert_hyper_other(document):
    '''Classify "run:" links as other.

    Bug fix: the failure warning used ``"..." << str(i)``, which raises
    TypeError on strings; it now uses string concatenation.
    '''
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset href", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Cannot find end of inset at line " + str(i))
            i += 1
            continue
        k = find_token(document.body, 'type "', i, j)
        if k != -1:
            # not a "Web" type. Continue.
            i = j
            continue
        t = find_token(document.body, "target", i, j)
        if t == -1:
            document.warning("Malformed hyperlink inset at line " + str(i))
            i = j
            continue
        # 'target "run:...': the value starts after 'target "' (8 chars)
        if document.body[t][8:12] == "run:":
            document.body.insert(t, 'type "other"')
        i += 1
def revert_hyper_other(document):
    '''Revert other link type to ERT and "run:" to Web.

    Bug fix: the failure warning used ``"..." << str(i)``, which raises
    TypeError on strings; it now uses string concatenation.
    '''
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset href", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Cannot find end of inset at line " + str(i))
            i += 1
            continue
        k = find_token(document.body, 'type "other"', i, j)
        if k == -1:
            i = j
            continue
        # build command
        n = find_token(document.body, "name", i, j)
        t = find_token(document.body, "target", i, j)
        if n == -1 or t == -1:
            document.warning("Malformed hyperlink inset at line " + str(i))
            i = j
            continue
        # strip 'name "' / 'target "' and the closing quotes
        name = document.body[n][6:-1]
        target = document.body[t][8:-1]
        if target[:4] == "run:":
            # "run:" targets are supported as Web links: just drop the type
            del document.body[k]
        else:
            cmd = r"\href{" + target + "}{" + name + "}"
            ecmd = put_cmd_in_ert(cmd)
            document.body[i : j + 1] = ecmd
        i += 1
# Classes whose acknowledg(e)ment style was renamed in LyX 2.4, mapped to
# the new (2.4) style name. Counterpart of ack_layouts_old below.
ack_layouts_new = {
    "aa": "Acknowledgments",
    "aapaper": "Acknowledgments",
    "aastex": "Acknowledgments",
    "aastex62": "Acknowledgments",
    "achemso": "Acknowledgments",
    "acmart": "Acknowledgments",
    "AEA": "Acknowledgments",
    "apa": "Acknowledgments",
    "copernicus": "Acknowledgments",
    "egs": "Acknowledgments",  # + Acknowledgment
    "elsart": "Acknowledgment",
    "isprs": "Acknowledgments",
    "iucr": "Acknowledgments",
    "kluwer": "Acknowledgments",
    "svglobal3": "Acknowledgments",
    "svglobal": "Acknowledgment",
    "svjog": "Acknowledgment",
    "svmono": "Acknowledgment",
    "svmult": "Acknowledgment",
    "svprobth": "Acknowledgment",
}
# The pre-2.4 style names for the same classes.
ack_layouts_old = {
    "aa": "Acknowledgement",
    "aapaper": "Acknowledgement",
    "aastex": "Acknowledgement",
    "aastex62": "Acknowledgement",
    "achemso": "Acknowledgement",
    "acmart": "Acknowledgements",
    "AEA": "Acknowledgement",
    "apa": "Acknowledgements",
    "copernicus": "Acknowledgements",
    "egs": "Acknowledgements",  # + Acknowledgement
    # NOTE(review): "Acknowledegment" looks misspelled but presumably
    # matches the historic elsart layout name — confirm before changing.
    "elsart": "Acknowledegment",
    "isprs": "Acknowledgements",
    "iucr": "Acknowledgements",
    "kluwer": "Acknowledgements",
    "svglobal3": "Acknowledgements",
    "svglobal": "Acknowledgement",
    "svjog": "Acknowledgement",
    "svmono": "Acknowledgement",
    "svmult": "Acknowledgement",
    "svprobth": "Acknowledgement",
}
def convert_acknowledgment(document):
    "Fix spelling of acknowledgment styles"
    if document.textclass not in ack_layouts_old:
        return
    old = "\\begin_layout " + ack_layouts_old[document.textclass]
    new = "\\begin_layout " + ack_layouts_new[document.textclass]
    pos = 0
    while True:
        pos = find_token(document.body, old, pos)
        if pos == -1:
            break
        document.body[pos] = new
    if document.textclass != "egs":
        return
    # egs has a second (singular) style in addition
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_layout Acknowledgement", pos)
        if pos == -1:
            break
        document.body[pos] = "\\begin_layout Acknowledgment"
def revert_acknowledgment(document):
    "Restore old spelling of acknowledgment styles"
    if document.textclass not in ack_layouts_new:
        return
    new = "\\begin_layout " + ack_layouts_new[document.textclass]
    old = "\\begin_layout " + ack_layouts_old[document.textclass]
    pos = 0
    while True:
        pos = find_token(document.body, new, pos)
        if pos == -1:
            break
        document.body[pos] = old
    if document.textclass != "egs":
        return
    # egs has a second (singular) style in addition
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_layout Acknowledgment", pos)
        if pos == -1:
            break
        document.body[pos] = "\\begin_layout Acknowledgement"
ack_theorem_def = [
r"### Inserted by lyx2lyx (ams extended theorems) ###",
r"### This requires theorems-ams-extended module to be loaded",
r"Style Acknowledgement",
r" CopyStyle Remark",
r" LatexName acknowledgement",
r' LabelString "Acknowledgement \thetheorem."',
r" Preamble",
r" \theoremstyle{remark}",
r" \newtheorem{acknowledgement}[thm]{\protect\acknowledgementname}",
r" EndPreamble",
r" LangPreamble",
r" \providecommand{\acknowledgementname}{_(Acknowledgement)}",
r" EndLangPreamble",
r" BabelPreamble",
r" \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
r" EndBabelPreamble",
r" DocBookTag para",
r' DocBookAttr role="acknowledgement"',
r' DocBookItemTag ""',
r"End",
]
ackStar_theorem_def = [
r"### Inserted by lyx2lyx (ams extended theorems) ###",
r"### This requires a theorems-ams-extended-* module to be loaded",
r"Style Acknowledgement*",
r" CopyStyle Remark*",
r" LatexName acknowledgement*",
r' LabelString "Acknowledgement."',
r" Preamble",
r" \theoremstyle{remark}",
r" \newtheorem*{acknowledgement*}{\protect\acknowledgementname}",
r" EndPreamble",
r" LangPreamble",
r" \providecommand{\acknowledgementname}{_(Acknowledgement)}",
r" EndLangPreamble",
r" BabelPreamble",
r" \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
r" EndBabelPreamble",
r" DocBookTag para",
r' DocBookAttr role="acknowledgement"',
r' DocBookItemTag ""',
r"End",
]
ack_bytype_theorem_def = [
r"### Inserted by lyx2lyx (ams extended theorems) ###",
r"### This requires theorems-ams-extended-bytype module to be loaded",
r"Counter acknowledgement",
r" GuiName Acknowledgment",
r"End",
r"Style Acknowledgement",
r" CopyStyle Remark",
r" LatexName acknowledgement",
r' LabelString "Acknowledgement \theacknowledgement."',
r" Preamble",
r" \theoremstyle{remark}",
r" \newtheorem{acknowledgement}{\protect\acknowledgementname}",
r" EndPreamble",
r" LangPreamble",
r" \providecommand{\acknowledgementname}{_(Acknowledgement)}",
r" EndLangPreamble",
r" BabelPreamble",
r" \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
r" EndBabelPreamble",
r" DocBookTag para",
r' DocBookAttr role="acknowledgement"',
r' DocBookItemTag ""',
r"End",
]
ack_chap_bytype_theorem_def = [
r"### Inserted by lyx2lyx (ams extended theorems) ###",
r"### This requires theorems-ams-extended-chap-bytype module to be loaded",
r"Counter acknowledgement",
r" GuiName Acknowledgment",
r" Within chapter",
r"End",
r"Style Acknowledgement",
r" CopyStyle Remark",
r" LatexName acknowledgement",
r' LabelString "Acknowledgement \theacknowledgement."',
r" Preamble",
r" \theoremstyle{remark}",
r" \ifx\thechapter\undefined",
r" \newtheorem{acknowledgement}{\protect\acknowledgementname}",
r" \else",
r" \newtheorem{acknowledgement}{\protect\acknowledgementname}[chapter]",
r" \fi",
r" EndPreamble",
r" LangPreamble",
r" \providecommand{\acknowledgementname}{_(Acknowledgement)}",
r" EndLangPreamble",
r" BabelPreamble",
r" \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
r" EndBabelPreamble",
r" DocBookTag para",
r' DocBookAttr role="acknowledgement"',
r' DocBookItemTag ""',
r"End",
]
def convert_ack_theorems(document):
    """Put removed acknowledgement theorems to local layout.

    LyX 2.4 dropped the Acknowledgement styles from the
    theorems-ams-extended* modules, so documents still using these layouts
    get the style definitions appended to their local layout.  Only the
    first matching module is handled (mirroring the original if/elif
    chain), and each of the two variants (plain / starred) is appended at
    most once.
    """
    # Module name -> local layout def for the plain "Acknowledgement" style.
    # The starred variant (ackStar_theorem_def) is the same for all three.
    # Order matters: it reproduces the original if/elif priority.
    module_defs = (
        ("theorems-ams-extended-bytype", ack_bytype_theorem_def),
        ("theorems-ams-extended-chap-bytype", ack_chap_bytype_theorem_def),
        ("theorems-ams-extended", ack_theorem_def),
    )
    modules = document.get_module_list()
    for module, ack_def in module_defs:
        if module not in modules:
            continue
        haveAck = False
        haveStarAck = False
        i = 0
        # Scan the body for Acknowledgement(*) layouts until both variants
        # have been added or the body is exhausted.
        while not (haveAck and haveStarAck):
            i = find_token(document.body, "\\begin_layout Acknowledgement", i)
            if i == -1:
                break
            if document.body[i] == "\\begin_layout Acknowledgement*" and not haveStarAck:
                document.append_local_layout(ackStar_theorem_def)
                haveStarAck = True
            elif not haveAck:
                # NOTE(review): as in the original, a second starred layout
                # also triggers the plain definition via this branch.
                document.append_local_layout(ack_def)
                haveAck = True
            i += 1
        # At most one module is processed (original elif chain).
        break
def revert_ack_theorems(document):
    """Remove acknowledgement theorems from local layout"""
    # Module name -> plain Acknowledgement layout def; the starred variant
    # is shared.  Order reproduces the original if/elif priority, and only
    # the first matching module is handled.
    module_defs = (
        ("theorems-ams-extended-bytype", ack_bytype_theorem_def),
        ("theorems-ams-extended-chap-bytype", ack_chap_bytype_theorem_def),
        ("theorems-ams-extended", ack_theorem_def),
    )
    modules = document.get_module_list()
    for module, ack_def in module_defs:
        if module in modules:
            document.del_local_layout(ackStar_theorem_def)
            document.del_local_layout(ack_def)
            break
def revert_empty_macro(document):
    """Remove macros with empty LaTeX part.

    A FormulaMacro whose definition line ends in "}{}" or "]{}" has an
    empty LaTeX body and cannot be represented in older formats, so the
    whole inset is deleted.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FormulaMacro", i)
        if i == -1:
            break
        cmd = document.body[i + 1]
        if not cmd.endswith(("}{}", "]{}")):
            i += 1
            continue
        j = find_end_of_inset(document.body, i)
        if j == -1:
            # The original looped forever here: with j == -1 the slice
            # [i : 0] deleted nothing and i was never advanced.
            document.warning(
                "Malformed LyX document: Can't find end of FormulaMacro inset at line %d" % i
            )
            i += 1
            continue
        document.body[i : j + 1] = []
def convert_empty_macro(document):
    """In the unlikely event someone defined a macro with empty LaTeX, add {}"""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset FormulaMacro", pos)
        if pos == -1:
            break
        # The macro definition is the line following the inset opening;
        # an empty LaTeX part ends in "}{}" (no optional arg) or "]{}".
        definition = document.body[pos + 1]
        if definition.endswith(("}{}", "]{}")):
            # Replace the trailing empty group with a literal "{\{\}}".
            document.body[pos + 1] = definition[:-2] + "{\\{\\}}"
        pos += 1
def convert_cov_options(document):
    """Update examples item argument structure.

    Moves the item argument of the consecutive-example and subexample
    layouts from slot item:1 to item:2, and shifts the post arguments of
    the interlinear gloss insets up to make room for new arguments.
    """
    if "linguistics" not in document.get_module_list():
        return
    layouts = ["Numbered Examples (consecutive)", "Subexample"]
    for layout in layouts:
        i = 0
        while True:
            i = find_token(document.body, "\\begin_layout %s" % layout, i)
            if i == -1:
                break
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of example layout at line %d" % i
                )
                i += 1
                continue
            k = find_token(document.body, "\\begin_inset Argument item:1", i, j)
            if k != -1:
                document.body[k] = "\\begin_inset Argument item:2"
            i += 1
    # Shift gloss arguments.  The renames are ordered high-to-low so a
    # just-renamed argument is never matched again by a later pattern
    # (e.g. post:1 -> post:2 runs after post:2 -> post:4).
    gloss_renames = (
        (
            "\\begin_inset Flex Interlinear Gloss (2 Lines)",
            (("post:2", "post:4"), ("post:1", "post:2")),
        ),
        (
            "\\begin_inset Flex Interlinear Gloss (3 Lines)",
            (("post:3", "post:6"), ("post:2", "post:4"), ("post:1", "post:2")),
        ),
    )
    for inset, renames in gloss_renames:
        i = 0
        while True:
            i = find_token(document.body, inset, i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of gloss inset at line %d" % i
                )
                i += 1
                continue
            for old, new in renames:
                k = find_token(document.body, "\\begin_inset Argument " + old, i, j)
                if k != -1:
                    document.body[k] = "\\begin_inset Argument " + new
            i += 1
def _revert_linggloss_arg(document, argtoken, i, j, label):
    """Extract and remove one gloss Argument inset within body[i:j].

    Returns the inset's content lines, [] if the argument is absent, or
    None if the document is malformed (a warning is issued in that case).
    The inset is deleted together with its wrapping paragraph when the
    paragraph contains nothing else.
    """
    arg = find_token(document.body, argtoken, i, j)
    if arg == -1:
        return []
    endarg = find_end_of_inset(document.body, arg)
    argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
    if argbeginPlain == -1:
        document.warning("Malformed LyX document: Can't find %s plain Layout" % label)
        return None
    argendPlain = find_end_of_inset(document.body, argbeginPlain)
    content = document.body[argbeginPlain + 1 : argendPlain - 2]
    # remove Arg insets and paragraph, if it only contains this inset
    if (
        document.body[arg - 1] == "\\begin_layout Plain Layout"
        and find_end_of_layout(document.body, arg - 1) == endarg + 3
    ):
        del document.body[arg - 1 : endarg + 4]
    else:
        del document.body[arg : endarg + 1]
    return content
def revert_linggloss2(document):
    """Revert gloss with new args to ERT.

    Interlinear gloss insets that use the new post:1..post:6 arguments are
    rewritten as raw \\digloss / \\trigloss commands in ERT.  The original
    repeated the same extraction stanza six times; it is factored into
    _revert_linggloss_arg() above with identical search/delete order.
    """
    if "linguistics" not in document.get_module_list():
        return
    cov_req = False
    glosses = [
        "\\begin_inset Flex Interlinear Gloss (2 Lines)",
        "\\begin_inset Flex Interlinear Gloss (3 Lines)",
    ]
    for glosse in glosses:
        i = 0
        while True:
            i = find_token(document.body, glosse, i + 1)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Gloss inset")
                continue
            # Check if we have new options; otherwise nothing to do.
            if (
                find_token(document.body, "\\begin_inset Argument post:1", i, j) == -1
                and find_token(document.body, "\\begin_inset Argument post:3", i, j) == -1
                and find_token(document.body, "\\begin_inset Argument post:5", i, j) == -1
            ):
                continue
            optargcontent = _revert_linggloss_arg(
                document, "\\begin_inset Argument 1", i, j, "optarg"
            )
            if optargcontent is None:
                continue
            # Extract post:1 .. post:6 in ascending order (as the original
            # did); j is intentionally not recomputed after the deletions.
            margs = []
            malformed = False
            for n in range(1, 7):
                content = _revert_linggloss_arg(
                    document, "\\begin_inset Argument post:%d" % n, i, j, "arg %d" % n
                )
                if content is None:
                    malformed = True
                    break
                margs.append(content)
            if malformed:
                continue
            (
                marg1content,
                marg2content,
                marg3content,
                marg4content,
                marg5content,
                marg6content,
            ) = margs
            cmd = "\\digloss"
            if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
                cmd = "\\trigloss"
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endInset = find_end_of_inset(document.body, i)
            endPlain = find_end_of_layout(document.body, beginPlain)
            precontent = put_cmd_in_ert(cmd)
            if len(optargcontent) > 0:
                precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
            precontent += put_cmd_in_ert("{")
            postcontent = put_cmd_in_ert("}")
            if len(marg1content) > 0:
                postcontent += put_cmd_in_ert("[") + marg1content + put_cmd_in_ert("]")
            postcontent += put_cmd_in_ert("{") + marg2content + put_cmd_in_ert("}")
            if len(marg3content) > 0:
                postcontent += put_cmd_in_ert("[") + marg3content + put_cmd_in_ert("]")
            postcontent += put_cmd_in_ert("{") + marg4content + put_cmd_in_ert("}")
            if cmd == "\\trigloss":
                if len(marg5content) > 0:
                    postcontent += put_cmd_in_ert("[") + marg5content + put_cmd_in_ert("]")
                postcontent += put_cmd_in_ert("{") + marg6content + put_cmd_in_ert("}")
            document.body[endPlain : endInset + 1] = postcontent
            document.body[beginPlain + 1 : beginPlain] = precontent
            del document.body[i : beginPlain + 1]
            if not cov_req:
                document.append_local_layout("Requires covington")
                cov_req = True
            i = beginPlain
def revert_exarg2(document):
    """Revert linguistic examples with new arguments to ERT.

    Rewrites Numbered Example (multiline/consecutive) and Subexample
    layouts as raw covington \\begin{example|examples|subexamples} ...
    \\end{...} ERT, converting the LyX argument insets (optional Argument 1,
    item:1, item:2) back into the bracketed LaTeX arguments.

    Fix over the original: optargcontent is now initialized before the
    `if arg != -1` branch.  Previously it was only assigned inside that
    branch but read unconditionally when building `cmd`, raising NameError
    (or reusing a stale value from a previous iteration) for a layout with
    an item argument but no optional argument.
    """
    if "linguistics" not in document.get_module_list():
        return
    cov_req = False
    # find_token matches line prefixes, so "Numbered Example" also catches
    # "Numbered Examples (multiline)" and "Numbered Examples (consecutive)".
    layouts = ["Numbered Example", "Subexample"]
    for layout in layouts:
        i = 0
        while True:
            i = find_token(document.body, "\\begin_layout %s" % layout, i + 1)
            if i == -1:
                break
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of example layout")
                continue
            consecex = document.body[i] == "\\begin_layout Numbered Examples (consecutive)"
            subexpl = document.body[i] == "\\begin_layout Subexample"
            singleex = document.body[i] == "\\begin_layout Numbered Examples (multiline)"
            layouttype = "\\begin_layout Numbered Examples (multiline)"
            if consecex:
                layouttype = "\\begin_layout Numbered Examples (consecutive)"
            elif subexpl:
                layouttype = "\\begin_layout Subexample"
            # Extend [i, l] over a run of consecutive layouts of the same
            # type (multiline examples always form a single paragraph).
            k = i
            l = j
            while True:
                if singleex:
                    break
                m = find_end_of_layout(document.body, k)
                # check for consecutive layouts
                k = find_token(document.body, "\\begin_layout", m)
                if k == -1 or document.body[k] != layouttype:
                    break
                l = find_end_of_layout(document.body, k)
                if l == -1:
                    document.warning("Malformed LyX document: Can't find end of example layout")
                    continue
            arg = find_token(document.body, "\\begin_inset Argument 1", i, l)
            if (
                arg != -1
                and layouttype
                != "\\begin_layout " + get_containing_layout(document.body, arg)[0]
            ):
                # this is not our argument!
                arg = -1
            if subexpl or arg == -1:
                # Without an optional argument there is only work to do when
                # an item argument is present.
                iarg = find_token(document.body, "\\begin_inset Argument item:1", i, l)
                if iarg == -1:
                    continue
            # Bug fix: make sure optargcontent is defined on every path (it
            # is read unconditionally when building `cmd` below).
            optargcontent = ""
            if arg != -1:
                endarg = find_end_of_inset(document.body, arg)
                optargcontent = ""
                argbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", arg, endarg
                )
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find optarg plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                optargcontent = lyx2latex(
                    document, document.body[argbeginPlain + 1 : argendPlain - 2]
                )
                # This is a verbatim argument
                optargcontent = re.sub(r"textbackslash{}", r"", optargcontent)
            itemarg = ""
            iarg = find_token(document.body, "\\begin_inset Argument item:1", i, j)
            if iarg != -1:
                endiarg = find_end_of_inset(document.body, iarg)
                iargcontent = ""
                iargbeginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", iarg, endiarg
                )
                if iargbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find optarg plain Layout")
                    continue
                iargendPlain = find_end_of_inset(document.body, iargbeginPlain)
                itemarg = (
                    "<" + lyx2latex(document, document.body[iargbeginPlain:iargendPlain]) + ">"
                )
            iarg2 = find_token(document.body, "\\begin_inset Argument item:2", i, j)
            if iarg2 != -1:
                endiarg2 = find_end_of_inset(document.body, iarg2)
                iarg2content = ""
                iarg2beginPlain = find_token(
                    document.body, "\\begin_layout Plain Layout", iarg2, endiarg2
                )
                if iarg2beginPlain == -1:
                    document.warning("Malformed LyX document: Can't find optarg plain Layout")
                    continue
                iarg2endPlain = find_end_of_inset(document.body, iarg2beginPlain)
                itemarg += (
                    "["
                    + lyx2latex(document, document.body[iarg2beginPlain:iarg2endPlain])
                    + "]"
                )
            if itemarg == "":
                itemarg = " "
            # remove Arg insets and paragraph, if it only contains this inset
            if arg != -1:
                if (
                    document.body[arg - 1] == "\\begin_layout Plain Layout"
                    and find_end_of_layout(document.body, arg - 1) == endarg + 3
                ):
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]
            if iarg != -1:
                # Positions shifted by the deletion above; re-find the inset.
                iarg = find_token(document.body, "\\begin_inset Argument item:1", i, j)
                if iarg == -1:
                    document.warning("Unable to re-find item:1 Argument")
                else:
                    endiarg = find_end_of_inset(document.body, iarg)
                    if (
                        document.body[iarg - 1] == "\\begin_layout Plain Layout"
                        and find_end_of_layout(document.body, iarg - 1) == endiarg + 3
                    ):
                        del document.body[iarg - 1 : endiarg + 4]
                    else:
                        del document.body[iarg : endiarg + 1]
            if iarg2 != -1:
                iarg2 = find_token(document.body, "\\begin_inset Argument item:2", i, j)
                if iarg2 == -1:
                    document.warning("Unable to re-find item:2 Argument")
                else:
                    endiarg2 = find_end_of_inset(document.body, iarg2)
                    if (
                        document.body[iarg2 - 1] == "\\begin_layout Plain Layout"
                        and find_end_of_layout(document.body, iarg2 - 1) == endiarg2 + 3
                    ):
                        del document.body[iarg2 - 1 : endiarg2 + 4]
                    else:
                        del document.body[iarg2 : endiarg2 + 1]
            envname = "example"
            if consecex:
                envname = "examples"
            elif subexpl:
                envname = "subexamples"
            cmd = put_cmd_in_ert("\\begin{" + envname + "}[" + optargcontent + "]")
            # re-find end of layout
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Subexample layout")
                continue
            l = j
            # Convert each following layout of the run into a Standard
            # paragraph starting with an ERT \item.
            while True:
                # check for consecutive layouts
                k = find_token(document.body, "\\begin_layout", l)
                if k == -1 or document.body[k] != layouttype:
                    break
                if not singleex:
                    subitemarg = ""
                    m = find_end_of_layout(document.body, k)
                    iarg = find_token(document.body, "\\begin_inset Argument item:1", k, m)
                    if iarg != -1:
                        endiarg = find_end_of_inset(document.body, iarg)
                        iargcontent = ""
                        iargbeginPlain = find_token(
                            document.body, "\\begin_layout Plain Layout", iarg, endiarg
                        )
                        if iargbeginPlain == -1:
                            document.warning(
                                "Malformed LyX document: Can't find optarg plain Layout"
                            )
                            continue
                        iargendPlain = find_end_of_inset(document.body, iargbeginPlain)
                        subitemarg = (
                            "<"
                            + lyx2latex(document, document.body[iargbeginPlain:iargendPlain])
                            + ">"
                        )
                    iarg2 = find_token(document.body, "\\begin_inset Argument item:2", k, m)
                    if iarg2 != -1:
                        endiarg2 = find_end_of_inset(document.body, iarg2)
                        iarg2content = ""
                        iarg2beginPlain = find_token(
                            document.body,
                            "\\begin_layout Plain Layout",
                            iarg2,
                            endiarg2,
                        )
                        if iarg2beginPlain == -1:
                            document.warning(
                                "Malformed LyX document: Can't find optarg plain Layout"
                            )
                            continue
                        iarg2endPlain = find_end_of_inset(document.body, iarg2beginPlain)
                        subitemarg += (
                            "["
                            + lyx2latex(document, document.body[iarg2beginPlain:iarg2endPlain])
                            + "]"
                        )
                    if subitemarg == "":
                        subitemarg = " "
                    document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert(
                        "\\item" + subitemarg
                    )
                    # Refind and remove arg insets
                    if iarg != -1:
                        iarg = find_token(document.body, "\\begin_inset Argument item:1", k, m)
                        if iarg == -1:
                            document.warning("Unable to re-find item:1 Argument")
                        else:
                            endiarg = find_end_of_inset(document.body, iarg)
                            if (
                                document.body[iarg - 1] == "\\begin_layout Plain Layout"
                                and find_end_of_layout(document.body, iarg - 1) == endiarg + 3
                            ):
                                del document.body[iarg - 1 : endiarg + 4]
                            else:
                                del document.body[iarg : endiarg + 1]
                    if iarg2 != -1:
                        iarg2 = find_token(document.body, "\\begin_inset Argument item:2", k, m)
                        if iarg2 == -1:
                            document.warning("Unable to re-find item:2 Argument")
                        else:
                            endiarg2 = find_end_of_inset(document.body, iarg2)
                            if (
                                document.body[iarg2 - 1] == "\\begin_layout Plain Layout"
                                and find_end_of_layout(document.body, iarg2 - 1) == endiarg2 + 3
                            ):
                                del document.body[iarg2 - 1 : endiarg2 + 4]
                            else:
                                del document.body[iarg2 : endiarg2 + 1]
                else:
                    document.body[k : k + 1] = ["\\begin_layout Standard"]
                l = find_end_of_layout(document.body, k)
                if l == -1:
                    document.warning("Malformed LyX document: Can't find end of example layout")
                    continue
            # Close the environment after the last layout of the run ...
            endev = put_cmd_in_ert("\\end{" + envname + "}")
            document.body[l:l] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
            # ... and open it (plus the first \item) in place of the first one.
            document.body[i : i + 1] = (
                ["\\begin_layout Standard"]
                + cmd
                + ["\\end_layout", "", "\\begin_layout Standard"]
                + put_cmd_in_ert("\\item" + itemarg)
            )
            if not cov_req:
                document.append_local_layout("Requires covington")
                cov_req = True
def revert_cov_options(document):
    """Revert examples item argument structure.

    Inverse of convert_cov_options(): moves the item argument of the
    consecutive-example and subexample layouts back from slot item:2 to
    item:1, and shifts the gloss post arguments back down.
    """
    if "linguistics" not in document.get_module_list():
        return
    layouts = ["Numbered Examples (consecutive)", "Subexample"]
    for layout in layouts:
        i = 0
        while True:
            i = find_token(document.body, "\\begin_layout %s" % layout, i)
            if i == -1:
                break
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of example layout at line %d" % i
                )
                i += 1
                continue
            k = find_token(document.body, "\\begin_inset Argument item:2", i, j)
            if k != -1:
                document.body[k] = "\\begin_inset Argument item:1"
            i += 1
    # Shift gloss arguments back.  The renames are ordered low-to-high so a
    # just-renamed argument is never matched again by a later pattern
    # (e.g. post:4 -> post:2 runs after post:2 -> post:1).
    gloss_renames = (
        (
            "\\begin_inset Flex Interlinear Gloss (2 Lines)",
            (("post:2", "post:1"), ("post:4", "post:2")),
        ),
        (
            "\\begin_inset Flex Interlinear Gloss (3 Lines)",
            (("post:2", "post:1"), ("post:4", "post:2"), ("post:6", "post:3")),
        ),
    )
    for inset, renames in gloss_renames:
        i = 0
        while True:
            i = find_token(document.body, inset, i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of gloss inset at line %d" % i
                )
                i += 1
                continue
            for old, new in renames:
                k = find_token(document.body, "\\begin_inset Argument " + old, i, j)
                if k != -1:
                    document.body[k] = "\\begin_inset Argument " + new
            i += 1
def revert_expreambles(document):
    """Revert covington example preamble flex insets to ERT"""
    # Flex inset name -> covington command it reverts to.
    flex_commands = (
        ("Example Preamble", "\\expreamble"),
        ("Subexample Preamble", "\\subexpreamble"),
        ("Example Postamble", "\\expostamble"),
        ("Subexample Postamble", "\\subexpostamble"),
    )
    for flexname, command in flex_commands:
        revert_flex_inset(document, flexname, command)
def revert_hequotes(document):
    """Revert Hebrew Quotation marks.

    Switches the quote style header back to english and maps the Hebrew
    quote insets to their Polish lookalikes.
    """
    i = find_token(document.header, "\\quotes_style hebrew", 0)
    if i != -1:
        document.header[i] = "\\quotes_style english"
    # NOTE(review): single quotes map to the same *double* Polish marks as
    # the doubles — this matches the original behavior; confirm intent.
    mapping = {
        "\\begin_inset Quotes dld": "\\begin_inset Quotes prd",
        "\\begin_inset Quotes drd": "\\begin_inset Quotes pld",
        "\\begin_inset Quotes dls": "\\begin_inset Quotes prd",
        "\\begin_inset Quotes drs": "\\begin_inset Quotes pld",
    }
    i = 0
    while True:
        # Pass the current position: the original omitted it, rescanning
        # the whole body from line 0 on every hit (quadratic) and looping
        # forever on any "Quotes d..." variant not handled below.
        i = find_token(document.body, "\\begin_inset Quotes d", i)
        if i == -1:
            return
        if document.body[i] in mapping:
            document.body[i] = mapping[document.body[i]]
        i += 1
def revert_formatted_refs(document):
    """Drop the \\use_formatted_ref setting from the document header."""
    pos = find_token(document.header, "\\use_formatted_ref", 0)
    if pos == -1:
        return
    del document.header[pos]
def revert_box_fcolor(document):
    """Turn the "default" frame color of boxed insets back into "black"."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Box Boxed", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning(
                "Malformed LyX document: Can't find end of framed box inset at line %d" % pos
            )
            continue
        col = find_token(document.body, 'framecolor "default"', pos, end)
        if col != -1:
            document.body[col] = 'framecolor "black"'
##
# Conversion hub
#
# LyX release strings whose native file format this conversion chain targets.
supported_versions = ["2.4.0", "2.4"]
# Forward conversion steps: an entry [N, [functions]] converts a document
# from file format N-1 to format N; an empty function list is a pure
# format-number bump.
convert = [
    [545, [convert_lst_literalparam]],
    [546, []],
    [547, []],
    [548, []],
    [549, []],
    [550, [convert_fontenc]],
    [551, []],
    [552, []],
    [553, []],
    [554, []],
    [555, []],
    [556, []],
    [557, [convert_vcsinfo]],
    [558, [removeFrontMatterStyles]],
    [559, []],
    [560, []],
    [561, [convert_latexFonts]],  # Handle dejavu, ibmplex fonts in GUI
    [562, []],
    [563, []],
    [564, []],
    [565, [convert_AdobeFonts]],  # Handle adobe fonts in GUI
    [566, [convert_hebrew_parentheses]],
    [567, []],
    [568, []],
    [569, []],
    [570, []],
    [571, []],
    [572, [convert_notoFonts]],  # Added options thin, light, extralight for Noto
    [573, [convert_inputencoding_namechange]],
    [574, [convert_ruby_module, convert_utf8_japanese]],
    [575, [convert_lineno, convert_aaencoding]],
    [576, []],
    [577, [convert_linggloss]],
    [578, []],
    [579, []],
    [580, []],
    [581, [convert_osf]],
    [
        582,
        [
            convert_AdobeFonts,
            convert_latexFonts,
            convert_notoFonts,
            convert_CantarellFont,
            convert_FiraFont,
        ],
    ],  # old fonts re-converted due to extra options
    [
        583,
        [
            convert_ChivoFont,
            convert_Semibolds,
            convert_NotoRegulars,
            convert_CrimsonProFont,
        ],
    ],
    [584, []],
    [585, [convert_pagesizes]],
    [586, []],
    [587, [convert_pagesizenames]],
    [588, []],
    [589, [convert_totalheight]],
    [590, [convert_changebars]],
    [591, [convert_postpone_fragile]],
    [592, []],
    [593, [convert_counter_maintenance]],
    [594, []],
    [595, []],
    [596, [convert_parskip]],
    [597, [convert_libertinus_rm_fonts]],
    [598, []],
    [599, []],
    [600, []],
    [601, [convert_math_refs]],
    [602, [convert_branch_colors]],
    [603, []],
    [604, []],
    [605, [convert_vcolumns2]],
    [606, [convert_koma_frontispiece]],
    [607, []],
    [608, []],
    [609, []],
    [610, []],
    [611, []],
    [612, [convert_starred_refs]],
    [613, []],
    [614, [convert_hyper_other]],
    [615, [convert_acknowledgment, convert_ack_theorems]],
    [616, [convert_empty_macro]],
    [617, [convert_cov_options]],
    [618, []],
    [619, []],
    [620, []],
]
# Reversion steps, listed from the newest format downwards: an entry
# [N, [functions]] takes a document from format N+1 back to format N.
# NOTE(review): format number 598 appears twice below (revert_hrquotes and
# revert_nopagebreak) — presumably the driver tolerates split steps;
# confirm against the lyx2lyx chain builder before renumbering.
revert = [
    [619, [revert_box_fcolor]],
    [618, [revert_formatted_refs]],
    [617, [revert_hequotes]],
    [616, [revert_expreambles, revert_exarg2, revert_linggloss2, revert_cov_options]],
    [615, [revert_empty_macro]],
    [614, [revert_ack_theorems, revert_acknowledgment]],
    [613, [revert_hyper_other]],
    [612, [revert_familydefault]],
    [611, [revert_starred_refs]],
    [610, []],
    [609, [revert_index_macros]],
    [608, [revert_document_metadata]],
    [607, [revert_docbook_mathml_prefix]],
    [606, [revert_spellchecker_ignore]],
    [605, [revert_koma_frontispiece]],
    [604, [revert_vcolumns2]],
    [603, [revert_branch_darkcols]],
    [602, [revert_darkmode_graphics]],
    [601, [revert_branch_colors]],
    [600, []],
    [599, [revert_math_refs]],
    [598, [revert_hrquotes]],
    [598, [revert_nopagebreak]],
    [597, [revert_docbook_table_output]],
    [596, [revert_libertinus_rm_fonts, revert_libertinus_sftt_fonts]],
    [595, [revert_parskip, revert_line_vspaces]],
    [594, [revert_ams_spaces]],
    [593, [revert_counter_inset]],
    [592, [revert_counter_maintenance]],
    [591, [revert_colrow_tracking]],
    [590, [revert_postpone_fragile]],
    [589, [revert_changebars]],
    [588, [revert_totalheight]],
    [587, [revert_memoir_endnotes, revert_enotez, revert_theendnotes]],
    [586, [revert_pagesizenames]],
    [585, [revert_dupqualicites]],
    [584, [revert_pagesizes, revert_komafontsizes]],
    [583, [revert_vcsinfo_rev_abbrev]],
    [582, [revert_ChivoFont, revert_CrimsonProFont]],
    [581, [revert_CantarellFont, revert_FiraFont]],
    [580, [revert_texfontopts, revert_osf]],
    [
        579,
        [
            revert_minionpro,
            revert_plainNotoFonts_xopts,
            revert_notoFonts_xopts,
            revert_IBMFonts_xopts,
            revert_AdobeFonts_xopts,
            revert_font_opts,
        ],
    ],  # keep revert_font_opts last!
    [578, [revert_babelfont]],
    [577, [revert_drs]],
    [576, [revert_linggloss, revert_subexarg]],
    [575, [revert_new_languages]],
    [574, [revert_lineno, revert_aaencoding]],
    [573, [revert_ruby_module, revert_utf8_japanese]],
    [572, [revert_inputencoding_namechange]],
    [571, [revert_notoFonts]],
    [570, [revert_cmidruletrimming]],
    [569, [revert_bibfileencodings]],
    [568, [revert_tablestyle]],
    [567, [revert_soul]],
    [566, [revert_malayalam]],
    [565, [revert_hebrew_parentheses]],
    [564, [revert_AdobeFonts]],
    [563, [revert_lformatinfo]],
    [562, [revert_listpargs]],
    [561, [revert_l7ninfo]],
    [560, [revert_latexFonts]],  # Handle dejavu, ibmplex fonts in user preamble
    [559, [revert_timeinfo, revert_namenoextinfo]],
    [558, [revert_dateinfo]],
    [557, [addFrontMatterStyles]],
    [556, [revert_vcsinfo]],
    [555, [revert_bibencoding]],
    [554, [revert_vcolumns]],
    [553, [revert_stretchcolumn]],
    [552, [revert_tuftecite]],
    [551, [revert_floatpclass, revert_floatalignment]],
    [550, [revert_nospellcheck]],
    [549, [revert_fontenc]],
    [548, []],  # dummy format change
    [547, [revert_lscape]],
    [546, [revert_xcharter]],
    [545, [revert_paratype]],
    [544, [revert_lst_literalparam]],
]
if __name__ == "__main__":
    # This module is used as a library by the lyx2lyx driver; there is
    # nothing to do when it is executed directly.
    pass