mirror of
https://git.lyx.org/repos/lyx.git
synced 2024-11-13 22:49:20 +00:00
52295693d6
Please the linter where it makes sense: * Avoid bare exceptions; * Use formatted strings instead of string interpolation
2311 lines
74 KiB
Python
2311 lines
74 KiB
Python
# This file is part of lyx2lyx
|
|
# Copyright (C) 2016 The LyX team
|
|
#
|
|
# This program is free software; you can redistribute it and/or
|
|
# modify it under the terms of the GNU General Public License
|
|
# as published by the Free Software Foundation; either version 2
|
|
# of the License, or (at your option) any later version.
|
|
#
|
|
# This program is distributed in the hope that it will be useful,
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
# GNU General Public License for more details.
|
|
#
|
|
# You should have received a copy of the GNU General Public License
|
|
# along with this program; if not, write to the Free Software
|
|
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
|
|
|
"""Convert files to the file format generated by lyx 2.3"""
|
|
|
|
import re
|
|
|
|
from lyx2lyx_tools import (
|
|
add_to_preamble,
|
|
insert_document_option,
|
|
insert_to_preamble,
|
|
is_document_option,
|
|
latex_length,
|
|
put_cmd_in_ert,
|
|
remove_document_option,
|
|
revert_font_attrs,
|
|
revert_language,
|
|
)
|
|
|
|
# Uncomment only what you need to import, please.
|
|
from parser_tools import (
|
|
del_complete_lines,
|
|
del_token,
|
|
del_value,
|
|
find_complete_lines,
|
|
find_end_of_inset,
|
|
find_end_of_layout,
|
|
find_re,
|
|
find_substring,
|
|
find_token,
|
|
find_token_backwards,
|
|
get_bool_value,
|
|
get_containing_inset,
|
|
get_containing_layout,
|
|
get_quoted_value,
|
|
get_value,
|
|
is_in_inset,
|
|
set_bool_value,
|
|
)
|
|
|
|
####################################################################
|
|
# Private helper functions
|
|
|
|
|
|
###############################################################################
|
|
###
|
|
### Conversion and reversion routines
|
|
###
|
|
###############################################################################
|
|
|
|
|
|
def convert_microtype(document):
    "Add microtype settings."
    header_pos = find_token(document.header, "\\font_tt_scale")
    pkg_line = find_token(document.preamble, "\\usepackage{microtype}")
    if pkg_line == -1:
        # No manual package load: microtype stays disabled.
        document.header.insert(header_pos + 1, "\\use_microtype false")
        return
    # Turn the manual preamble load into the native header setting.
    document.header.insert(header_pos + 1, "\\use_microtype true")
    del document.preamble[pkg_line]
    if pkg_line and document.preamble[pkg_line - 1] == "% Added by lyx2lyx":
        del document.preamble[pkg_line - 1]
|
|
|
|
|
|
def revert_microtype(document):
    "Remove microtype settings."
    # Drop the header flag; re-create the package load if it was enabled.
    if get_bool_value(document.header, "\\use_microtype", delete=True):
        add_to_preamble(document, ["\\usepackage{microtype}"])
|
|
|
|
|
|
def convert_dateinset(document):
    "Convert date external inset to ERT"
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset External", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning(
                "Malformed lyx document: Missing '\\end_inset' in convert_dateinset."
            )
            continue
        # Only "Date" templates are converted; other external insets are kept.
        if get_value(document.body, "template", pos, end) == "Date":
            document.body[pos : end + 1] = put_cmd_in_ert("\\today ")
        pos = end  # skip inset
|
|
|
|
|
|
def convert_inputenc(document):
    """Replace no longer supported input encoding setting."""
    # "pt254" was dropped; "pt154" is the surviving Kazakh encoding name.
    pos = find_token(document.header, "\\inputencoding pt254")
    if pos != -1:
        document.header[pos] = "\\inputencoding pt154"
|
|
|
|
|
|
def convert_ibranches(document):
    'Add "inverted 0" to branch insets'
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Branch", pos + 1)
        if pos == -1:
            return
        # Branches could not be inverted before this format.
        document.body.insert(pos + 1, "inverted 0")
|
|
|
|
|
|
def revert_ibranches(document):
    """Convert inverted branches to explicit anti-branches.

    Older formats have no "inverted" flag, so each inverted branch inset is
    rewritten as a regular inset of a new "Anti-<branch>" branch whose
    selection state is the opposite of the original branch's.
    (Change: messages use f-strings instead of %-interpolation.)
    """
    # Map branch name -> selected state, read from the header.
    ourbranches = {}
    i = 0
    while True:
        i = find_token(document.header, "\\branch", i + 1)
        if i == -1:
            break
        branch = document.header[i][8:].strip()
        selected = get_bool_value(document.header, "\\selected", i + 1, i + 2)
        if selected is None:
            document.warning(
                f"Malformed LyX document: No selection indicator for branch {branch}."
            )
            selected = True
        # the value tells us whether the branch is selected
        ourbranches[branch] = selected

    # Find branch insets, remove the "inverted" tag and
    # convert inverted insets to "Anti-OldBranch" insets.
    antibranches = {}
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Branch", i + 1)
        if i == -1:
            break
        inverted = get_bool_value(document.body, "inverted", i + 1, i + 2, delete=True)
        if inverted is None:
            document.warning("Malformed LyX document: Missing 'inverted' tag in branch inset.")
            continue
        if inverted:
            branch = document.body[i][20:].strip()
            if branch not in antibranches:
                # Choose a name that cannot clash with an existing anti-branch.
                antibranch = "Anti-" + branch
                while antibranch in antibranches:
                    antibranch = "x" + antibranch
                antibranches[branch] = antibranch
            else:
                antibranch = antibranches[branch]
            document.body[i] = "\\begin_inset Branch " + antibranch

    # now we need to add the new branches to the header,
    # each one right before the branch it negates
    for old, new in antibranches.items():
        i = find_token(document.header, "\\branch " + old, 0)
        if i == -1:
            document.warning(f"Can't find branch {old} even though we found it before!")
            continue
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document! Can't find end of branch " + old)
            continue
        # Anti-branch selection state is the inverse of the original's.
        lines = ["\\branch " + new, f"\\selected {int(not ourbranches[old])}"]
        # these are the old lines telling us color, etc.
        lines += document.header[i + 2 : j + 1]
        document.header[i:i] = lines
|
|
|
|
|
|
# Local-layout lines inserted by revert_beamer_article_styles and removed
# again by convert_beamer_article_styles.
# NOTE(review): element [1] is rewritten in place ("Input scrartcl.layout")
# for the scrarticle-beamer class, so this module-level list is mutable
# shared state between those two functions.
beamer_article_styles = [
    "### Inserted by lyx2lyx (more [scr]article styles) ###",
    "Input article.layout",
    "Input beamer.layout",
    "Provides geometry 0",
    "Provides hyperref 0",
    "DefaultFont",
    "     Family                Roman",
    "     Series                Medium",
    "     Shape                 Up",
    "     Size                  Normal",
    "     Color                 None",
    "EndFont",
    "Preamble",
    "     \\usepackage{beamerarticle,pgf}",
    "     % this default might be overridden by plain title style",
    "     \\newcommand\\makebeamertitle{\\frame{\\maketitle}}%",
    "     \\AtBeginDocument{",
    "             \\let\\origtableofcontents=\\tableofcontents",
    "             \\def\\tableofcontents{\\@ifnextchar[{\\origtableofcontents}{\\gobbletableofcontents}}",
    "             \\def\\gobbletableofcontents#1{\\origtableofcontents}",
    "     }",
    "EndPreamble",
    "### End of insertion by lyx2lyx (more [scr]article styles) ###",
]
|
|
|
|
|
|
def revert_beamer_article_styles(document):
    "Include (scr)article styles in beamer article"
    if document.textclass not in ("article-beamer", "scrarticle-beamer"):
        return
    if document.textclass == "scrarticle-beamer":
        # The KOMA variant inputs scrartcl.layout instead of article.layout.
        beamer_article_styles[1] = "Input scrartcl.layout"
    document.append_local_layout(beamer_article_styles)
|
|
|
|
|
|
def convert_beamer_article_styles(document):
    "Remove included (scr)article styles in beamer article"
    if document.textclass not in ("article-beamer", "scrarticle-beamer"):
        return
    if document.textclass == "scrarticle-beamer":
        # Match what revert_beamer_article_styles inserted for this class.
        beamer_article_styles[1] = "Input scrartcl.layout"
    document.del_local_layout(beamer_article_styles)
|
|
|
|
|
|
def revert_new_babel_languages(document):
    """Revert "bosnian", "friulan", "macedonian", "piedmontese", "romansh".

    Set the document language to English but use correct babel setting.
    """
    for lang in ("bosnian", "friulan", "macedonian", "piedmontese", "romansh"):
        if lang in ("bosnian", "macedonian"):
            # These are only supported by babel
            revert_language(document, lang, lang, "")
        else:
            # These are supported by babel and polyglossia
            revert_language(document, lang, lang, lang)
|
|
|
|
|
|
# TODO:
|
|
# def convert_new_babel_languages(document)
|
|
# set to native support if get_value(document.header, "\\options") in
|
|
# ["bosnian", "friulan", "macedonian", "piedmontese", "romansh"]
|
|
# and Babel is used.
|
|
|
|
|
|
def revert_amharic(document):
    """Set the document language to English but assure Amharic output."""
    revert_language(document, "amharic", "", "amharic")
|
|
|
|
|
|
def revert_asturian(document):
    """Set the document language to English but assure Asturian output."""
    revert_language(document, "asturian", "", "asturian")
|
|
|
|
|
|
def revert_kannada(document):
    """Set the document language to English but assure Kannada output."""
    revert_language(document, "kannada", "", "kannada")
|
|
|
|
|
|
def revert_khmer(document):
    """Set the document language to English but assure Khmer output."""
    revert_language(document, "khmer", "", "khmer")
|
|
|
|
|
|
def revert_urdu(document):
    """Set the document language to English but assure Urdu output."""
    revert_language(document, "urdu", "", "urdu")
|
|
|
|
|
|
def revert_syriac(document):
    """Set the document language to English but assure Syriac output."""
    revert_language(document, "syriac", "", "syriac")
|
|
|
|
|
|
def revert_quotes(document):
    "Revert Quote Insets in verbatim or Hebrew context to plain quotes"

    # First handle verbatim insets: quote insets inside ERT/listings/URL/
    # Chunk/Sweave/S-R content are replaced by literal " or ' characters.
    i = 0
    j = 0
    while i < len(document.body):
        words = document.body[i].split()
        if (
            len(words) > 1
            and words[0] == "\\begin_inset"
            and (
                words[1] in ["ERT", "listings"]
                or (len(words) > 2 and words[2] in ["URL", "Chunk", "Sweave", "S/R"])
            )
        ):
            j = find_end_of_inset(document.body, i)

            if j == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of "
                    + words[1]
                    + " inset at line "
                    + str(i)
                )
                i += 1
                continue
            # Replace every quote inset found in the window [i, j).
            # NOTE(review): j is not adjusted after each replacement shrinks
            # the body, so the window may extend past the inset's real end —
            # confirm it cannot capture quote insets outside the inset.
            while True:
                k = find_token(document.body, "\\begin_inset Quotes", i, j)
                if k == -1:
                    i += 1
                    break
                l = find_end_of_inset(document.body, k)
                if l == -1:
                    document.warning(
                        "Malformed LyX document: Can't find end of Quote inset at line "
                        + str(k)
                    )
                    i = k
                    continue
                # Quote codes ending in "s" are single quotes; others double.
                replace = '"'
                if document.body[k].endswith("s"):
                    replace = "'"
                document.body[k : l + 2] = [replace]
        else:
            i += 1
            continue

    # Now verbatim layouts: same replacement, but triggered by the
    # paragraph's layout name rather than by an enclosing inset.
    i = 0
    j = 0
    while i < len(document.body):
        words = document.body[i].split()
        if (
            len(words) > 1
            and words[0] == "\\begin_layout"
            and words[1] in ["Verbatim", "Verbatim*", "Code", "Author_Email", "Author_URL"]
        ):
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of "
                    + words[1]
                    + " layout at line "
                    + str(i)
                )
                i += 1
                continue
            # Same stale-j caveat as in the verbatim-inset pass above.
            while True:
                k = find_token(document.body, "\\begin_inset Quotes", i, j)
                if k == -1:
                    i += 1
                    break
                l = find_end_of_inset(document.body, k)
                if l == -1:
                    document.warning(
                        "Malformed LyX document: Can't find end of Quote inset at line "
                        + str(k)
                    )
                    i = k
                    continue
                replace = '"'
                if document.body[k].endswith("s"):
                    replace = "'"
                document.body[k : l + 2] = [replace]
        else:
            i += 1
            continue

    # Now handle Hebrew: bail out early unless the document language is
    # Hebrew or some span switches to it.
    if (
        not document.language == "hebrew"
        and find_token(document.body, "\\lang hebrew", 0) == -1
    ):
        return

    i = 0
    j = 0
    while True:
        k = find_token(document.body, "\\begin_inset Quotes", i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning(
                "Malformed LyX document: Can't find end of Quote inset at line " + str(k)
            )
            i = k
            continue
        # Determine the effective language at the quote: the nearest \lang
        # before it within the same paragraph, else the document language.
        hebrew = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            hebrew = document.language == "hebrew"
        elif document.body[ql] == "\\lang hebrew":
            hebrew = True
        if hebrew:
            replace = '"'
            if document.body[k].endswith("s"):
                replace = "'"
            document.body[k : l + 2] = [replace]
        i = l
|
|
|
|
|
|
# Local-layout block inserted by revert_iopart and removed by convert_iopart.
# NOTE(review): the last two string literals have no comma between them and
# therefore concatenate into a single element; presumably a separate trailing
# "" element was intended — confirm before changing, since both functions
# must keep matching on the same (concatenated) value.
iopart_local_layout = [
    "### Inserted by lyx2lyx (stdlayouts) ###",
    "Input stdlayouts.inc",
    "### End of insertion by lyx2lyx (stdlayouts) ###" "",
]
|
|
|
|
|
|
def revert_iopart(document):
    "Input new styles via local layout"
    # Only the iopart class needs the extra standard layouts.
    if document.textclass == "iopart":
        document.append_local_layout(iopart_local_layout)
|
|
|
|
|
|
def convert_iopart(document):
    "Remove local layout we added, if it is there"
    if document.textclass == "iopart":
        document.del_local_layout(iopart_local_layout)
|
|
|
|
|
|
def convert_quotestyle(document):
    "Convert \\quotes_language to \\quotes_style"
    pos = find_token(document.header, "\\quotes_language", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_language!")
        return
    # Same value, new header keyword.
    style = get_value(document.header, "\\quotes_language", pos)
    document.header[pos] = f"\\quotes_style {style}"
|
|
|
|
|
|
def revert_quotestyle(document):
    "Revert \\quotes_style to \\quotes_language"
    pos = find_token(document.header, "\\quotes_style", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_style!")
        return
    # Same value, old header keyword.
    style = get_value(document.header, "\\quotes_style", pos)
    document.header[pos] = f"\\quotes_language {style}"
|
|
|
|
|
|
def revert_plainquote(document):
    "Revert plain quote insets"

    # First, revert the style setting
    pos = find_token(document.header, "\\quotes_style plain", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style english"

    # now the insets
    i = 0
    while True:
        k = find_token(document.body, "\\begin_inset Quotes q", i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning(
                "Malformed LyX document: Can't find end of Quote inset at line " + str(k)
            )
            i = k
            continue
        # Codes ending in "s" denote single quotes, anything else double.
        replace = "'" if document.body[k].endswith("s") else '"'
        document.body[k : l + 2] = [replace]
        i = l
|
|
|
|
|
|
def convert_frenchquotes(document):
    "Convert french quote insets to swiss"

    # First, convert the style setting
    pos = find_token(document.header, "\\quotes_style french", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style swiss"

    # now the insets: swap the style letter f -> c in each quote code
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Quotes f", pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        newcode = code.replace("f", "c", 1)
        document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
|
|
|
|
|
|
def revert_swissquotes(document):
    "Revert swiss quote insets to french"

    # First, revert the style setting
    pos = find_token(document.header, "\\quotes_style swiss", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style french"

    # now the insets: swap the style letter c -> f in each quote code
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Quotes c", pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        newcode = code.replace("c", "f", 1)
        document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
|
|
|
|
|
|
def revert_britishquotes(document):
    "Revert british quote insets to english"

    # First, revert the style setting
    pos = find_token(document.header, "\\quotes_style british", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style english"

    # now the insets: b -> e, and swap the d/s level marker
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Quotes b", pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        newcode = code.replace("b", "e", 1)
        if code[2] == "d":
            # opening mark
            newcode = newcode.replace("d", "s")
        else:
            # closing mark
            newcode = newcode.replace("s", "d")
        document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
|
|
|
|
|
|
def revert_swedishgquotes(document):
    "Revert swedish quote insets"

    # First, revert the style setting
    pos = find_token(document.header, "\\quotes_style swedishg", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style danish"

    # now the insets
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Quotes w", pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        if code[2] == "d":
            # outer marks
            newcode = code.replace("w", "a", 1).replace("r", "l")
        else:
            # inner marks
            newcode = code.replace("w", "s", 1)
        document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
|
|
|
|
|
|
def revert_frenchquotes(document):
    "Revert french inner quote insets"

    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Quotes f", pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        # Only inner marks (level "s") are rewritten; outer ones are kept.
        if code[2] == "s":
            newcode = code.replace("f", "e", 1).replace("s", "d")
            document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
|
|
|
|
|
|
def revert_frenchinquotes(document):
    "Revert inner frenchin quote insets"

    # First, revert the style setting
    pos = find_token(document.header, "\\quotes_style frenchin", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style french"

    # now the insets: i -> f, with inner marks promoted to double
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Quotes i", pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        newcode = code.replace("i", "f", 1)
        if code[2] == "s":
            # inner marks
            newcode = newcode.replace("s", "d")
        document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
|
|
|
|
|
|
def revert_russianquotes(document):
    "Revert russian quote insets"

    # First, revert the style setting
    pos = find_token(document.header, "\\quotes_style russian", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style french"

    # now the insets
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Quotes r", pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        if code[2] == "s":
            # inner marks
            newcode = code.replace("r", "g", 1).replace("s", "d")
        else:
            # outer marks
            newcode = code.replace("r", "f", 1)
        document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
|
|
|
|
|
|
def revert_dynamicquotes(document):
    "Revert dynamic quote insets"

    # First, drop the dedicated header setting
    i = find_token(document.header, "\\dynamic_quotes", 0)
    if i != -1:
        del document.header[i]

    # Get the global quote style
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    # Map the style name to its one-letter inset code; unknown styles
    # fall back to english ("e"), as before.
    style_codes = {
        "english": "e",
        "swedish": "s",
        "german": "g",
        "polish": "p",
        "swiss": "c",
        "danish": "a",
        "plain": "q",
        "british": "b",
        "swedishg": "w",
        "french": "f",
        "frenchin": "i",
        "russian": "r",
    }
    s = style_codes.get(style, "e")

    # now transform the insets
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Quotes x", i)
        if i == -1:
            return
        document.body[i] = document.body[i].replace("x", s)
        i += 1
|
|
|
|
|
|
def revert_cjkquotes(document):
    "Revert cjk quote insets"

    # Get global style
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    # True for both "cjk" (corner brackets) and "cjkangle" styles.
    global_cjk = style.find("cjk") != -1

    if global_cjk:
        document.header[i] = "\\quotes_style english"
        # transform dynamic insets: "j" = corner brackets, "k" = angle brackets
        s = "j"
        if style == "cjkangle":
            s = "k"
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset Quotes x", i)
            if i == -1:
                break
            document.body[i] = document.body[i].replace("x", s)
            i += 1

    # Languages whose context makes literal CJK bracket characters safe;
    # elsewhere the brackets are emulated with math formulas.
    cjk_langs = [
        "chinese-simplified",
        "chinese-traditional",
        "japanese",
        "japanese-cjk",
        "korean",
    ]

    # First pass: corner-bracket ("j") quote insets.
    i = 0
    j = 0
    while True:
        k = find_token(document.body, "\\begin_inset Quotes j", i)
        if k == -1:
            break
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning(
                "Malformed LyX document: Can't find end of Quote inset at line " + str(k)
            )
            i = k
            continue
        # Effective language at the inset: nearest preceding \lang within
        # the same paragraph, else the document language.
        cjk = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[ql].split()[1] in cjk_langs:
            cjk = True
        # NOTE(review): the search below starts at i, not k — it appears to
        # rely on the inset at k being the first "Quotes" token at or after
        # i; confirm this invariant holds after replacements.
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        replace = []
        if val[2] == "s":
            # inner marks
            if val[1] == "l":
                # inner opening mark
                if cjk:
                    replace = ["\u300e"]
                else:
                    replace = ["\\begin_inset Formula $\\llceil$", "\\end_inset"]
            else:
                # inner closing mark
                if cjk:
                    replace = ["\u300f"]
                else:
                    replace = ["\\begin_inset Formula $\\rrfloor$", "\\end_inset"]
        else:
            # outer marks
            if val[1] == "l":
                # outer opening mark
                if cjk:
                    replace = ["\u300c"]
                else:
                    replace = ["\\begin_inset Formula $\\lceil$", "\\end_inset"]
            else:
                # outer closing mark
                if cjk:
                    replace = ["\u300d"]
                else:
                    replace = ["\\begin_inset Formula $\\rfloor$", "\\end_inset"]

        document.body[k : l + 1] = replace
        i = l

    # Second pass: angle-bracket ("k") quote insets, same structure.
    i = 0
    j = 0
    while True:
        k = find_token(document.body, "\\begin_inset Quotes k", i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning(
                "Malformed LyX document: Can't find end of Quote inset at line " + str(k)
            )
            i = k
            continue
        cjk = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[ql].split()[1] in cjk_langs:
            cjk = True
        # Same i-vs-k search-start caveat as in the first pass.
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        replace = []
        if val[2] == "s":
            # inner marks
            if val[1] == "l":
                # inner opening mark
                if cjk:
                    replace = ["\u3008"]
                else:
                    replace = ["\\begin_inset Formula $\\langle$", "\\end_inset"]
            else:
                # inner closing mark
                if cjk:
                    replace = ["\u3009"]
                else:
                    replace = ["\\begin_inset Formula $\\rangle$", "\\end_inset"]
        else:
            # outer marks
            if val[1] == "l":
                # outer opening mark
                if cjk:
                    replace = ["\u300a"]
                else:
                    replace = [
                        "\\begin_inset Formula $\\langle\\kern -2.5pt\\langle$",
                        "\\end_inset",
                    ]
            else:
                # outer closing mark
                if cjk:
                    replace = ["\u300b"]
                else:
                    replace = [
                        "\\begin_inset Formula $\\rangle\\kern -2.5pt\\rangle$",
                        "\\end_inset",
                    ]

        document.body[k : l + 1] = replace
        i = l
|
|
|
|
|
|
def convert_crimson(document):
    """Transform preamble code to native font setting.

    Detects a manual ``\\usepackage{cochineal}`` (optionally with
    ``[proportional,osf]``) in the user preamble, removes it, and sets the
    native \\font_roman / \\font_osf header entries instead.
    """
    # Quick-check:
    i = find_substring(document.preamble, "{cochineal}")
    if i == -1:
        return
    # Find and delete user-preamble code:
    if document.preamble[i] == "\\usepackage[proportional,osf]{cochineal}":
        osf = True
    elif document.preamble[i] == "\\usepackage{cochineal}":
        osf = False
    else:
        return
    del document.preamble[i]
    if i and document.preamble[i - 1] == "% Added by lyx2lyx":
        del document.preamble[i - 1]

    # Convert to native font setting:
    j = find_token(document.header, "\\font_roman")
    if j == -1:
        # Fixed: the fallback literal used to be "\font_roman" (an
        # accidental \f form-feed escape) and the join result was written
        # to document.header[-1], clobbering the last header line.
        # Insert a well-formed new entry instead.
        document.header.insert(-1, '\\font_roman "cochineal" "default"')
    else:
        romanfont = document.header[j].split()
        romanfont[1] = '"cochineal"'
        document.header[j] = " ".join(romanfont)
    try:
        set_bool_value(document.header, "\\font_osf", osf)
    except ValueError:  # no \font_osf setting in document.header
        if osf:
            document.header.insert(-1, "\\font_osf true")
|
|
|
|
|
|
def revert_crimson(document):
    """Revert native Cochineal/Crimson font definition to LaTeX.

    (Change: preamble line built with an f-string instead of %-interpolation.)
    """
    i = find_token(document.header, '\\font_roman "cochineal"')
    if i == -1:
        return
    # replace unsupported font setting
    document.header[i] = document.header[i].replace("cochineal", "default")
    # no need for preamble code with system fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    # transfer old style figures setting to package options
    j = find_token(document.header, "\\font_osf true")
    if j != -1:
        options = "[proportional,osf]"
        document.header[j] = "\\font_osf false"
    else:
        options = ""
    add_to_preamble(document, [f"\\usepackage{options}{{cochineal}}"])
|
|
|
|
|
|
def revert_cochinealmath(document):
    "Revert cochineal newtxmath definitions to LaTeX"
    # Only relevant with TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    i = find_token(document.header, '\\font_math "cochineal-ntxm"', 0)
    if i == -1:
        return
    add_to_preamble(document, "\\usepackage[cochineal]{newtxmath}")
    document.header[i] = document.header[i].replace("cochineal-ntxm", "auto")
|
|
|
|
|
|
def revert_labelonly(document):
    """Revert labelonly tag for InsetRef.

    A ``labelonly`` reference has no counterpart in older formats, so the
    whole inset is replaced by the raw label in ERT.
    (Change: messages use f-strings instead of %-interpolation.)
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(f"Can't find end of reference inset at line {i}!!")
            i += 1
            continue
        k = find_token(document.body, "LatexCommand labelonly", i, j)
        if k == -1:
            # Not a labelonly reference: skip the whole inset.
            i = j
            continue
        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning(f"Can't find label for reference at line {i}!")
            i = j + 1
            continue
        document.body[i : j + 1] = put_cmd_in_ert([label])
        i += 1
|
|
|
|
|
|
def revert_plural_refs(document):
    """Revert plural and capitalized formatted references.

    With refstyle enabled, a formatted reference with "plural" or "caps"
    set is turned into an explicit ``\\Prefref`` / ``\\prefref[s]`` ERT
    call; otherwise the (now unsupported) options are simply dropped.
    """
    # find_token returns -1 when the token is absent; the previous test
    # compared against 0 and thus treated "not found" as "refstyle on".
    i = find_token(document.header, "\\use_refstyle 1", 0)
    use_refstyle = i != -1

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(f"Can't find end of reference inset at line {i}!!")
            i += 1
            continue

        plural = caps = suffix = False
        # The file token is spelled "LatexCommand" (as at the other call
        # sites in this module); the previous "LaTeXCommand" never matched.
        k = find_token(document.body, "LatexCommand formatted", i, j)
        if k != -1 and use_refstyle:
            plural = get_bool_value(document.body, "plural", i, j, False)
            caps = get_bool_value(document.body, "caps", i, j, False)
            label = get_quoted_value(document.body, "reference", i, j)
            if label:
                try:
                    (prefix, suffix) = label.split(":", 1)
                except ValueError:  # no ":" in the label
                    document.warning(
                        f"No `:' separator in formatted reference at line {i}!"
                    )
            else:
                document.warning(f"Can't find label for reference at line {i}!")

        # this effectively tests also for use_refstyle and a formatted reference
        # we do this complicated test because we would otherwise do this erasure
        # over and over and over
        if not ((plural or caps) and suffix):
            del_token(document.body, "plural", i, j)
            del_token(document.body, "caps", i, j - 1)  # since we deleted a line
            i = j - 1
            continue

        if caps:
            prefix = prefix[0].title() + prefix[1:]
        cmd = "\\" + prefix + "ref"
        if plural:
            cmd += "[s]"
        cmd += "{" + suffix + "}"
        document.body[i : j + 1] = put_cmd_in_ert([cmd])
        i += 1
|
|
|
|
|
|
def revert_noprefix(document):
    """Revert labelonly tags with 'noprefix' set.

    Such references print the label without its "type:" prefix; in older
    formats this is emulated by putting the bare suffix into ERT.
    (Changes: bare ``except:`` narrowed to ``ValueError``; f-strings.)
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(f"Can't find end of reference inset at line {i}!!")
            i += 1
            continue
        k = find_token(document.body, "LatexCommand labelonly", i, j)
        noprefix = False
        if k != -1:
            noprefix = get_bool_value(document.body, "noprefix", i, j)
        if not noprefix:
            # either it was not a labelonly command, or else noprefix was not set.
            # in that case, we just delete the option.
            del_token(document.body, "noprefix", i, j)
            i = j
            continue
        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning(f"Can't find label for reference at line {i}!")
            i = j + 1
            continue
        try:
            (prefix, suffix) = label.split(":", 1)
        except ValueError:  # no ":" in the label
            document.warning(f"No `:' separator in formatted reference at line {i}!")
            # we'll leave this as an ordinary labelonly reference
            del_token(document.body, "noprefix", i, j)
            i = j
            continue
        document.body[i : j + 1] = put_cmd_in_ert([suffix])
        i += 1
|
|
|
|
|
|
def revert_biblatex(document):
    """Revert biblatex support.

    Reverts the cite engine to natbib, moves the biblatex package loading
    (with bib/cite style and options) to the preamble, converts bibtex
    insets to \\printbibliography ERT (wrapping the original inset in a
    note), reverts biblatex-only citation commands to ERT, and emulates
    the old natbib-based workaround via a local layout.
    """

    #
    # Header
    #

    # 1. Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # 2. Store biblatex state and revert to natbib
    biblatex = False
    if engine in ["biblatex", "biblatex-natbib"]:
        biblatex = True
        document.header[i] = "\\cite_engine natbib"

    # 3. Store and remove new document headers
    bibstyle = ""
    i = find_token(document.header, "\\biblatex_bibstyle", 0)
    if i != -1:
        bibstyle = get_value(document.header, "\\biblatex_bibstyle", i)
        del document.header[i]

    citestyle = ""
    i = find_token(document.header, "\\biblatex_citestyle", 0)
    if i != -1:
        citestyle = get_value(document.header, "\\biblatex_citestyle", i)
        del document.header[i]

    biblio_options = ""
    i = find_token(document.header, "\\biblio_options", 0)
    if i != -1:
        biblio_options = get_value(document.header, "\\biblio_options", i)
        del document.header[i]

    if biblatex:
        # load biblatex explicitly with the collected options
        bbxopts = "[natbib=true"
        if bibstyle != "":
            bbxopts += ",bibstyle=" + bibstyle
        if citestyle != "":
            bbxopts += ",citestyle=" + citestyle
        if biblio_options != "":
            bbxopts += "," + biblio_options
        bbxopts += "]"
        add_to_preamble(document, "\\usepackage" + bbxopts + "{biblatex}")

    #
    # Body
    #

    # 1. Bibtex insets
    i = 0
    bibresources = []
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(f"Can't find end of bibtex inset at line {i}!!")
            i += 1
            continue
        bibs = get_quoted_value(document.body, "bibfiles", i, j)
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        # store resources
        if bibs:
            bibresources += bibs.split(",")
        else:
            document.warning(f"Can't find bibfiles for bibtex inset at line {i}!")
        # remove biblatexopts line
        k = find_token(document.body, "biblatexopts", i, j)
        if k != -1:
            del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        # Insert ERT \\printbibliography and wrap bibtex inset to a Note
        if biblatex:
            pcmd = "printbibliography"
            if opts:
                pcmd += "[" + opts + "]"
            repl = [
                "\\begin_inset ERT",
                "status open",
                "",
                "\\begin_layout Plain Layout",
                "",
                "",
                "\\backslash",
                pcmd,
                "\\end_layout",
                "",
                "\\end_inset",
                "",
                "",
                "\\end_layout",
                "",
                "\\begin_layout Standard",
                "\\begin_inset Note Note",
                "status open",
                "",
                "\\begin_layout Plain Layout",
            ]
            repl += document.body[i : j + 1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i : j + 1] = repl
            j += 27

        i = j + 1

    if biblatex:
        for b in bibresources:
            add_to_preamble(document, "\\addbibresource{" + b + ".bib}")

    # 2. Citation insets

    # Specific citation insets used in biblatex that need to be reverted to ERT
    new_citations = {
        "Cite": "Cite",
        "citebyear": "citeyear",
        "citeyear": "cite*",
        "Footcite": "Smartcite",
        "footcite": "smartcite",
        "Autocite": "Autocite",
        "autocite": "autocite",
        "citetitle": "citetitle",
        "citetitle*": "citetitle*",
        "fullcite": "fullcite",
        "footfullcite": "footfullcite",
        "supercite": "supercite",
        "citeauthor": "citeauthor",
        "citeauthor*": "citeauthor*",
        "Citeauthor": "Citeauthor",
        "Citeauthor*": "Citeauthor*",
    }

    # All commands accepted by LyX < 2.3. Everything else throws an error.
    old_citations = [
        "cite",
        "nocite",
        "citet",
        "citep",
        "citealt",
        "citealp",
        "citeauthor",
        "citeyear",
        "citeyearpar",
        "citet*",
        "citep*",
        "citealt*",
        "citealp*",
        "citeauthor*",
        "Citet",
        "Citep",
        "Citealt",
        "Citealp",
        "Citeauthor",
        "Citet*",
        "Citep*",
        "Citealt*",
        "Citealp*",
        "Citeauthor*",
        "fullcite",
        "footcite",
        "footcitet",
        "footcitep",
        "footcitealt",
        "footcitealp",
        "footciteauthor",
        "footciteyear",
        "footciteyearpar",
        "citefield",
        "citetitle",
        "cite*",
    ]

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(f"Can't find end of citation inset at line {i}!!")
            i += 1
            continue
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning(f"Can't find LatexCommand for citation inset at line {i}!")
            i = j + 1
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        if biblatex and cmd in new_citations:
            pre = get_quoted_value(document.body, "before", i, j)
            post = get_quoted_value(document.body, "after", i, j)
            key = get_quoted_value(document.body, "key", i, j)
            if not key:
                document.warning(f"Citation inset at line {i} does not have a key!")
                key = "???"
            # Replace known new commands with ERT
            res = "\\" + new_citations[cmd]
            if pre:
                res += "[" + pre + "]"
            if post:
                res += "[" + post + "]"
            elif pre:
                # an empty post argument is still needed to disambiguate pre
                res += "[]"
            res += "{" + key + "}"
            document.body[i : j + 1] = put_cmd_in_ert([res])
        elif cmd not in old_citations:
            # Reset unknown commands to cite. This is what LyX does as well
            # (but LyX 2.2 would break on unknown commands)
            document.body[k] = "LatexCommand cite"
            document.warning(f"Reset unknown cite command '{cmd}' with cite")
        i = j + 1

    # Emulate the old biblatex-workaround (pretend natbib in order to use the styles)
    if biblatex:
        biblatex_emulation = [
            "### Inserted by lyx2lyx (biblatex emulation) ###",
            "Provides natbib 1",
            "### End of insertion by lyx2lyx (biblatex emulation) ###",
        ]
        document.append_local_layout(biblatex_emulation)
|
|
|
|
|
|
def revert_citekeyonly(document):
    """Revert keyonly cite command to ERT (the bare citation key)."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(f"Can't find end of citation inset at line {i}!!")
            i += 1
            continue
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning(f"Can't find LatexCommand for citation inset at line {i}!")
            i = j + 1
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        if cmd != "keyonly":
            i = j + 1
            continue

        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning(f"Citation inset at line {i} does not have a key!")
        # Replace the inset with the bare key as ERT
        document.body[i : j + 1] = put_cmd_in_ert([key])
        i = j + 1
|
|
|
|
|
|
def revert_bibpackopts(document):
    """Revert support for natbib/jurabib package options.

    Moves \\biblio_options into a local layout PackageOptions line for the
    respective engine package.
    """
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # only natbib and jurabib accept package options here
    if engine not in ["natbib", "jurabib"]:
        return

    i = find_token(document.header, "\\biblio_options", 0)
    if i == -1:
        # Nothing to do if we have no options
        return

    biblio_options = get_value(document.header, "\\biblio_options", i)
    del document.header[i]

    if not biblio_options:
        # Nothing to do for empty options
        return

    bibliography_package_options = [
        "### Inserted by lyx2lyx (bibliography package options) ###",
        f"PackageOptions {engine} {biblio_options}",
        "### End of insertion by lyx2lyx (bibliography package options) ###",
    ]
    document.append_local_layout(bibliography_package_options)
|
|
|
|
|
|
def revert_qualicites(document):
    """Revert qualified citation list commands to ERT.

    With biblatex, known multi-cite commands with per-key pre/post texts are
    rewritten as ERT; otherwise the qualified-list parameter lines are simply
    removed from the inset.
    """

    # Citation insets that support qualified lists, with their LaTeX code
    ql_citations = {
        "cite": "cites",
        "Cite": "Cites",
        "citet": "textcites",
        "Citet": "Textcites",
        "citep": "parencites",
        "Citep": "Parencites",
        "Footcite": "Smartcites",
        "footcite": "smartcites",
        "Autocite": "Autocites",
        "autocite": "autocites",
    }

    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    biblatex = engine in ["biblatex", "biblatex-natbib"]

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(f"Can't find end of citation inset at line {i}!!")
            i += 1
            continue
        pres = find_token(document.body, "pretextlist", i, j)
        posts = find_token(document.body, "posttextlist", i, j)
        if pres == -1 and posts == -1:
            # nothing to do.
            i = j + 1
            continue
        pretexts = get_quoted_value(document.body, "pretextlist", pres)
        posttexts = get_quoted_value(document.body, "posttextlist", posts)
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning(f"Can't find LatexCommand for citation inset at line {i}!")
            i = j + 1
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        if biblatex and cmd in ql_citations:
            pre = get_quoted_value(document.body, "before", i, j)
            post = get_quoted_value(document.body, "after", i, j)
            key = get_quoted_value(document.body, "key", i, j)
            if not key:
                document.warning(f"Citation inset at line {i} does not have a key!")
                key = "???"
            keys = key.split(",")
            # the lists are tab-separated "<key> <text>" entries
            premap = {}
            for pp in pretexts.split("\t"):
                ppp = pp.split(" ", 1)
                if len(ppp) == 2:  # skip empty/malformed entries instead of crashing
                    premap[ppp[0]] = ppp[1]
            postmap = {}
            for pp in posttexts.split("\t"):
                ppp = pp.split(" ", 1)
                if len(ppp) == 2:
                    postmap[ppp[0]] = ppp[1]
            # Replace known new commands with ERT
            if "(" in pre or ")" in pre:
                pre = "{" + pre + "}"
            if "(" in post or ")" in post:
                post = "{" + post + "}"
            res = "\\" + ql_citations[cmd]
            if pre:
                res += "(" + pre + ")"
            if post:
                res += "(" + post + ")"
            elif pre:
                # empty post argument disambiguates the single pre argument
                res += "()"
            for kk in keys:
                if premap.get(kk, "") != "":
                    res += "[" + premap[kk] + "]"
                if postmap.get(kk, "") != "":
                    res += "[" + postmap[kk] + "]"
                elif premap.get(kk, "") != "":
                    res += "[]"
                res += "{" + kk + "}"
            document.body[i : j + 1] = put_cmd_in_ert([res])
        else:
            # just remove the qualified-list parameter lines.
            # Delete by line index (the old code indexed the list with the
            # quoted *values*, a TypeError), higher index first so the lower
            # one stays valid; skip parameters that are not present.
            for line in sorted((pres, posts), reverse=True):
                if line != -1:
                    del document.body[line]
            i += 1
|
|
|
|
|
|
# Command insets that take a "literal" parameter in file format 2.3
# (used by convert_literalparam / revert_literalparam).
command_insets = ["bibitem", "citation", "href", "index_print", "nomenclature"]
|
|
|
|
|
|
def convert_literalparam(document):
    """Add the 'literal' parameter to command insets listed in command_insets."""
    pos = len("\\begin_inset CommandInset ")
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset", i)
        if i == -1:
            break
        inset = document.body[i][pos:].strip()
        if inset not in command_insets:
            i += 1
            continue
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                f"Malformed LyX document: Can't find end of {inset} inset at line {i}"
            )
            i += 1
            continue
        # advance to the first blank line, which ends the parameter block
        while i < j and document.body[i].strip() != "":
            i += 1
        # href is already fully latexified. Here we can switch off literal.
        if inset == "href":
            document.body.insert(i, 'literal "false"')
        else:
            document.body.insert(i, 'literal "true"')
        i = j + 1
|
|
|
|
|
|
def revert_literalparam(document):
    """Remove the 'literal' parameter from command insets."""
    for inset in command_insets:
        i = 0
        while True:
            i = find_token(document.body, f"\\begin_inset CommandInset {inset}", i + 1)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning(
                    f"Malformed LyX document: Can't find end of {inset} inset at line {i}"
                )
                continue
            del_token(document.body, "literal", i, j)
|
|
|
|
|
|
def revert_multibib(document):
    """Revert multibib support.

    With biblatex, the \\multibib unit becomes a "refsection=" biblio option
    and bibbysection bibtex insets are wrapped in ERT/notes. With bibtex,
    bibtopic is enabled and the document is partitioned into btUnit
    environments at each sectioning heading of the chosen unit.
    """

    # 1. Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # 2. Do we use biblatex?
    biblatex = False
    if engine in ["biblatex", "biblatex-natbib"]:
        biblatex = True

    # 3. Store and remove multibib document header
    multibib = ""
    i = find_token(document.header, "\\multibib", 0)
    if i != -1:
        multibib = get_value(document.header, "\\multibib", i)
        del document.header[i]

    if not multibib:
        return

    # 4. The easy part: Biblatex
    if biblatex:
        i = find_token(document.header, "\\biblio_options", 0)
        if i == -1:
            k = find_token(document.header, "\\use_bibtopic", 0)
            if k == -1:
                # this should not happen
                document.warning("Malformed LyX document! No \\use_bibtopic header found!")
                return
            document.header[k - 1 : k - 1] = ["\\biblio_options " + "refsection=" + multibib]
        else:
            biblio_options = get_value(document.header, "\\biblio_options", i)
            if biblio_options:
                biblio_options += ","
            biblio_options += "refsection=" + multibib
            document.header[i] = "\\biblio_options " + biblio_options

        # Bibtex insets
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning(f"Can't find end of bibtex inset at line {i}!!")
                i += 1
                continue
            btprint = get_quoted_value(document.body, "btprint", i, j)
            if btprint != "bibbysection":
                i += 1
                continue
            opts = get_quoted_value(document.body, "biblatexopts", i, j)
            # change btprint line
            k = find_token(document.body, "btprint", i, j)
            if k != -1:
                document.body[k] = 'btprint "btPrintCited"'
            # Insert ERT \\bibbysection and wrap bibtex inset to a Note
            pcmd = "bibbysection"
            if opts:
                pcmd += "[" + opts + "]"
            repl = [
                "\\begin_inset ERT",
                "status open",
                "",
                "\\begin_layout Plain Layout",
                "",
                "",
                "\\backslash",
                pcmd,
                "\\end_layout",
                "",
                "\\end_inset",
                "",
                "",
                "\\end_layout",
                "",
                "\\begin_layout Standard",
                "\\begin_inset Note Note",
                "status open",
                "",
                "\\begin_layout Plain Layout",
            ]
            repl += document.body[i : j + 1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i : j + 1] = repl
            j += 27

            i = j + 1
        return

    # 5. More tricky: Bibtex/Bibtopic
    k = find_token(document.header, "\\use_bibtopic", 0)
    if k == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\use_bibtopic header found!")
        return
    document.header[k] = "\\use_bibtopic true"

    # Possible units. This assumes that the LyX name follows the std,
    # which might not always be the case. But it's as good as we can get.
    units = {
        "part": "Part",
        "chapter": "Chapter",
        "section": "Section",
        "subsection": "Subsection",
    }

    if multibib not in units:
        document.warning(f"Unknown multibib value `{multibib}'!")
        return
    unit = units[multibib]
    btunit = False
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout " + unit, i)
        if i == -1:
            break
        if btunit:
            # close the open btUnit and start a new one before this heading.
            # NB: "begin{btUnit}" and "\\end_layout" must be separate lines
            # (a missing comma used to fuse them via implicit string
            # concatenation, breaking the layout and the i += 21 offset).
            document.body[i - 1 : i - 1] = [
                "\\begin_layout Standard",
                "\\begin_inset ERT",
                "status open",
                "",
                "\\begin_layout Plain Layout",
                "",
                "",
                "\\backslash",
                "end{btUnit}",
                "\\end_layout",
                "\\begin_layout Plain Layout",
                "",
                "\\backslash",
                "begin{btUnit}",
                "\\end_layout",
                "",
                "\\end_inset",
                "",
                "",
                "\\end_layout",
                "",
            ]
            # skip over the 21 inserted lines
            i += 21
        else:
            document.body[i - 1 : i - 1] = [
                "\\begin_layout Standard",
                "\\begin_inset ERT",
                "status open",
                "",
                "\\begin_layout Plain Layout",
                "",
                "",
                "\\backslash",
                "begin{btUnit}",
                "\\end_layout",
                "",
                "\\end_inset",
                "",
                "",
                "\\end_layout",
                "",
            ]
            # skip over the 16 inserted lines
            i += 16
        btunit = True
        i += 1

    if btunit:
        # close the last btUnit before the end of the body
        i = find_token(document.body, "\\end_body", i)
        document.body[i - 1 : i - 1] = [
            "\\begin_layout Standard",
            "\\begin_inset ERT",
            "status open",
            "",
            "\\begin_layout Plain Layout",
            "",
            "",
            "\\backslash",
            "end{btUnit}",
            "\\end_layout",
            "",
            "\\end_inset",
            "",
            "",
            "\\end_layout",
            "",
        ]
|
|
|
|
|
|
def revert_chapterbib(document):
    """Revert chapterbib support.

    With biblatex, a \\newrefsection is inserted before each include inset.
    With bibtopic enabled, each include inset is wrapped in a btUnit.
    Otherwise the chapterbib package is loaded in the preamble.
    """

    # 1. Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # 2. Do we use biblatex?
    biblatex = False
    if engine in ["biblatex", "biblatex-natbib"]:
        biblatex = True

    # 3. Store multibib document header value
    multibib = ""
    i = find_token(document.header, "\\multibib", 0)
    if i != -1:
        multibib = get_value(document.header, "\\multibib", i)

    if not multibib or multibib != "child":
        # nothing to do
        return

    # 4. remove multibib header
    del document.header[i]

    # 5. Biblatex
    if biblatex:
        # find include insets
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset CommandInset include", i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning(f"Can't find end of bibtex inset at line {i}!!")
                i += 1
                continue
            parent = get_containing_layout(document.body, i)
            parbeg = parent[1]

            # Insert ERT \\newrefsection before inset.
            # NB: "newrefsection" and "\\end_layout" are separate lines (a
            # missing comma used to fuse them via implicit concatenation).
            beg = [
                "\\begin_layout Standard",
                "\\begin_inset ERT",
                "status open",
                "",
                "\\begin_layout Plain Layout",
                "",
                "",
                "\\backslash",
                "newrefsection",
                "\\end_layout",
                "",
                "\\end_inset",
                "",
                "",
                "\\end_layout",
                "",
            ]
            document.body[parbeg - 1 : parbeg - 1] = beg
            j += len(beg)
            i = j + 1
        return

    # 6. Bibtex/Bibtopic
    i = find_token(document.header, "\\use_bibtopic", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\use_bibtopic header found!")
        return
    if get_value(document.header, "\\use_bibtopic", i) == "true":
        # find include insets
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset CommandInset include", i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning(f"Can't find end of bibtex inset at line {i}!!")
                i += 1
                continue
            parent = get_containing_layout(document.body, i)
            parbeg = parent[1]
            parend = parent[2]

            # Insert wrap inset into \\begin{btUnit}...\\end{btUnit}
            # (again, the btUnit command and "\\end_layout" are distinct lines)
            beg = [
                "\\begin_layout Standard",
                "\\begin_inset ERT",
                "status open",
                "",
                "\\begin_layout Plain Layout",
                "",
                "",
                "\\backslash",
                "begin{btUnit}",
                "\\end_layout",
                "",
                "\\end_inset",
                "",
                "",
                "\\end_layout",
                "",
            ]
            end = [
                "\\begin_layout Standard",
                "\\begin_inset ERT",
                "status open",
                "",
                "\\begin_layout Plain Layout",
                "",
                "",
                "\\backslash",
                "end{btUnit}",
                "\\end_layout",
                "",
                "\\end_inset",
                "",
                "",
                "\\end_layout",
                "",
            ]
            # insert the closing part first so parbeg stays valid
            document.body[parend + 1 : parend + 1] = end
            document.body[parbeg - 1 : parbeg - 1] = beg
            j += len(beg) + len(end)
            i = j + 1
        return

    # 7. Chapterbib proper
    add_to_preamble(document, ["\\usepackage{chapterbib}"])
|
|
|
|
|
|
def convert_dashligatures(document):
    """Set 'use_dash_ligatures' according to content."""
    # Look for and remove dashligatures workaround from 2.3->2.2 reversion,
    # set use_dash_ligatures to True if found, to None else.
    use_dash_ligatures = (
        del_complete_lines(
            document.preamble,
            [
                "% Added by lyx2lyx",
                r"\renewcommand{\textendash}{--}",
                r"\renewcommand{\textemdash}{---}",
            ],
        )
        or None
    )

    if use_dash_ligatures is None:
        # Look for dashes (Documents by LyX 2.1 or older have "\twohyphens\n"
        # or "\threehyphens\n" as interim representation for -- an ---.)
        lines = document.body
        has_literal_dashes = has_ligature_dashes = False
        # The backslash parts must be raw strings: in a plain string the
        # regex engine would read the "\t" of "\\twohyphens"/"\\threehyphens"
        # as a TAB escape and never match the interim representations.
        dash_pattern = re.compile(".*[\u2013\u2014]" r"|\\twohyphens|\\threehyphens")
        i = j = 0
        while True:
            # skip lines without dashes:
            i = find_re(lines, dash_pattern, i + 1)
            if i == -1:
                break
            line = lines[i]
            # skip label width string (see bug 10243):
            if line.startswith("\\labelwidthstring"):
                continue
            # do not touch hyphens in some insets (cf. lyx_2_2.convert_dashes):
            try:
                inset_type, start, end = get_containing_inset(lines, i)
            except TypeError:  # no containing inset
                inset_type, start, end = "no inset", -1, -1
            if (
                inset_type.split()[0]
                in [
                    "CommandInset",
                    "ERT",
                    "External",
                    "Formula",
                    "FormulaMacro",
                    "Graphics",
                    "IPA",
                    "listings",
                ]
                or inset_type == "Flex Code"
            ):
                i = end
                continue
            try:
                layoutname, start, end, j = get_containing_layout(lines, i)
            except TypeError:  # no (or malformed) containing layout
                document.warning(f"Malformed LyX document: Can't find layout at line {i}")
                continue
            if not layoutname:
                document.warning(
                    f"Malformed LyX document: Missing layout name on line {start}"
                )
            if layoutname == "LyX-Code":
                i = end
                continue

            # literal dash followed by a non-white-character or no-break space:
            if re.search("[\u2013\u2014]([\\S\u00a0\u202f\u2060]|$)", line, flags=re.UNICODE):
                has_literal_dashes = True
            # ligature dash followed by non-white-char or no-break space on next line:
            if re.search(r"(\\twohyphens|\\threehyphens)", line) and re.match(
                "[\\S\u00a0\u202f\u2060]", lines[i + 1], flags=re.UNICODE
            ):
                has_ligature_dashes = True
            if has_literal_dashes and has_ligature_dashes:
                # TODO: insert a warning note in the document?
                document.warning(
                    "This document contained both literal and "
                    '"ligature" dashes.\n Line breaks may have changed. '
                    "See UserGuide chapter 3.9.1 for details."
                )
                break

        if has_literal_dashes and not has_ligature_dashes:
            use_dash_ligatures = False
        elif has_ligature_dashes and not has_literal_dashes:
            use_dash_ligatures = True

    # insert the setting if there is a preferred value
    if use_dash_ligatures is True:
        document.header.insert(-1, "\\use_dash_ligatures true")
    elif use_dash_ligatures is False:
        document.header.insert(-1, "\\use_dash_ligatures false")
|
|
|
|
|
|
def revert_dashligatures(document):
    r"""Remove font ligature settings for en- and em-dashes.

    Revert conversion of \twodashes or \threedashes to literal dashes.
    """
    # (raw docstring: in a plain string "\t" above would be a tab character)
    use_dash_ligatures = del_value(document.header, "\\use_dash_ligatures")
    # only act for LaTeX export with the setting enabled
    if use_dash_ligatures != "true" or document.backend != "latex":
        return
    i = 0
    # matches any line containing an en-dash or em-dash character
    dash_pattern = re.compile(".*[\u2013\u2014]")
    while True:
        # skip lines without dashes:
        i = find_re(document.body, dash_pattern, i + 1)
        if i == -1:
            break
        line = document.body[i]
        # skip label width string (see bug 10243):
        if line.startswith("\\labelwidthstring"):
            continue
        # do not touch hyphens in some insets (cf. lyx_2_2.convert_dashes):
        try:
            inset_type, start, end = get_containing_inset(document.body, i)
        except TypeError:  # no containing inset
            inset_type, start, end = "no inset", -1, -1
        if (
            inset_type.split()[0]
            in [
                "CommandInset",
                "ERT",
                "External",
                "Formula",
                "FormulaMacro",
                "Graphics",
                "IPA",
                "listings",
            ]
            or inset_type == "Flex Code"
        ):
            # skip the whole verbatim-like inset
            i = end
            continue
        try:
            layoutname, start, end, j = get_containing_layout(document.body, i)
        except TypeError:  # no (or malformed) containing layout
            document.warning("Malformed LyX document: " "Can't find layout at body line %d" % i)
            continue
        if layoutname == "LyX-Code":
            i = end
            continue
        # TODO: skip replacement in typewriter fonts
        # replace literal dashes by the interim ligature representation;
        # the embedded "\n" puts each command on its own body line below
        line = line.replace("\u2013", "\\twohyphens\n")
        line = line.replace("\u2014", "\\threehyphens\n")
        document.body[i : i + 1] = line.split("\n")
    # redefine the dash LICRs to use ligature dashes:
    add_to_preamble(
        document,
        [r"\renewcommand{\textendash}{--}", r"\renewcommand{\textemdash}{---}"],
    )
|
|
|
|
|
|
def revert_noto(document):
    """Revert Noto font definitions to LaTeX preamble code.

    Only acts on documents with TeX fonts (\\use_non_tex_fonts false).
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    # (header token, Noto family name, LaTeX default macro to redefine)
    noto_fonts = [
        ("\\font_roman", "NotoSerif-TLF", "\\rmdefault"),
        ("\\font_sans", "NotoSans-TLF", "\\sfdefault"),
        ("\\font_typewriter", "NotoMono-TLF", "\\ttdefault"),
    ]
    for token, family, default_macro in noto_fonts:
        i = find_token(document.header, f'{token} "{family}"', 0)
        if i != -1:
            add_to_preamble(document, [f"\\renewcommand{{{default_macro}}}{{{family}}}"])
            document.header[i] = document.header[i].replace(family, "default")
|
|
|
|
|
|
def revert_xout(document):
    """Revert the \\xout font attribute to ulem TeX code."""
    changed = revert_font_attrs(document.body, "\\xout", "\\xout")
    # truthiness test instead of "== True" (lint)
    if changed:
        insert_to_preamble(
            document,
            [
                "% for proper cross-out",
                "\\PassOptionsToPackage{normalem}{ulem}",
                "\\usepackage{ulem}",
            ],
        )
|
|
|
|
|
|
def convert_mathindent(document):
    """Add the \\is_math_indent tag."""
    insert_pos = find_token(document.header, "\\quotes_style")  # where to insert
    # check if the document uses the class option "fleqn"
    options = get_value(document.header, "\\options")
    if "fleqn" not in options:
        document.header.insert(insert_pos, "\\is_math_indent 0")
        return
    document.header.insert(insert_pos, "\\is_math_indent 1")
    # drop the now-redundant fleqn option (re-find: the insert shifted lines)
    opt_line = find_token(document.header, "\\options")
    remaining = [opt for opt in options.split(",") if opt.strip() != "fleqn"]
    if remaining:
        document.header[opt_line] = "\\options " + ",".join(remaining)
    else:
        del document.header[opt_line]
|
|
|
|
|
|
def revert_mathindent(document):
    """Define mathindent if set in the document."""
    # emulate and delete \math_indentation
    value = get_value(document.header, "\\math_indentation", default="default", delete=True)
    if value != "default":
        add_to_preamble(document, [rf"\setlength{{\mathindent}}{{{value}}}"])
    # delete \is_math_indent and emulate via document class option
    if not get_bool_value(document.header, "\\is_math_indent", delete=True):
        return
    opt_line = find_token(document.header, "\\options")
    if opt_line == -1:
        anchor = find_token(document.header, "\\use_default_options")
        document.header.insert(anchor, "\\options fleqn")
    else:
        document.header[opt_line] = document.header[opt_line].replace(
            "\\options ", "\\options fleqn,"
        )
|
|
|
|
|
|
def revert_baselineskip(document):
    "Revert baselineskips to TeX code"
    # Handles both VSpace insets and \hspace space insets whose length is
    # given in "baselineskip%" units.
    i = 0
    while True:
        # find the next line mentioning a baselineskip-based length
        i = find_substring(document.body, "baselineskip%", i + 1)
        if i == -1:
            return
        if document.body[i].startswith("\\begin_inset VSpace"):
            # output VSpace inset as TeX code
            end = find_end_of_inset(document.body, i)
            if end == -1:
                document.warning(
                    "Malformed LyX document: " "Can't find end of VSpace inset at line %d." % i
                )
                continue
            # read out the value
            baselineskip = document.body[i].split()[-1]
            # check if it is the starred version
            star = "*" if "*" in document.body[i] else ""
            # now output TeX code
            cmd = f"\\vspace{star}{{{latex_length(baselineskip)[1]}}}"
            document.body[i : end + 1] = put_cmd_in_ert(cmd)
            # skip over the ERT lines just inserted
            i += 8
            continue
        begin, end = is_in_inset(document.body, i, "\\begin_inset space \\hspace")
        if begin != -1:
            # output space inset as TeX code
            baselineskip = document.body[i].split()[-1]
            # the starred variant is flagged on the preceding inset line
            star = "*" if "*" in document.body[i - 1] else ""
            cmd = f"\\hspace{star}{{{latex_length(baselineskip)[1]}}}"
            document.body[begin : end + 1] = put_cmd_in_ert(cmd)
|
|
|
|
|
|
def revert_rotfloat(document):
    """Revert placement options for rotated floats to rotfloat TeX code.

    Sideways floats with a placement option are replaced by ERT
    \\begin{sideways<type>[*]}[<placement>] ... \\end{sideways<type>[*]}.
    """
    i = 0
    while True:
        i = find_token(document.body, "sideways true", i)
        if i == -1:
            return
        if not document.body[i - 2].startswith("placement "):
            i = i + 1
            continue
        # we found a sideways float with placement options
        # at first store the placement
        beg = document.body[i - 2].rfind(" ")
        placement = document.body[i - 2][beg + 1 :]
        # check if the option 'H' is used
        if placement.find("H") != -1:
            add_to_preamble(document, ["\\usepackage{float}"])
        # now check if it is a starred type
        if document.body[i - 1].find("wide true") != -1:
            star = "*"
        else:
            star = ""
        # store the float type
        beg = document.body[i - 3].rfind(" ")
        fType = document.body[i - 3][beg + 1 :]
        # now output TeX code
        endInset = find_end_of_inset(document.body, i - 3)
        if endInset == -1:
            document.warning("Malformed LyX document: Missing '\\end_inset' of Float inset.")
            return
        else:
            # replace the closing part first so the start indices stay valid
            document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert(
                "\\end{sideways" + fType + star + "}"
            )
            document.body[i - 3 : i + 2] = put_cmd_in_ert(
                "\\begin{sideways" + fType + star + "}[" + placement + "]"
            )
            add_to_preamble(document, ["\\usepackage{rotfloat}"])

        i = i + 1
|
|
|
|
|
|
# Zero-width (0dd) breakable space inset, plus the trailing blank line,
# used by older formats to emulate "\SpecialChar allowbreak".
allowbreak_emulation = [
    r"\begin_inset space \hspace{}",
    r"\length 0dd",
    r"\end_inset",
    r"",
]
|
|
|
|
|
|
def convert_allowbreak(document):
    r"Zero widths Space-inset -> \SpecialChar allowbreak."
    body = document.body
    match = find_complete_lines(body, allowbreak_emulation, 2)
    while match != -1:
        # fold the four emulation lines into the preceding text line
        body[match - 1 : match + 4] = [body[match - 1] + r"\SpecialChar allowbreak"]
        match = find_complete_lines(body, allowbreak_emulation, match + 3)
|
|
|
|
|
|
def revert_allowbreak(document):
    r"\SpecialChar allowbreak -> Zero widths Space-inset."
    body = document.body
    marker = r"\SpecialChar allowbreak"
    pos = 1
    while pos < len(body):
        if not body[pos].endswith(marker):
            pos += 1
            continue
        # strip the special char and append the emulation inset lines
        stripped = body[pos].replace(marker, "")
        body[pos : pos + 1] = [stripped] + allowbreak_emulation
        pos += 5
|
|
|
|
|
|
def convert_mathnumberpos(document):
    "add the \\math_number_before tag"
    anchor = find_token(document.header, "\\quotes_style")
    # the class option "leqno" (equation numbers on the left) becomes the tag
    if is_document_option(document, "leqno"):
        remove_document_option(document, "leqno")
        document.header.insert(anchor, "\\math_number_before 1")
    else:
        document.header.insert(anchor, "\\math_number_before 0")
|
|
|
|
|
|
def revert_mathnumberpos(document):
    """Remove \\math_number_before tag,
    add the document class option leqno if required.
    """
    if get_bool_value(document.header, "\\math_number_before", delete=True):
        insert_document_option(document, "leqno")
|
|
|
|
|
|
def convert_mathnumberingname(document):
    """Rename the \\math_number_before tag to \\math_numbering_side.

    A true \\math_number_before becomes "left".  Otherwise a "reqno"
    document class option (if present) is absorbed and becomes "right";
    when neither applies the side is "default".
    """
    i = find_token(document.header, "\\math_number_before")
    math_number_before = get_bool_value(document.header, "\\math_number_before", i)
    if math_number_before:
        document.header[i] = "\\math_numbering_side left"
        return
    # check if the document uses the class option "reqno"
    # (guard k != -1: without it a missing \options line made the negative
    # index silently inspect/modify the last header line)
    k = find_token(document.header, "\\options")
    if k != -1 and "reqno" in document.header[k]:
        document.header[i] = "\\math_numbering_side right"
        # delete the found option, whatever its comma placement
        document.header[k] = document.header[k].replace(",reqno", "")
        document.header[k] = document.header[k].replace(", reqno", "")
        document.header[k] = document.header[k].replace("reqno,", "")
        if "reqno" in document.header[k]:
            # then we have reqno as the only option: drop the whole line
            del document.header[k]
    else:
        document.header[i] = "\\math_numbering_side default"
|
|
|
|
|
|
def revert_mathnumberingname(document):
    """Rename \\math_numbering_side back to the boolean \\math_number_before.

    "left" maps to 1.  "right" maps to 0 plus the "reqno" document class
    option.  Any other value maps to 0.
    """
    i = find_token(document.header, "\\math_numbering_side")
    math_numbering_side = get_value(document.header, "\\math_numbering_side", i)
    # rename tag and set boolean value:
    if math_numbering_side == "left":
        document.header[i] = "\\math_number_before 1"
    elif math_numbering_side == "right":
        document.header[i] = "\\math_number_before 0"
        # also add the class option reqno, unless it is already present
        # (the old code inserted a duplicate "\options reqno" line when an
        # options line already containing "reqno" existed)
        k = find_token(document.header, "\\options")
        if k == -1:
            j = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(j, "\\options reqno")
        elif "reqno" not in document.header[k]:
            document.header[k] = document.header[k].replace("\\options", "\\options reqno,")
    else:
        document.header[i] = "\\math_number_before 0"
|
|
|
|
|
|
def convert_minted(document):
    """Add the \\use_minted header tag, disabled by default."""
    # Place it just before the first \index entry in the header.
    pos = find_token(document.header, "\\index ")
    document.header.insert(pos, "\\use_minted 0")
|
|
|
|
|
|
def revert_minted(document):
    """Strip the \\use_minted tag from the document header."""
    del_token(document.header, "\\use_minted")
|
|
|
|
|
|
def revert_longtable_lscape(document):
    """Revert the longtable landscape mode to ERT."""
    i = 0
    regexp = re.compile(r"^<features rotate=\"90\"\s.*islongtable=\"true\"\s.*$", re.IGNORECASE)
    while True:
        i = find_re(document.body, regexp, i)
        if i == -1:
            return

        # Drop the rotate attribute; landscape is emulated with ERT below.
        document.body[i] = document.body[i].replace(' rotate="90"', "")
        lay = get_containing_layout(document.body, i)
        # get_containing_layout() returns False when no layout encloses i
        # (idiomatic truthiness test instead of "== False").
        if not lay:
            document.warning("Longtable has no layout!")
            i += 1
            continue
        begcmd = put_cmd_in_ert("\\begin{landscape}")
        endcmd = put_cmd_in_ert("\\end{landscape}")
        # Close the original layout after \end{landscape} and reopen a
        # layout of the same kind before \begin{landscape}.  Insert at the
        # end first so the earlier index (lay[1]) stays valid.
        document.body[lay[2] : lay[2]] = endcmd + ["\\end_layout"]
        document.body[lay[1] : lay[1]] = ["\\begin_layout " + lay[0], ""] + begcmd

        add_to_preamble(document, ["\\usepackage{pdflscape}"])
        i = lay[2]
|
|
|
|
|
|
##
# Conversion hub
#

supported_versions = ["2.3.0", "2.3"]
# Forward conversion chain: each entry is [target format number, list of
# conversion routines to run].  Empty lists are pure format-number bumps.
convert = [
    [509, [convert_microtype]],
    [510, [convert_dateinset]],
    [511, [convert_ibranches]],
    [512, [convert_beamer_article_styles]],
    [513, []],
    [514, []],
    [515, []],
    [516, [convert_inputenc]],
    [517, []],
    [518, [convert_iopart]],
    [519, [convert_quotestyle]],
    [520, []],
    [521, [convert_frenchquotes]],
    [522, []],
    [523, []],
    [524, [convert_crimson]],
    [525, []],
    [526, []],
    [527, []],
    [528, []],
    [529, []],
    [530, []],
    [531, []],
    [532, [convert_literalparam]],
    [533, []],
    [534, []],
    [535, [convert_dashligatures]],
    [536, []],
    [537, []],
    [538, [convert_mathindent]],
    [539, []],
    [540, []],
    [541, [convert_allowbreak]],
    [542, [convert_mathnumberpos]],
    [543, [convert_mathnumberingname]],
    [544, [convert_minted]],
]
|
|
|
|
# Backward conversion chain: each entry is [target format number, list of
# revert routines to run], applied from newest to oldest format.
revert = [
    [543, [revert_minted, revert_longtable_lscape]],
    [542, [revert_mathnumberingname]],
    [541, [revert_mathnumberpos]],
    [540, [revert_allowbreak]],
    [539, [revert_rotfloat]],
    [538, [revert_baselineskip]],
    [537, [revert_mathindent]],
    [536, [revert_xout]],
    [535, [revert_noto]],
    [534, [revert_dashligatures]],
    [533, [revert_chapterbib]],
    [532, [revert_multibib]],
    [531, [revert_literalparam]],
    [530, [revert_qualicites]],
    [529, [revert_bibpackopts]],
    [528, [revert_citekeyonly]],
    [527, [revert_biblatex]],
    [526, [revert_noprefix]],
    [525, [revert_plural_refs]],
    [524, [revert_labelonly]],
    [523, [revert_crimson, revert_cochinealmath]],
    [522, [revert_cjkquotes]],
    [521, [revert_dynamicquotes]],
    [
        520,
        [
            revert_britishquotes,
            revert_swedishgquotes,
            revert_frenchquotes,
            revert_frenchinquotes,
            revert_russianquotes,
            revert_swissquotes,
        ],
    ],
    [519, [revert_plainquote]],
    [518, [revert_quotestyle]],
    [517, [revert_iopart]],
    [516, [revert_quotes]],
    [515, []],
    [514, [revert_urdu, revert_syriac]],
    [513, [revert_amharic, revert_asturian, revert_kannada, revert_khmer]],
    [512, [revert_new_babel_languages]],
    [511, [revert_beamer_article_styles]],
    [510, [revert_ibranches]],
    [509, []],
    [508, [revert_microtype]],
]
|
|
|
|
|
|
if __name__ == "__main__":
    # This module only provides conversion routines driven by lyx2lyx;
    # there is nothing to do when it is executed directly.
    pass
|