mirror of https://git.lyx.org/repos/lyx.git
synced 2024-12-27 14:29:21 +00:00
commit 8dab1cfe7e
Update the listings inset to optionally use the minted package (instead of the listings one) for typesetting code listings. Only one of the two packages can be used in a document, but it is possible to switch packages without issues if the used options are the same. If a switch is made and the options differ, one needs to manually adjust them if they were entered in the advanced options tab, or apply again the gui settings. Note that minted requires the -shell-escape option for the latex backend and the installation of additional software (python pygments).
2335 lines | 84 KiB | Python
# -*- coding: utf-8 -*-
# This file is part of lyx2lyx
# Copyright (C) 2016 The LyX team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
|
""" Convert files to the file format generated by lyx 2.3"""
|
|
|
|
import re, string
|
|
import unicodedata
|
|
import sys, os
|
|
|
|
# Uncomment only what you need to import, please.
|
|
|
|
from parser_tools import find_end_of, find_token_backwards, find_end_of_layout, \
|
|
find_token, find_end_of_inset, get_value, get_bool_value, \
|
|
get_containing_layout, get_quoted_value, del_token, find_re
|
|
# find_tokens, find_token_exact, is_in_inset, \
|
|
# check_token, get_option_value
|
|
|
|
from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert, revert_font_attrs, \
|
|
insert_to_preamble
|
|
# get_ert, lyx2latex, \
|
|
# lyx2verbatim, length_in_bp, convert_info_insets
|
|
# latex_length, revert_flex_inset, hex2ratio, str2bool
|
|
|
|
####################################################################
|
|
# Private helper functions
|
|
|
|
|
|
|
|
###############################################################################
|
|
###
|
|
### Conversion and reversion routines
|
|
###
|
|
###############################################################################
|
|
|
|
def convert_microtype(document):
    """Add the \\use_microtype header setting.

    The flag is inserted right after \\font_tt_scale; it is set to true
    (and the preamble line removed) when the user loaded microtype manually.
    """
    anchor = find_token(document.header, "\\font_tt_scale", 0)
    if anchor == -1:
        document.warning("Malformed LyX document: Can't find \\font_tt_scale.")
        anchor = len(document.header) - 1

    pkg = find_token(document.preamble, "\\usepackage{microtype}", 0)
    if pkg == -1:
        document.header.insert(anchor + 1, "\\use_microtype false")
    else:
        # The package was loaded via the preamble: absorb it into the header.
        document.header.insert(anchor + 1, "\\use_microtype true")
        del document.preamble[pkg]
|
|
|
|
def revert_microtype(document):
    """Remove the \\use_microtype header setting.

    If microtype was enabled, re-add the package load to the preamble so
    the output is unchanged.
    """
    pos = find_token(document.header, "\\use_microtype", 0)
    if pos == -1:
        return
    enabled = get_bool_value(document.header, "\\use_microtype", pos)
    del document.header[pos]
    if enabled:
        add_to_preamble(document, ["\\usepackage{microtype}"])
|
|
|
|
def convert_dateinset(document):
    """Convert date external insets to ERT containing \\today."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset External", pos)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset' in convert_dateinset.")
        elif get_value(document.body, 'template', pos, end) == "Date":
            # Replace the whole inset by an ERT emitting \today.
            document.body[pos : end + 1] = put_cmd_in_ert("\\today ")
        pos += 1
|
|
|
|
|
def convert_inputenc(document):
    """Replace no longer supported input encoding settings.

    The bogus "pt254" encoding is mapped to the valid "pt154".
    """
    pos = find_token(document.header, "\\inputenc", 0)
    if pos == -1:
        return
    if get_value(document.header, "\\inputencoding", pos) == "pt254":
        document.header[pos] = "\\inputencoding pt154"
|
|
|
|
|
def convert_ibranches(document):
    """Add an "inverted 0" line to every branch inset."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Branch", pos)
        if pos == -1:
            return
        document.body.insert(pos + 1, "inverted 0")
        # Skip past the line we just inserted.
        pos += 2
|
|
|
|
|
def revert_ibranches(document):
    """Convert inverted branches to explicit anti-branches.

    An inverted branch inset shows its content when the branch is *not*
    selected. Older formats have no inverted flag, so each inverted inset
    of branch B is turned into a normal inset of a new branch "Anti-B"
    whose selection status is the inverse of B's.
    """
    # Collect the document's branches and their selection status (0/1).
    ourbranches = {}
    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        branch = document.header[i][8:].strip()
        if document.header[i+1].startswith("\\selected "):
            selected = int(document.header[i+1][10])
        else:
            document.warning("Malformed LyX document: No selection indicator for branch " + branch)
            selected = 1
        # the value tells us whether the branch is selected
        ourbranches[branch] = selected
        i += 1

    # Figure out what inverted branches, if any, have been used
    # and convert them to "Anti-OldBranch"
    ibranches = {}
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Branch", i)
        if i == -1:
            break
        if not document.body[i+1].startswith("inverted "):
            document.warning("Malformed LyX document: Missing 'inverted' tag!")
            i += 1
            continue
        inverted = document.body[i+1][9]
        if inverted == "1":
            branch = document.body[i][20:].strip()
            if not branch in ibranches:
                # Pick a fresh anti-branch name, avoiding clashes with
                # names already generated.
                antibranch = "Anti-" + branch
                while antibranch in ibranches:
                    antibranch = "x" + antibranch
                ibranches[branch] = antibranch
            else:
                antibranch = ibranches[branch]
            document.body[i] = "\\begin_inset Branch " + antibranch
        # remove "inverted" key
        del document.body[i+1]
        i += 1

    # now we need to add the new branches to the header
    for old, new in ibranches.items():
        i = find_token(document.header, "\\branch " + old, 0)
        if i == -1:
            document.warning("Can't find branch %s even though we found it before!" % (old))
            continue
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document! Can't find end of branch " + old)
            continue
        # FIX: use 1 - ourbranches[old] to invert the selection status.
        # The previous "ourbranches[old] - 1" produced an invalid "-1"
        # for an unselected (0) branch instead of 1.
        lines = ["\\branch " + new,
                 "\\selected " + str(1 - ourbranches[old])]
        # these are the old lines telling us color, etc.
        lines += document.header[i+2 : j+1]
        document.header[i:i] = lines
|
|
|
|
|
def revert_beamer_article_styles(document):
    """Include (scr)article styles in beamer article via local layout.

    Older formats do not ship the combined beamer-article layouts, so the
    required Input directives and preamble code are injected into the
    document's local layout block (which is created if absent).
    """
    beamer_articles = ["article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_articles:
        return

    inclusion = "article.layout"
    if document.textclass == "scrarticle-beamer":
        inclusion = "scrartcl.layout"

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        # No local layout yet: create an empty one just before \language.
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k - 1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document: Can't find end of local layout!")
        return

    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (more [scr]article styles) ###",
        "Input " + inclusion,
        "Input beamer.layout",
        "Provides geometry 0",
        "Provides hyperref 0",
        "DefaultFont",
        " Family Roman",
        " Series Medium",
        " Shape Up",
        " Size Normal",
        " Color None",
        "EndFont",
        "Preamble",
        " \\usepackage{beamerarticle,pgf}",
        " % this default might be overridden by plain title style",
        # FIX: "\\makebeamertitle" — the single backslash before "m" was an
        # invalid Python escape sequence ("\m"); the doubled backslash
        # yields the identical runtime string without the warning.
        " \\newcommand\\makebeamertitle{\\frame{\\maketitle}}%",
        " \\AtBeginDocument{",
        " \\let\\origtableofcontents=\\tableofcontents",
        " \\def\\tableofcontents{\\@ifnextchar[{\\origtableofcontents}{\\gobbletableofcontents}}",
        " \\def\\gobbletableofcontents#1{\\origtableofcontents}",
        " }",
        "EndPreamble",
        "### End of insertion by lyx2lyx (more [scr]article styles) ###"
    ]
|
|
|
|
|
def convert_beamer_article_styles(document):
    """Remove the (scr)article styles lyx2lyx inserted into beamer articles.

    Deletes the marked insertion from the local layout; if the insertion
    was the entire local layout, the whole block is dropped.
    """
    if document.textclass not in ["article-beamer", "scrarticle-beamer"]:
        return

    begin = find_token(document.header, "\\begin_local_layout", 0)
    if begin == -1:
        return

    end = find_end_of(document.header, begin, "\\begin_local_layout", "\\end_local_layout")
    if end == -1:
        # this should not happen
        document.warning("Malformed LyX document: Can't find end of local layout!")
        return

    first = find_token(document.header, "### Inserted by lyx2lyx (more [scr]article styles) ###", begin, end)
    if first == -1:
        return
    last = find_token(document.header, "### End of insertion by lyx2lyx (more [scr]article styles) ###", begin, end)
    if last == -1:
        # this should not happen
        document.warning("End of lyx2lyx local layout insertion not found!")
        return

    if first == begin + 1 and last == end - 1:
        # the insertion was all the local layout there was: drop the block
        document.header[begin : end + 1] = []
    else:
        document.header[first : last + 1] = []
|
|
|
|
|
def revert_bosnian(document):
    "Set the document language to English but assure Bosnian output"
    if document.language != "bosnian":
        return
    document.language = "english"
    pos = find_token(document.header, "\\language bosnian", 0)
    if pos != -1:
        document.header[pos] = "\\language english"
    pos = find_token(document.header, "\\language_package default", 0)
    if pos != -1:
        # babel knows bosnian; force it
        document.header[pos] = "\\language_package babel"
    pos = find_token(document.header, "\\options", 0)
    if pos != -1:
        # prepend bosnian to the class options
        document.header[pos] = document.header[pos].replace("\\options", "\\options bosnian,")
    else:
        pos = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(pos + 1, "\\options bosnian")
|
|
|
|
|
def revert_friulan(document):
    "Set the document language to English but assure Friulan output"
    if document.language != "friulan":
        return
    document.language = "english"
    pos = find_token(document.header, "\\language friulan", 0)
    if pos != -1:
        document.header[pos] = "\\language english"
    pos = find_token(document.header, "\\language_package default", 0)
    if pos != -1:
        # babel knows friulan; force it
        document.header[pos] = "\\language_package babel"
    pos = find_token(document.header, "\\options", 0)
    if pos != -1:
        # prepend friulan to the class options
        document.header[pos] = document.header[pos].replace("\\options", "\\options friulan,")
    else:
        pos = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(pos + 1, "\\options friulan")
|
|
|
|
|
def revert_macedonian(document):
    "Set the document language to English but assure Macedonian output"
    if document.language != "macedonian":
        return
    document.language = "english"
    pos = find_token(document.header, "\\language macedonian", 0)
    if pos != -1:
        document.header[pos] = "\\language english"
    pos = find_token(document.header, "\\language_package default", 0)
    if pos != -1:
        # babel knows macedonian; force it
        document.header[pos] = "\\language_package babel"
    pos = find_token(document.header, "\\options", 0)
    if pos != -1:
        # prepend macedonian to the class options
        document.header[pos] = document.header[pos].replace("\\options", "\\options macedonian,")
    else:
        pos = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(pos + 1, "\\options macedonian")
|
|
|
|
|
def revert_piedmontese(document):
    "Set the document language to English but assure Piedmontese output"
    if document.language != "piedmontese":
        return
    document.language = "english"
    pos = find_token(document.header, "\\language piedmontese", 0)
    if pos != -1:
        document.header[pos] = "\\language english"
    pos = find_token(document.header, "\\language_package default", 0)
    if pos != -1:
        # babel knows piedmontese; force it
        document.header[pos] = "\\language_package babel"
    pos = find_token(document.header, "\\options", 0)
    if pos != -1:
        # prepend piedmontese to the class options
        document.header[pos] = document.header[pos].replace("\\options", "\\options piedmontese,")
    else:
        pos = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(pos + 1, "\\options piedmontese")
|
|
|
|
|
def revert_romansh(document):
    "Set the document language to English but assure Romansh output"
    if document.language != "romansh":
        return
    document.language = "english"
    pos = find_token(document.header, "\\language romansh", 0)
    if pos != -1:
        document.header[pos] = "\\language english"
    pos = find_token(document.header, "\\language_package default", 0)
    if pos != -1:
        # babel knows romansh; force it
        document.header[pos] = "\\language_package babel"
    pos = find_token(document.header, "\\options", 0)
    if pos != -1:
        # prepend romansh to the class options
        document.header[pos] = document.header[pos].replace("\\options", "\\options romansh,")
    else:
        pos = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(pos + 1, "\\options romansh")
|
|
|
|
|
def revert_amharic(document):
    """Set the document language to English but assure Amharic output.

    Amharic needs polyglossia; the language is declared in the preamble
    and re-activated via an ERT at the start of the body.
    """
    if document.language != "amharic":
        return
    document.language = "english"
    i = find_token(document.header, "\\language amharic", 0)
    if i != -1:
        document.header[i] = "\\language english"
    j = find_token(document.header, "\\language_package default", 0)
    if j != -1:
        document.header[j] = "\\language_package default"
    # FIX: "\\setotherlanguage" — the single backslash before "s" was an
    # invalid Python escape ("\s"); doubling it yields the identical
    # runtime string without the warning.
    add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{amharic}}"])
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{amharic}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
|
|
|
|
|
def revert_asturian(document):
    """Set the document language to English but assure Asturian output.

    Asturian needs polyglossia; the language is declared in the preamble
    and re-activated via an ERT at the start of the body.
    """
    if document.language != "asturian":
        return
    document.language = "english"
    i = find_token(document.header, "\\language asturian", 0)
    if i != -1:
        document.header[i] = "\\language english"
    j = find_token(document.header, "\\language_package default", 0)
    if j != -1:
        document.header[j] = "\\language_package default"
    # FIX: "\\setotherlanguage" — the single backslash before "s" was an
    # invalid Python escape ("\s"); doubling it yields the identical
    # runtime string without the warning.
    add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{asturian}}"])
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{asturian}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
|
|
|
|
|
def revert_kannada(document):
    """Set the document language to English but assure Kannada output.

    Kannada needs polyglossia; the language is declared in the preamble
    and re-activated via an ERT at the start of the body.
    """
    if document.language != "kannada":
        return
    document.language = "english"
    i = find_token(document.header, "\\language kannada", 0)
    if i != -1:
        document.header[i] = "\\language english"
    j = find_token(document.header, "\\language_package default", 0)
    if j != -1:
        document.header[j] = "\\language_package default"
    # FIX: "\\setotherlanguage" — the single backslash before "s" was an
    # invalid Python escape ("\s"); doubling it yields the identical
    # runtime string without the warning.
    add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{kannada}}"])
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{kannada}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
|
|
|
|
|
def revert_khmer(document):
    """Set the document language to English but assure Khmer output.

    Khmer needs polyglossia; the language is declared in the preamble
    and re-activated via an ERT at the start of the body.
    """
    if document.language != "khmer":
        return
    document.language = "english"
    i = find_token(document.header, "\\language khmer", 0)
    if i != -1:
        document.header[i] = "\\language english"
    j = find_token(document.header, "\\language_package default", 0)
    if j != -1:
        document.header[j] = "\\language_package default"
    # FIX: "\\setotherlanguage" — the single backslash before "s" was an
    # invalid Python escape ("\s"); doubling it yields the identical
    # runtime string without the warning.
    add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{khmer}}"])
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{khmer}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
|
|
|
|
|
def revert_urdu(document):
    """Set the document language to English but assure Urdu output.

    Urdu needs polyglossia; the language is declared in the preamble
    and re-activated via an ERT at the start of the body.
    """
    if document.language != "urdu":
        return
    document.language = "english"
    i = find_token(document.header, "\\language urdu", 0)
    if i != -1:
        document.header[i] = "\\language english"
    j = find_token(document.header, "\\language_package default", 0)
    if j != -1:
        document.header[j] = "\\language_package default"
    # FIX: "\\setotherlanguage" — the single backslash before "s" was an
    # invalid Python escape ("\s"); doubling it yields the identical
    # runtime string without the warning.
    add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{urdu}}"])
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{urdu}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
|
|
|
|
|
def revert_syriac(document):
    """Set the document language to English but assure Syriac output.

    Syriac needs polyglossia; the language is declared in the preamble
    and re-activated via an ERT at the start of the body.
    """
    if document.language != "syriac":
        return
    document.language = "english"
    i = find_token(document.header, "\\language syriac", 0)
    if i != -1:
        document.header[i] = "\\language english"
    j = find_token(document.header, "\\language_package default", 0)
    if j != -1:
        document.header[j] = "\\language_package default"
    # FIX: "\\setotherlanguage" — the single backslash before "s" was an
    # invalid Python escape ("\s"); doubling it yields the identical
    # runtime string without the warning.
    add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{syriac}}"])
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{syriac}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
|
|
|
|
|
def revert_quotes(document):
    """ Revert Quote Insets in verbatim or Hebrew context to plain quotes.

    Three passes: (1) quote insets inside verbatim-like insets
    (ERT, listings, URL/Chunk/Sweave/S-R), (2) quote insets inside
    verbatim-like layouts, (3) all quote insets when the context
    language is Hebrew. An inset whose code ends in "s" (single) becomes
    "'", any other becomes a double quote.
    """

    # First handle verbatim insets
    i = 0
    j = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           ( words[1] in ["ERT", "listings"] or ( len(words) > 2 and words[2] in ["URL", "Chunk", "Sweave", "S/R"]) ):
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " + words[1] + " inset at line " + str(i))
                i += 1
                continue
            # Replace every quote inset found within [i, j).
            # NOTE(review): j is not adjusted after a replacement shrinks
            # the body, so the scan window may extend slightly past the
            # inset — presumably harmless here; verify against callers.
            while True:
                k = find_token(document.body, '\\begin_inset Quotes', i, j)
                if k == -1:
                    i += 1
                    break
                l = find_end_of_inset(document.body, k)
                if l == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
                    i = k
                    continue
                replace = "\""
                if document.body[k].endswith("s"):
                    # single-quote inset
                    replace = "'"
                document.body[k:l+1] = [replace]
        else:
            i += 1
            continue

    # Now verbatim layouts
    i = 0
    j = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_layout" and \
           words[1] in ["Verbatim", "Verbatim*", "Code", "Author_Email", "Author_URL"]:
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " + words[1] + " layout at line " + str(i))
                i += 1
                continue
            # Same replacement loop as for verbatim insets above.
            while True:
                k = find_token(document.body, '\\begin_inset Quotes', i, j)
                if k == -1:
                    i += 1
                    break
                l = find_end_of_inset(document.body, k)
                if l == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
                    i = k
                    continue
                replace = "\""
                if document.body[k].endswith("s"):
                    # single-quote inset
                    replace = "'"
                document.body[k:l+1] = [replace]
        else:
            i += 1
            continue

    # Now handle Hebrew
    if not document.language == "hebrew" and find_token(document.body, '\\lang hebrew', 0) == -1:
        return

    i = 0
    j = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        # Determine the effective language at the inset: the nearest
        # preceding \lang switch inside the same paragraph, else the
        # document language.
        hebrew = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            hebrew = document.language == "hebrew"
        elif document.body[ql] == "\\lang hebrew":
            hebrew = True
        if hebrew:
            replace = "\""
            if document.body[k].endswith("s"):
                # single-quote inset
                replace = "'"
            document.body[k:l+1] = [replace]
        i = l
|
|
|
|
|
def revert_iopart(document):
    """Input the stdlayouts styles via local layout for iopart docs.

    Creates an empty local layout block just before \\language if the
    document does not have one yet.
    """
    if document.textclass != "iopart":
        return

    begin = find_token(document.header, "\\begin_local_layout", 0)
    if begin == -1:
        lang = find_token(document.header, "\\language", 0)
        if lang == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[lang - 1 : lang - 1] = ["\\begin_local_layout", "\\end_local_layout"]
        begin = lang - 1

    end = find_end_of(document.header, begin, "\\begin_local_layout", "\\end_local_layout")
    if end == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    document.header[begin + 1 : begin + 1] = [
        "### Inserted by lyx2lyx (stdlayouts) ###",
        "Input stdlayouts.inc",
        "### End of insertion by lyx2lyx (stdlayouts) ###"
    ]
|
|
|
|
|
def convert_iopart(document):
    """Remove the stdlayouts local layout lyx2lyx added to iopart docs."""
    if document.textclass != "iopart":
        return

    begin = find_token(document.header, "\\begin_local_layout", 0)
    if begin == -1:
        return

    end = find_end_of(document.header, begin, "\\begin_local_layout", "\\end_local_layout")
    if end == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    first = find_token(document.header, "### Inserted by lyx2lyx (stdlayouts) ###", begin, end)
    if first == -1:
        return
    last = find_token(document.header, "### End of insertion by lyx2lyx (stdlayouts) ###", begin, end)
    if last == -1:
        # this should not happen
        document.warning("End of lyx2lyx local layout insertion not found!")
        return

    if first == begin + 1 and last == end - 1:
        # the insertion was all the local layout there was: drop the block
        document.header[begin : end + 1] = []
    else:
        document.header[first : last + 1] = []
|
|
|
|
|
def convert_quotestyle(document):
    "Convert \\quotes_language to \\quotes_style"
    pos = find_token(document.header, "\\quotes_language", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_language!")
        return
    style = get_value(document.header, "\\quotes_language", pos)
    document.header[pos] = "\\quotes_style " + style
|
|
|
|
|
def revert_quotestyle(document):
    "Revert \\quotes_style to \\quotes_language"
    pos = find_token(document.header, "\\quotes_style", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_style!")
        return
    style = get_value(document.header, "\\quotes_style", pos)
    document.header[pos] = "\\quotes_language " + style
|
|
|
|
|
def revert_plainquote(document):
    """Revert plain quote insets to literal quote characters."""

    # revert the header style setting
    pos = find_token(document.header, "\\quotes_style plain", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style english"

    # then the insets themselves
    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes q', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        # single-quote insets end in "s"
        plain = "'" if document.body[k].endswith("s") else "\""
        document.body[k:l+1] = [plain]
        i = l
|
|
|
|
|
def convert_frenchquotes(document):
    """Convert french quote insets to swiss."""

    # convert the header style setting
    pos = find_token(document.header, "\\quotes_style french", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style swiss"

    # then the insets: rewrite the leading "f" of the code to "c"
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes f', i)
        if i == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        document.body[i] = document.body[i].replace(code, code.replace("f", "c", 1))
        i += 1
|
|
|
|
|
def revert_swissquotes(document):
    """Revert swiss quote insets to french."""

    # revert the header style setting
    pos = find_token(document.header, "\\quotes_style swiss", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style french"

    # then the insets: rewrite the leading "c" of the code to "f"
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes c', i)
        if i == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        document.body[i] = document.body[i].replace(code, code.replace("c", "f", 1))
        i += 1
|
|
|
|
|
def revert_britishquotes(document):
    """Revert british quote insets to english."""

    # revert the header style setting
    pos = find_token(document.header, "\\quotes_style british", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style english"

    # then the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes b', i)
        if i == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newcode = code.replace("b", "e", 1)
        if code[2] == "d":
            # opening mark: double becomes single
            newcode = newcode.replace("d", "s")
        else:
            # closing mark: single becomes double
            newcode = newcode.replace("s", "d")
        document.body[i] = document.body[i].replace(code, newcode)
        i += 1
|
|
|
|
|
def revert_swedishgquotes(document):
    """Revert swedish quote insets."""

    # revert the header style setting
    pos = find_token(document.header, "\\quotes_style swedishg", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style danish"

    # then the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes w', i)
        if i == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        if code[2] == "d":
            # outer marks
            newcode = code.replace("w", "a", 1).replace("r", "l")
        else:
            # inner marks
            newcode = code.replace("w", "s", 1)
        document.body[i] = document.body[i].replace(code, newcode)
        i += 1
|
|
|
|
|
def revert_frenchquotes(document):
    """Revert french inner quote insets."""
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes f', i)
        if i == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        if code[2] == "s":
            # inner marks only
            newcode = code.replace("f", "e", 1).replace("s", "d")
            document.body[i] = document.body[i].replace(code, newcode)
        i += 1
|
|
|
|
|
def revert_frenchinquotes(document):
    """Revert inner frenchin quote insets."""

    # revert the header style setting
    pos = find_token(document.header, "\\quotes_style frenchin", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style french"

    # then the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes i', i)
        if i == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newcode = code.replace("i", "f", 1)
        if code[2] == "s":
            # inner marks
            newcode = newcode.replace("s", "d")
        document.body[i] = document.body[i].replace(code, newcode)
        i += 1
|
|
|
|
|
def revert_russianquotes(document):
    """Revert russian quote insets."""

    # revert the header style setting
    pos = find_token(document.header, "\\quotes_style russian", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style french"

    # then the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes r', i)
        if i == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        if code[2] == "s":
            # inner marks
            newcode = code.replace("r", "g", 1).replace("s", "d")
        else:
            # outer marks
            newcode = code.replace("r", "f", 1)
        document.body[i] = document.body[i].replace(code, newcode)
        i += 1
|
|
|
|
|
def revert_dynamicquotes(document):
    """Revert dynamic quote insets.

    Dynamic insets (code letter "x") are turned into static insets of
    the document's global quote style.
    """
    # Drop the \dynamic_quotes header line
    i = find_token(document.header, "\\dynamic_quotes", 0)
    if i != -1:
        del document.header[i]

    # Get the global style; it selects the static replacement letter
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    # IDIOM: map style name -> inset code letter via a dict instead of a
    # twelve-branch elif chain; unknown styles fall back to english ("e"),
    # matching the original default.
    s = {
        "english":  "e",
        "swedish":  "s",
        "german":   "g",
        "polish":   "p",
        "swiss":    "c",
        "danish":   "a",
        "plain":    "q",
        "british":  "b",
        "swedishg": "w",
        "french":   "f",
        "frenchin": "i",
        "russian":  "r",
    }.get(style, "e")

    # now transform the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes x', i)
        if i == -1:
            return
        document.body[i] = document.body[i].replace("x", s)
        i += 1
|
|
|
|
|
def revert_cjkquotes(document):
    """ Revert cjk quote insets.

    If the global style is a CJK one, dynamic insets ("x") are first made
    static ("j" for corner brackets, "k" for angle brackets). Then every
    "j"/"k" inset is replaced: by the literal CJK bracket character when
    the surrounding language is CJK, otherwise by an equivalent math
    formula.
    """

    # Get global style
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    global_cjk = style.find("cjk") != -1

    if global_cjk:
        document.header[i] = "\\quotes_style english"
        # transform dynamic insets to the matching static CJK code
        s = "j"
        if style == "cjkangle":
            s = "k"
        i = 0
        while True:
            i = find_token(document.body, '\\begin_inset Quotes x', i)
            if i == -1:
                break
            document.body[i] = document.body[i].replace("x", s)
            i += 1

    cjk_langs = ["chinese-simplified", "chinese-traditional", "japanese", "japanese-cjk", "korean"]

    # First pass: corner-bracket ("j") insets
    i = 0
    j = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes j', i)
        if k == -1:
            break
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        # Effective language at the inset: nearest preceding \lang switch
        # within the same paragraph, else the document language.
        cjk = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[ql].split()[1] in cjk_langs:
            cjk = True
        # get_value scans forward from i and thus reads line k here
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        replace = []
        if val[2] == "s":
            # inner marks
            if val[1] == "l":
                # inner opening mark
                if cjk:
                    replace = [u"\u300E"]
                else:
                    replace = ["\\begin_inset Formula $\\llceil$", "\\end_inset"]
            else:
                # inner closing mark
                if cjk:
                    replace = [u"\u300F"]
                else:
                    replace = ["\\begin_inset Formula $\\rrfloor$", "\\end_inset"]
        else:
            # outer marks
            if val[1] == "l":
                # outer opening mark
                if cjk:
                    replace = [u"\u300C"]
                else:
                    replace = ["\\begin_inset Formula $\\lceil$", "\\end_inset"]
            else:
                # outer closing mark
                if cjk:
                    replace = [u"\u300D"]
                else:
                    replace = ["\\begin_inset Formula $\\rfloor$", "\\end_inset"]

        document.body[k:l+1] = replace
        i = l

    # Second pass: angle-bracket ("k") insets; same structure as above
    i = 0
    j = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes k', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        cjk = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[ql].split()[1] in cjk_langs:
            cjk = True
        # get_value scans forward from i and thus reads line k here
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        replace = []
        if val[2] == "s":
            # inner marks
            if val[1] == "l":
                # inner opening mark
                if cjk:
                    replace = [u"\u3008"]
                else:
                    replace = ["\\begin_inset Formula $\\langle$", "\\end_inset"]
            else:
                # inner closing mark
                if cjk:
                    replace = [u"\u3009"]
                else:
                    replace = ["\\begin_inset Formula $\\rangle$", "\\end_inset"]
        else:
            # outer marks
            if val[1] == "l":
                # outer opening mark
                if cjk:
                    replace = [u"\u300A"]
                else:
                    replace = ["\\begin_inset Formula $\\langle\\kern -2.5pt\\langle$", "\\end_inset"]
            else:
                # outer closing mark
                if cjk:
                    replace = [u"\u300B"]
                else:
                    replace = ["\\begin_inset Formula $\\rangle\\kern -2.5pt\\rangle$", "\\end_inset"]

        document.body[k:l+1] = replace
        i = l
|
|
|
|
|
def revert_crimson(document):
    """Revert native Cochineal/Crimson font definition to LaTeX preamble code."""

    # Only relevant when the document is typeset with TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    i = find_token(document.header, "\\font_roman \"cochineal\"", 0)
    if i == -1:
        return
    # Translate the osf (old-style figures) setting into package options
    # and reset the header flag, since the package now handles it.
    opts = ""
    j = find_token(document.header, "\\font_osf true", 0)
    if j != -1:
        document.header[j] = "\\font_osf false"
        opts = "[proportional,osf]"
    add_to_preamble(document, ["\\usepackage" + opts + "{cochineal}"])
    # Reset the roman font to default; the preamble code now loads cochineal.
    document.header[i] = document.header[i].replace("cochineal", "default")
|
|
|
|
|
|
def revert_cochinealmath(document):
    """Revert the cochineal newtxmath math font definition to LaTeX preamble code."""

    # Only relevant when the document is typeset with TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    i = find_token(document.header, "\\font_math \"cochineal-ntxm\"", 0)
    if i == -1:
        return
    add_to_preamble(document, "\\usepackage[cochineal]{newtxmath}")
    # Reset the math font to automatic selection; the preamble loads newtxmath.
    document.header[i] = document.header[i].replace("cochineal-ntxm", "auto")
|
|
|
|
|
|
def revert_labelonly(document):
    """Revert the labelonly variant of InsetRef to ERT (the bare label text)."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue
        # Only the labelonly command is affected; skip other reference types.
        if find_token(document.body, "LatexCommand labelonly", i, j) == -1:
            i = j
            continue
        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(i))
            i = j + 1
            continue
        # labelonly simply outputs the label name verbatim.
        document.body[i:j+1] = put_cmd_in_ert([label])
        i += 1
|
|
|
|
|
|
def revert_plural_refs(document):
    """Revert plural and capitalized formatted references to refstyle ERT.

    A formatted reference with the plural and/or caps flag set is turned
    into a raw ``\\<Prefix>ref[s]{suffix}`` refstyle command; otherwise the
    flags are simply removed from the inset.
    """

    # Plural/caps variants only make sense with refstyle enabled.
    # FIX: find_token returns -1 when the token is absent, so the old test
    # (i != 0) was inverted for documents without refstyle.
    i = find_token(document.header, "\\use_refstyle 1", 0)
    use_refstyle = (i != -1)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue

        plural = caps = suffix = False
        # FIX: the file format writes "LatexCommand formatted" (cf. the other
        # reference reverters in this file); "LaTeXCommand" never matched.
        k = find_token(document.body, "LatexCommand formatted", i, j)
        if k != -1 and use_refstyle:
            plural = get_bool_value(document.body, "plural", i, j, False)
            caps = get_bool_value(document.body, "caps", i, j, False)
            label = get_quoted_value(document.body, "reference", i, j)
            if label:
                try:
                    # refstyle labels are "prefix:suffix"
                    (prefix, suffix) = label.split(":", 1)
                except ValueError:
                    document.warning("No `:' separator in formatted reference at line %d!" % (i))
            else:
                document.warning("Can't find label for reference at line %d!" % (i))

        # this effectively tests also for use_refstyle and a formatted reference
        # we do this complicated test because we would otherwise do this erasure
        # over and over and over
        if not ((plural or caps) and suffix):
            # Not convertible: just drop the plural/caps params, if present.
            del_token(document.body, "plural", i, j)
            del_token(document.body, "caps", i, j - 1) # since we deleted a line
            i = j - 1
            continue

        if caps:
            # Capitalize the refstyle prefix (e.g. \Secref instead of \secref).
            prefix = prefix[0].title() + prefix[1:]
        cmd = "\\" + prefix + "ref"
        if plural:
            cmd += "[s]"
        cmd += "{" + suffix + "}"
        document.body[i:j+1] = put_cmd_in_ert([cmd])
        i += 1
|
|
|
|
|
|
def revert_noprefix(document):
    " Revert labelonly tags with 'noprefix' set "
    # A labelonly reference with noprefix outputs only the part of the label
    # after the first ":" separator; that is emulated here with ERT.
    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue
        # noprefix only matters for the labelonly command.
        k = find_token(document.body, "LatexCommand labelonly", i, j)
        noprefix = False
        if k != -1:
            noprefix = get_bool_value(document.body, "noprefix", i, j)
        if not noprefix:
            # either it was not a labelonly command, or else noprefix was not set.
            # in that case, we just delete the option.
            del_token(document.body, "noprefix", i, j)
            i = j
            continue
        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(i))
            i = j + 1
            continue
        try:
            # Labels of the form "prefix:suffix"; only the suffix is printed.
            (prefix, suffix) = label.split(":", 1)
        except:
            document.warning("No `:' separator in formatted reference at line %d!" % (i))
            # we'll leave this as an ordinary labelonly reference
            del_token(document.body, "noprefix", i, j)
            i = j
            continue
        # Replace the whole inset with the bare suffix as ERT.
        document.body[i:j+1] = put_cmd_in_ert([suffix])
        i += 1
|
|
|
|
|
|
def revert_biblatex(document):
    """ Revert biblatex support

    Reverts the biblatex cite engines to natbib: moves the biblatex header
    settings into a \\usepackage{biblatex} preamble line, turns BibTeX insets
    into \\printbibliography ERT (keeping the inset inside a note), reverts
    biblatex-only citation commands to ERT, and fakes natbib via a local
    layout so the citation styles still work.
    """

    #
    # Header
    #

    # 1. Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # 2. Store biblatex state and revert to natbib
    biblatex = False
    if engine in ["biblatex", "biblatex-natbib"]:
        biblatex = True
        document.header[i] = "\\cite_engine natbib"

    # 3. Store and remove new document headers
    bibstyle = ""
    i = find_token(document.header, "\\biblatex_bibstyle", 0)
    if i != -1:
        bibstyle = get_value(document.header, "\\biblatex_bibstyle", i)
        del document.header[i]

    citestyle = ""
    i = find_token(document.header, "\\biblatex_citestyle", 0)
    if i != -1:
        citestyle = get_value(document.header, "\\biblatex_citestyle", i)
        del document.header[i]

    biblio_options = ""
    i = find_token(document.header, "\\biblio_options", 0)
    if i != -1:
        biblio_options = get_value(document.header, "\\biblio_options", i)
        del document.header[i]

    if biblatex:
        # Reassemble the stored settings as biblatex package options.
        # natbib=true keeps the natbib citation commands working.
        bbxopts = "[natbib=true"
        if bibstyle != "":
            bbxopts += ",bibstyle=" + bibstyle
        if citestyle != "":
            bbxopts += ",citestyle=" + citestyle
        if biblio_options != "":
            bbxopts += "," + biblio_options
        bbxopts += "]"
        add_to_preamble(document, "\\usepackage" + bbxopts + "{biblatex}")

    #
    # Body
    #

    # 1. Bibtex insets
    i = 0
    bibresources = []
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
            i += 1
            continue
        bibs = get_quoted_value(document.body, "bibfiles", i, j)
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        # store resources
        if bibs:
            bibresources += bibs.split(",")
        else:
            document.warning("Can't find bibfiles for bibtex inset at line %d!" %(i))
        # remove biblatexopts line
        k = find_token(document.body, "biblatexopts", i, j)
        if k != -1:
            del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        # Insert ERT \\printbibliography and wrap bibtex inset to a Note
        if biblatex:
            pcmd = "printbibliography"
            if opts:
                pcmd += "[" + opts + "]"
            repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
                "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
                "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
                "status open", "", "\\begin_layout Plain Layout" ]
            repl += document.body[i:j+1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i:j+1] = repl
            # the replacement adds 21 lines before and 6 after the inset
            j += 27
        i = j + 1

    if biblatex:
        for b in bibresources:
            add_to_preamble(document, "\\addbibresource{" + b + ".bib}")

    # 2. Citation insets

    # Specific citation insets used in biblatex that need to be reverted to ERT
    new_citations = {
        "Cite" : "Cite",
        "citebyear" : "citeyear",
        "citeyear" : "cite*",
        "Footcite" : "Smartcite",
        "footcite" : "smartcite",
        "Autocite" : "Autocite",
        "autocite" : "autocite",
        "citetitle" : "citetitle",
        "citetitle*" : "citetitle*",
        "fullcite" : "fullcite",
        "footfullcite" : "footfullcite",
        "supercite" : "supercite",
        "citeauthor" : "citeauthor",
        "citeauthor*" : "citeauthor*",
        "Citeauthor" : "Citeauthor",
        "Citeauthor*" : "Citeauthor*"
        }

    # All commands accepted by LyX < 2.3. Everything else throws an error.
    old_citations = [ "cite", "nocite", "citet", "citep", "citealt", "citealp",\
                      "citeauthor", "citeyear", "citeyearpar", "citet*", "citep*",\
                      "citealt*", "citealp*", "citeauthor*", "Citet", "Citep",\
                      "Citealt", "Citealp", "Citeauthor", "Citet*", "Citep*",\
                      "Citealt*", "Citealp*", "Citeauthor*", "fullcite", "footcite",\
                      "footcitet", "footcitep", "footcitealt", "footcitealp",\
                      "footciteauthor", "footciteyear", "footciteyearpar",\
                      "citefield", "citetitle", "cite*" ]

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        if biblatex and cmd in list(new_citations.keys()):
            pre = get_quoted_value(document.body, "before", i, j)
            post = get_quoted_value(document.body, "after", i, j)
            key = get_quoted_value(document.body, "key", i, j)
            if not key:
                document.warning("Citation inset at line %d does not have a key!" %(i))
                key = "???"
            # Replace known new commands with ERT
            # Command syntax: \cmd[pre][post]{key}; a lone [post] needs an
            # empty [pre] placeholder.
            res = "\\" + new_citations[cmd]
            if pre:
                res += "[" + pre + "]"
            if post:
                res += "[" + post + "]"
            elif pre:
                res += "[]"
            res += "{" + key + "}"
            document.body[i:j+1] = put_cmd_in_ert([res])
        elif cmd not in old_citations:
            # Reset unknown commands to cite. This is what LyX does as well
            # (but LyX 2.2 would break on unknown commands)
            document.body[k] = "LatexCommand cite"
            document.warning("Reset unknown cite command '%s' with cite" % cmd)
        i = j + 1

    # Emulate the old biblatex-workaround (pretend natbib in order to use the styles)
    if biblatex:
        i = find_token(document.header, "\\begin_local_layout", 0)
        if i == -1:
            # No local layout yet: create an empty one before \language.
            k = find_token(document.header, "\\language", 0)
            if k == -1:
                # this should not happen
                document.warning("Malformed LyX document! No \\language header found!")
                return
            document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
            i = k-1

        j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
        if j == -1:
            # this should not happen
            document.warning("Malformed LyX document! Can't find end of local layout!")
            return

        document.header[i+1 : i+1] = [
            "### Inserted by lyx2lyx (biblatex emulation) ###",
            "Provides natbib 1",
            "### End of insertion by lyx2lyx (biblatex emulation) ###"
        ]
|
|
|
|
|
|
def revert_citekeyonly(document):
    """Revert the keyonly citation command to ERT (the bare citation key)."""

    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset citation", pos)
        if pos == -1:
            break
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(pos))
            pos += 1
            continue
        cmdline = find_token(document.body, "LatexCommand", pos, end)
        if cmdline == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(pos))
            pos = end + 1
            continue
        # Only the keyonly command is affected.
        if get_value(document.body, "LatexCommand", cmdline) != "keyonly":
            pos = end + 1
            continue

        key = get_quoted_value(document.body, "key", pos, end)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" %(pos))
        # keyonly prints the citation key verbatim.
        document.body[pos:end+1] = put_cmd_in_ert([key])
        pos = end + 1
|
|
|
|
|
|
|
|
def revert_bibpackopts(document):
    """Revert support for natbib/jurabib package options.

    Moves the \\biblio_options header value into a local layout
    ``PackageOptions`` line for the active cite engine. Only natbib and
    jurabib take package options here; other engines are left untouched.
    """

    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # Removed an unused `biblatex = False` local here; only natbib/jurabib
    # options are handled by this routine.
    if engine not in ["natbib", "jurabib"]:
        return

    i = find_token(document.header, "\\biblio_options", 0)
    if i == -1:
        # Nothing to do if we have no options
        return

    biblio_options = get_value(document.header, "\\biblio_options", i)
    del document.header[i]

    if not biblio_options:
        # Nothing to do for empty options
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        # No local layout yet: create an empty one before \language.
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k - 1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (bibliography package options) ###",
        "PackageOptions " + engine + " " + biblio_options,
        "### End of insertion by lyx2lyx (bibliography package options) ###"
    ]
|
|
|
|
|
|
def revert_qualicites(document):
    """Revert qualified citation list commands to ERT.

    With biblatex, citations carrying per-key pre/post texts are rewritten
    as raw multicite commands (e.g. ``\\cites(pre)(post)[p1][o1]{k1}...``).
    With other engines, the pretextlist/posttextlist parameter lines are
    simply removed from the inset.
    """

    def _parse_textlist(text):
        # The list is tab-separated "key value" entries; tolerate malformed
        # entries (no value) instead of raising IndexError.
        result = dict()
        for entry in text.split("\t"):
            parts = entry.split(" ", 1)
            if len(parts) == 2:
                result[parts[0]] = parts[1]
        return result

    # Citation insets that support qualified lists, with their LaTeX code
    ql_citations = {
        "cite" : "cites",
        "Cite" : "Cites",
        "citet" : "textcites",
        "Citet" : "Textcites",
        "citep" : "parencites",
        "Citep" : "Parencites",
        "Footcite" : "Smartcites",
        "footcite" : "smartcites",
        "Autocite" : "Autocites",
        "autocite" : "autocites",
        }

    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    biblatex = engine in ["biblatex", "biblatex-natbib"]

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue
        pres = find_token(document.body, "pretextlist", i, j)
        posts = find_token(document.body, "posttextlist", i, j)
        if pres == -1 and posts == -1:
            # nothing to do.
            i = j + 1
            continue
        # FIX: only read the values from lines that actually exist; either
        # one of pres/posts may be -1 here.
        pretexts = get_quoted_value(document.body, "pretextlist", pres) if pres != -1 else ""
        posttexts = get_quoted_value(document.body, "posttextlist", posts) if posts != -1 else ""
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        if biblatex and cmd in list(ql_citations.keys()):
            pre = get_quoted_value(document.body, "before", i, j)
            post = get_quoted_value(document.body, "after", i, j)
            key = get_quoted_value(document.body, "key", i, j)
            if not key:
                document.warning("Citation inset at line %d does not have a key!" %(i))
                key = "???"
            keys = key.split(",")
            premap = _parse_textlist(pretexts)
            postmap = _parse_textlist(posttexts)
            # Replace known new commands with ERT.
            # Global pre/post notes containing parentheses must be braced.
            if "(" in pre or ")" in pre:
                pre = "{" + pre + "}"
            if "(" in post or ")" in post:
                post = "{" + post + "}"
            res = "\\" + ql_citations[cmd]
            if pre:
                res += "(" + pre + ")"
            if post:
                res += "(" + post + ")"
            elif pre:
                # a lone post note needs an empty pre placeholder
                res += "()"
            for kk in keys:
                if premap.get(kk, "") != "":
                    res += "[" + premap[kk] + "]"
                if postmap.get(kk, "") != "":
                    res += "[" + postmap[kk] + "]"
                elif premap.get(kk, "") != "":
                    res += "[]"
                res += "{" + kk + "}"
            document.body[i:j+1] = put_cmd_in_ert([res])
        else:
            # just remove the params.
            # FIX: the old code deleted document.body[posttexts] and
            # [pretexts], i.e. indexed by the quoted *values* instead of the
            # line numbers. Delete by line number, higher index first so the
            # lower one stays valid, and skip missing lines.
            for line in sorted((l for l in (posts, pres) if l != -1), reverse=True):
                del document.body[line]
        i += 1
|
|
|
|
|
|
# Command insets that gain/lose the "literal" parameter in this format change.
command_insets = ["bibitem", "citation", "href", "index_print", "nomenclature"]
|
|
def convert_literalparam(document):
    """Add the literal param to the command insets listed in command_insets."""

    for inset in command_insets:
        start = 0
        while True:
            start = find_token(document.body, '\\begin_inset CommandInset %s' % inset, start)
            if start == -1:
                break
            end = find_end_of_inset(document.body, start)
            if end == -1:
                document.warning("Malformed LyX document: Can't find end of %s inset at line %d" % (inset, start))
                start += 1
                continue
            # Advance to the first blank line, i.e. past the parameter lines.
            while start < end and document.body[start].strip() != '':
                start += 1
            # href is already fully latexified. Here we can switch off literal.
            value = "false" if inset == "href" else "true"
            document.body.insert(start, 'literal "%s"' % value)
|
|
|
|
|
|
|
|
def revert_literalparam(document):
    """Remove the literal param from the command insets listed in command_insets."""

    for inset in command_insets:
        pos = 0
        while True:
            pos = find_token(document.body, '\\begin_inset CommandInset %s' % inset, pos)
            if pos == -1:
                break
            end = find_end_of_inset(document.body, pos)
            if end == -1:
                document.warning("Malformed LyX document: Can't find end of %s inset at line %d" % (inset, pos))
                pos += 1
                continue
            param = find_token(document.body, 'literal', pos, end)
            if param == -1:
                pos += 1
                continue
            # Drop the literal line; the loop revisits this inset and moves
            # on once no literal line is left.
            del document.body[param]
|
|
|
|
|
|
|
|
def revert_multibib(document):
    """Revert multibib support.

    With biblatex, the \\multibib header becomes a ``refsection=<unit>``
    package option and ``bibbysection`` BibTeX insets are rewritten as ERT.
    With BibTeX, bibtopic is enabled instead and every sectioning unit is
    wrapped in ``\\begin{btUnit}``/``\\end{btUnit}`` ERT.
    """

    # 1. Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # 2. Do we use biblatex?
    biblatex = False
    if engine in ["biblatex", "biblatex-natbib"]:
        biblatex = True

    # 3. Store and remove multibib document header
    multibib = ""
    i = find_token(document.header, "\\multibib", 0)
    if i != -1:
        multibib = get_value(document.header, "\\multibib", i)
        del document.header[i]

    if not multibib:
        return

    # 4. The easy part: Biblatex
    if biblatex:
        i = find_token(document.header, "\\biblio_options", 0)
        if i == -1:
            # No options header yet: create one right before \use_bibtopic.
            k = find_token(document.header, "\\use_bibtopic", 0)
            if k == -1:
                # this should not happen
                document.warning("Malformed LyX document! No \\use_bibtopic header found!")
                return
            document.header[k-1 : k-1] = ["\\biblio_options " + "refsection=" + multibib]
        else:
            biblio_options = get_value(document.header, "\\biblio_options", i)
            if biblio_options:
                biblio_options += ","
            biblio_options += "refsection=" + multibib
            document.header[i] = "\\biblio_options " + biblio_options

        # Bibtex insets
        i = 0
        while (True):
            i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Can't find end of bibtex inset at line %d!!" %(i))
                i += 1
                continue
            btprint = get_quoted_value(document.body, "btprint", i, j)
            if btprint != "bibbysection":
                i += 1
                continue
            opts = get_quoted_value(document.body, "biblatexopts", i, j)
            # change btprint line
            k = find_token(document.body, "btprint", i, j)
            if k != -1:
                document.body[k] = "btprint \"btPrintCited\""
            # Insert ERT \\bibbysection and wrap bibtex inset to a Note
            pcmd = "bibbysection"
            if opts:
                pcmd += "[" + opts + "]"
            repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
                "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
                "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
                "status open", "", "\\begin_layout Plain Layout" ]
            repl += document.body[i:j+1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i:j+1] = repl
            # the replacement adds 21 lines before and 6 after the inset
            j += 27
            i = j + 1
        return

    # 5. More tricky: Bibtex/Bibtopic
    k = find_token(document.header, "\\use_bibtopic", 0)
    if k == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\use_bibtopic header found!")
        return
    document.header[k] = "\\use_bibtopic true"

    # Possible units. This assumes that the LyX name follows the std,
    # which might not always be the case. But it's as good as we can get.
    units = {
        "part" : "Part",
        "chapter" : "Chapter",
        "section" : "Section",
        "subsection" : "Subsection",
        }

    if multibib not in units.keys():
        # FIX: the warning referenced a misspelled name (nultibib), which
        # raised a NameError instead of warning.
        document.warning("Unknown multibib value `%s'!" % multibib)
        return
    unit = units[multibib]
    btunit = False
    i = 0
    while (True):
        i = find_token(document.body, "\\begin_layout " + unit, i)
        if i == -1:
            break
        if btunit:
            # Close the previous btUnit and open a new one before this unit.
            # FIX: a missing comma after "begin{btUnit}" used to merge it
            # with "\\end_layout" into one (invalid) LyX line.
            document.body[i-1 : i-1] = ["\\begin_layout Standard",
                                        "\\begin_inset ERT", "status open", "",
                                        "\\begin_layout Plain Layout", "", "",
                                        "\\backslash",
                                        "end{btUnit}", "\\end_layout",
                                        "\\begin_layout Plain Layout", "",
                                        "\\backslash",
                                        "begin{btUnit}",
                                        "\\end_layout", "", "\\end_inset", "", "",
                                        "\\end_layout", ""]
            i += 21
        else:
            # Open the first btUnit before the first unit heading.
            document.body[i-1 : i-1] = ["\\begin_layout Standard",
                                        "\\begin_inset ERT", "status open", "",
                                        "\\begin_layout Plain Layout", "", "",
                                        "\\backslash",
                                        "begin{btUnit}",
                                        "\\end_layout", "", "\\end_inset", "", "",
                                        "\\end_layout", ""]
            i += 16
            btunit = True
        i += 1

    if btunit:
        # Close the last open btUnit at the end of the document.
        i = find_token(document.body, "\\end_body", i)
        document.body[i-1 : i-1] = ["\\begin_layout Standard",
                                    "\\begin_inset ERT", "status open", "",
                                    "\\begin_layout Plain Layout", "", "",
                                    "\\backslash",
                                    "end{btUnit}",
                                    "\\end_layout", "", "\\end_inset", "", "",
                                    "\\end_layout", ""]
|
|
|
|
|
|
def revert_chapterbib(document):
    """Revert chapterbib support.

    The \\multibib child setting is emulated per engine: with biblatex a
    ``\\newrefsection`` ERT is inserted before each include inset; with
    bibtopic each include is wrapped in a btUnit; otherwise the chapterbib
    package is loaded.
    """

    # 1. Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # 2. Do we use biblatex?
    biblatex = False
    if engine in ["biblatex", "biblatex-natbib"]:
        biblatex = True

    # 3. Store multibib document header value
    multibib = ""
    i = find_token(document.header, "\\multibib", 0)
    if i != -1:
        multibib = get_value(document.header, "\\multibib", i)

    if not multibib or multibib != "child":
        # nothing to do
        return

    # 4. remove multibib header
    del document.header[i]

    # 5. Biblatex
    if biblatex:
        # find include insets
        i = 0
        while (True):
            i = find_token(document.body, "\\begin_inset CommandInset include", i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Can't find end of bibtex inset at line %d!!" %(i))
                i += 1
                continue
            parent = get_containing_layout(document.body, i)
            parbeg = parent[1]

            # Insert ERT \\newrefsection before inset
            # FIX: a missing comma after "newrefsection" used to merge it
            # with "\\end_layout" into one (invalid) LyX line.
            beg = ["\\begin_layout Standard",
                   "\\begin_inset ERT", "status open", "",
                   "\\begin_layout Plain Layout", "", "",
                   "\\backslash",
                   "newrefsection",
                   "\\end_layout", "", "\\end_inset", "", "",
                   "\\end_layout", ""]
            document.body[parbeg-1:parbeg-1] = beg
            j += len(beg)
            i = j + 1
        return

    # 6. Bibtex/Bibtopic
    i = find_token(document.header, "\\use_bibtopic", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\use_bibtopic header found!")
        return
    if get_value(document.header, "\\use_bibtopic", i) == "true":
        # find include insets
        i = 0
        while (True):
            i = find_token(document.body, "\\begin_inset CommandInset include", i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Can't find end of bibtex inset at line %d!!" %(i))
                i += 1
                continue
            parent = get_containing_layout(document.body, i)
            parbeg = parent[1]
            parend = parent[2]

            # Insert wrap inset into \\begin{btUnit}...\\end{btUnit}
            # FIX: missing commas after "begin{btUnit}"/"end{btUnit}" used to
            # merge them with "\\end_layout" into one (invalid) LyX line each.
            beg = ["\\begin_layout Standard",
                   "\\begin_inset ERT", "status open", "",
                   "\\begin_layout Plain Layout", "", "",
                   "\\backslash",
                   "begin{btUnit}",
                   "\\end_layout", "", "\\end_inset", "", "",
                   "\\end_layout", ""]
            end = ["\\begin_layout Standard",
                   "\\begin_inset ERT", "status open", "",
                   "\\begin_layout Plain Layout", "", "",
                   "\\backslash",
                   "end{btUnit}",
                   "\\end_layout", "", "\\end_inset", "", "",
                   "\\end_layout", ""]
            # insert the closing ERT first so parbeg stays valid
            document.body[parend+1:parend+1] = end
            document.body[parbeg-1:parbeg-1] = beg
            j += len(beg) + len(end)
            i = j + 1
        return

    # 7. Chapterbib proper
    add_to_preamble(document, ["\\usepackage{chapterbib}"])
|
|
|
|
|
|
def convert_dashligatures(document):
    " Remove a zero-length space (U+200B) after en- and em-dashes. "
    # Also adds the new \use_dash_ligatures header, guessing its value from
    # the format the document was originally written in.

    i = find_token(document.header, "\\use_microtype", 0)
    if i != -1:
        if document.initial_format > 474 and document.initial_format < 509:
            # This was created by LyX 2.2
            document.header[i+1:i+1] = ["\\use_dash_ligatures false"]
        else:
            # This was created by LyX 2.1 or earlier
            document.header[i+1:i+1] = ["\\use_dash_ligatures true"]

    i = 0
    while i < len(document.body):
        words = document.body[i].split()
        # Skip some document parts where dashes are not converted
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           words[1] in ["CommandInset", "ERT", "External", "Formula", \
                        "FormulaMacro", "Graphics", "IPA", "listings"]:
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " \
                                 + words[1] + " inset at line " + str(i))
                i += 1
            else:
                i = j
            continue
        if len(words) > 0 and words[0] in ["\\leftindent", \
                "\\paragraph_spacing", "\\align", "\\labelwidthstring"]:
            i += 1
            continue

        # Scan the current line for dashes and drop a directly following
        # zero-width space (it used to prevent dash ligatures).
        start = 0
        while True:
            j = document.body[i].find(u"\u2013", start) # en-dash
            k = document.body[i].find(u"\u2014", start) # em-dash
            if j == -1 and k == -1:
                break
            # j becomes the position of the first dash of either kind.
            if j == -1 or (k != -1 and k < j):
                j = k
            after = document.body[i][j+1:]
            if after.startswith(u"\u200B"):
                document.body[i] = document.body[i][:j+1] + after[1:]
            else:
                # A line-final dash: the space may be at the start of the
                # next line. NOTE(review): document.body[i+1] is accessed
                # without a bounds check — assumes a dash never ends the
                # very last body line.
                if len(after) == 0 and document.body[i+1].startswith(u"\u200B"):
                    document.body[i+1] = document.body[i+1][1:]
                break
            start = j+1
        i += 1
|
|
|
|
|
|
def revert_dashligatures(document):
    " Remove font ligature settings for en- and em-dashes. "
    # Reverts the \use_dash_ligatures header; if ligatures were enabled with
    # TeX fonts, a zero-width space is re-inserted after every dash so older
    # versions do not merge them into ligatures.
    i = find_token(document.header, "\\use_dash_ligatures", 0)
    if i == -1:
        return
    use_dash_ligatures = get_bool_value(document.header, "\\use_dash_ligatures", i)
    del document.header[i]
    use_non_tex_fonts = False
    i = find_token(document.header, "\\use_non_tex_fonts", 0)
    if i != -1:
        use_non_tex_fonts = get_bool_value(document.header, "\\use_non_tex_fonts", i)
    # With non-TeX fonts (or ligatures off) nothing needs to be protected.
    if not use_dash_ligatures or use_non_tex_fonts:
        return

    # Add a zero-length space (U+200B) after en- and em-dashes
    i = 0
    while i < len(document.body):
        words = document.body[i].split()
        # Skip some document parts where dashes are not converted
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           words[1] in ["CommandInset", "ERT", "External", "Formula", \
                        "FormulaMacro", "Graphics", "IPA", "listings"]:
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " \
                                 + words[1] + " inset at line " + str(i))
                i += 1
            else:
                i = j
            continue
        if len(words) > 0 and words[0] in ["\\leftindent", \
                "\\paragraph_spacing", "\\align", "\\labelwidthstring"]:
            i += 1
            continue

        # Insert U+200B after every dash on this line.
        start = 0
        while True:
            j = document.body[i].find(u"\u2013", start) # en-dash
            k = document.body[i].find(u"\u2014", start) # em-dash
            if j == -1 and k == -1:
                break
            # j becomes the position of the first dash of either kind.
            if j == -1 or (k != -1 and k < j):
                j = k
            after = document.body[i][j+1:]
            document.body[i] = document.body[i][:j+1] + u"\u200B" + after
            start = j+1
        i += 1
|
|
|
|
|
|
def revert_noto(document):
    """Revert Noto font definitions to LaTeX preamble code."""

    # Only relevant when the document is typeset with TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    # (header token, LyX font name, LaTeX default macro) per font family
    families = [
        ("\\font_roman", "NotoSerif-TLF", "\\rmdefault"),
        ("\\font_sans", "NotoSans-TLF", "\\sfdefault"),
        ("\\font_typewriter", "NotoMono-TLF", "\\ttdefault"),
    ]
    for token, fontname, texdefault in families:
        i = find_token(document.header, '%s "%s"' % (token, fontname), 0)
        if i != -1:
            add_to_preamble(document, ["\\renewcommand{%s}{%s}" % (texdefault, fontname)])
            document.header[i] = document.header[i].replace(fontname, "default")
|
|
|
|
|
|
def revert_xout(document):
    """Revert the \\xout font attribute to ulem's \\xout macro."""
    # Only load ulem if the attribute actually occurred in the document.
    if revert_font_attrs(document.body, "\\xout", "\\xout"):
        insert_to_preamble(document,
            ['% for proper cross-out',
             '\\PassOptionsToPackage{normalem}{ulem}',
             '\\usepackage{ulem}'])
|
|
|
|
|
|
def convert_mathindent(document):
    " add the \\is_math_indent tag "
    # The tag is inserted right before the \quotes_style header line.
    # check if the document uses the class option "fleqn"
    k = find_token(document.header, "\\quotes_style", 0)
    regexp = re.compile(r'^.*fleqn.*')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        # NOTE(review): the insert happens before document.header[i] is
        # modified — this assumes the matched fleqn line precedes
        # \quotes_style (as \options normally does), otherwise i would be
        # stale after the insert; confirm for unusual headers.
        document.header.insert(k, "\\is_math_indent 1")
        # delete the found option
        document.header[i] = document.header[i].replace(",fleqn", "")
        document.header[i] = document.header[i].replace(", fleqn", "")
        document.header[i] = document.header[i].replace("fleqn,", "")
        # If the line still matches, fleqn had no comma neighbors, i.e. it
        # was the only option on the line.
        j = find_re(document.header, regexp, 0)
        if i == j:
            # then we have fleqn as the only option
            del document.header[i]
    else:
        document.header.insert(k, "\\is_math_indent 0")
|
|
|
|
|
|
def revert_mathindent(document):
    """Revert math indentation settings.

    Emits \\setlength{\\mathindent}{...} preamble code for a stored
    \\math_indentation value and translates \\is_math_indent 1 into the
    "fleqn" document class option.
    """
    # first output the length
    regexp = re.compile(r'(\\math_indentation)')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        value = get_value(document.header, "\\math_indentation" , i).split()[0]
        add_to_preamble(document, ["\\setlength{\\mathindent}{" + value + '}'])
        del document.header[i]
    # now set the document class option
    regexp = re.compile(r'(\\is_math_indent 1)')
    i = find_re(document.header, regexp, 0)
    if i == -1:
        # Math indentation is off: just drop the \is_math_indent tag.
        # FIX: guard against find_re returning -1, which used to delete the
        # last header line of documents lacking the tag entirely.
        regexp = re.compile(r'(\\is_math_indent)')
        j = find_re(document.header, regexp, 0)
        if j != -1:
            del document.header[j]
    else:
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            # prepend fleqn to the existing class options
            document.header[k] = document.header[k].replace("\\options", "\\options fleqn,")
            del document.header[i]
        else:
            # No options line yet: create one before \use_default_options.
            # That insert shifts the \is_math_indent line down by one,
            # hence the i + 1 in the deletion below.
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l, "\\options fleqn")
            del document.header[i + 1]
|
|
|
|
|
|
def revert_baselineskip(document):
    """Revert baselineskip-valued VSpace and hspace insets to TeX code.

    LyX stores these lengths in percent of \\baselineskip; the reverted
    TeX code uses the corresponding factor (e.g. 100% -> 1.0\\baselineskip).
    Leftover debug warnings from the original implementation were removed.
    """
    regexp = re.compile(r'^.*baselineskip%.*$')
    i = 0
    while True:
        i = find_re(document.body, regexp, i)
        if i == -1:
            return
        vspaceLine = find_token(document.body, "\\begin_inset VSpace", i)
        if vspaceLine == i:
            # output VSpace inset as TeX code
            # first read out the value; it is stored in percent,
            # thus divide by 100
            beg = document.body[i].rfind("VSpace ")
            end = document.body[i].rfind("baselineskip%")
            baselineskip = str(float(document.body[i][beg + 7:end]) / 100)
            # check if it is the starred version
            star = '*' if document.body[i].find('*') != -1 else ''
            # now output TeX code
            endInset = find_end_of_inset(document.body, i)
            if endInset == -1:
                document.warning("Malformed LyX document: Missing '\\end_inset' of VSpace inset.")
                return
            document.body[vspaceLine:endInset + 1] = put_cmd_in_ert("\\vspace" + star + '{' + baselineskip + "\\baselineskip}")
        hspaceLine = find_token(document.body, "\\begin_inset space \\hspace", i - 1)
        if hspaceLine == i - 1:
            # output space inset as TeX code
            # first read out the value (stored in percent, see above);
            # the slice starts at the space after "\length", which
            # float() tolerates
            beg = document.body[i].rfind("\\length ")
            end = document.body[i].rfind("baselineskip%")
            baselineskip = str(float(document.body[i][beg + 7:end]) / 100)
            # the starred variant is recorded on the inset line itself
            star = '*' if document.body[i - 1].find('*') != -1 else ''
            # now output TeX code
            endInset = find_end_of_inset(document.body, i)
            if endInset == -1:
                document.warning("Malformed LyX document: Missing '\\end_inset' of space inset.")
                return
            document.body[hspaceLine:endInset + 1] = put_cmd_in_ert("\\hspace" + star + '{' + baselineskip + "\\baselineskip}")
        i += 1
def revert_rotfloat(document):
    """Revert placement options for rotated (sideways) floats.

    A sideways float carrying an explicit placement option cannot be
    represented in older formats, so the whole float is turned into raw
    \\begin{sideways...} / \\end{sideways...} TeX code relying on the
    rotfloat package.
    """
    placement_re = re.compile(r'^.*placement.*$')
    i = 0
    while True:
        i = find_token(document.body, "sideways true", i)
        if i == -1:
            return
        # a placement line must sit exactly two lines above
        j = find_re(document.body, placement_re, i - 2)
        if j == -1:
            return
        if j != i - 2:
            i = i + 1
            continue
        # we found a sideways float with placement options;
        # at first store the placement
        beg = document.body[i - 2].rfind(" ")
        placement = document.body[i - 2][beg + 1:]
        # the 'H' placement requires the float package
        if placement.find("H") != -1:
            add_to_preamble(document, ["\\usepackage{float}"])
        # now check if it is a starred (column-spanning) type
        if document.body[i - 1].find("wide true") != -1:
            star = '*'
        else:
            star = ''
        # store the float type
        beg = document.body[i - 3].rfind(" ")
        fType = document.body[i - 3][beg + 1:]
        # now output TeX code
        endInset = find_end_of_inset(document.body, i - 3)
        if endInset == -1:
            document.warning("Malformed LyX document: Missing '\\end_inset' of Float inset.")
            return
        # replace the tail first so the earlier indices stay valid
        document.body[endInset - 2:endInset + 1] = put_cmd_in_ert("\\end{sideways" + fType + star + '}')
        document.body[i - 3:i + 2] = put_cmd_in_ert("\\begin{sideways" + fType + star + "}[" + placement + ']')
        add_to_preamble(document, ["\\usepackage{rotfloat}"])
        i = i + 1
def convert_allowbreak(document):
    """Convert zero-width space insets to \\SpecialChar allowbreak."""
    # Operate on the body as one string so the multi-line inset can be
    # replaced in a single pass.
    joined = "\n".join(document.body)
    zero_width_inset = ("\\begin_inset space \\hspace{}\n"
                        "\\length 0dd\n"
                        "\\end_inset\n\n")
    joined = joined.replace(zero_width_inset, "\\SpecialChar allowbreak\n")
    document.body = joined.split("\n")
def revert_allowbreak(document):
    """Revert \\SpecialChar allowbreak to a zero-width space inset."""
    # Operate on the body as one string; the replacement re-creates the
    # three-line inset followed by the customary blank line.
    zero_width_inset = ("\\begin_inset space \\hspace{}\n"
                        "\\length 0dd\n"
                        "\\end_inset\n\n")
    joined = "\n".join(document.body)
    joined = joined.replace("\\SpecialChar allowbreak\n", zero_width_inset)
    document.body = joined.split("\n")
def convert_mathnumberpos(document):
    """Add the \\math_number_before tag.

    The tag is set to 1 when "leqno" occurs on the \\options line (the
    option itself is then removed); otherwise it is set to 0.
    """
    pattern = re.compile(r'^.*leqno.*')
    insert_pos = find_token(document.header, "\\quotes_style", 0)
    options_pos = find_token(document.header, "\\options", 0)
    match_pos = find_re(document.header, pattern, 0)
    # only a hit on the \options line counts as the class option
    if match_pos == -1 or match_pos != options_pos:
        document.header.insert(insert_pos, "\\math_number_before 0")
        return
    document.header.insert(insert_pos, "\\math_number_before 1")
    # strip the option from the \options line
    for variant in (",leqno", ", leqno", "leqno,"):
        document.header[match_pos] = document.header[match_pos].replace(variant, "")
    # if "leqno" was the only option, remove the whole line
    if find_re(document.header, pattern, 0) == match_pos:
        del document.header[match_pos]
def revert_mathnumberpos(document):
    """Revert \\math_number_before to the document class option "leqno"."""
    i = find_re(document.header, re.compile(r'(\\math_number_before 1)'), 0)
    if i == -1:
        # left numbering not enabled: just drop the tag, if present
        # (guard against j == -1, which would delete the last header line)
        j = find_re(document.header, re.compile(r'(\\math_number_before)'), 0)
        if j != -1:
            del document.header[j]
        return
    k = find_token(document.header, "\\options", 0)
    if k != -1:
        document.header[k] = document.header[k].replace("\\options", "\\options leqno,")
    else:
        l = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(l, "\\options leqno")
    # delete the tag; re-find it, since the insertion above may have
    # shifted its index
    i = find_token(document.header, "\\math_number_before", 0)
    if i != -1:
        del document.header[i]
def convert_mathnumberingname(document):
    """Rename the \\math_number_before tag to \\math_numbering_side.

    1 becomes "left", 0 becomes "default"; the class option "reqno"
    (right-hand numbering) is folded into the new tag as "right" and
    removed from the \\options line.
    """
    pos = find_re(document.header, re.compile(r'(\\math_number_before 1)'), 0)
    if pos != -1:
        document.header[pos] = "\\math_numbering_side left"
    pos = find_re(document.header, re.compile(r'(\\math_number_before 0)'), 0)
    if pos != -1:
        document.header[pos] = "\\math_numbering_side default"
    # check if the document uses the class option "reqno"
    tag_pos = find_token(document.header, "\\math_numbering_side", 0)
    options_pos = find_token(document.header, "\\options", 0)
    pattern = re.compile(r'^.*reqno.*')
    match_pos = find_re(document.header, pattern, 0)
    # only a hit on the \options line counts as the class option
    if match_pos != -1 and match_pos == options_pos:
        document.header[tag_pos] = "\\math_numbering_side right"
        # strip the option from the \options line
        for variant in (",reqno", ", reqno", "reqno,"):
            document.header[match_pos] = document.header[match_pos].replace(variant, "")
        # if "reqno" was the only option, remove the whole line
        if find_re(document.header, pattern, 0) == match_pos:
            del document.header[match_pos]
def revert_mathnumberingname(document):
    """Rename \\math_numbering_side back to \\math_number_before.

    "left" maps to \\math_number_before 1; "right" maps to the class
    option "reqno" plus \\math_number_before 0; "default" maps to
    \\math_number_before 0.
    """
    # just rename the "left" case
    pos = find_re(document.header, re.compile(r'(\\math_numbering_side left)'), 0)
    if pos != -1:
        document.header[pos] = "\\math_number_before 1"
    # "right": add the option reqno and rewrite the tag
    pos = find_re(document.header, re.compile(r'(\\math_numbering_side right)'), 0)
    if pos != -1:
        document.header[pos] = "\\math_number_before 0"
        opts = find_token(document.header, "\\options", 0)
        if opts != -1:
            document.header[opts] = document.header[opts].replace("\\options", "\\options reqno,")
        else:
            defopts = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(defopts, "\\options reqno")
    # "default": plain rename
    pos = find_re(document.header, re.compile(r'(\\math_numbering_side default)'), 0)
    if pos != -1:
        document.header[pos] = "\\math_number_before 0"
def convert_minted(document):
    """Add the \\use_minted tag (0 = minted disabled) to the header."""
    # insert just before the last header line
    document.header.insert(-1, "\\use_minted 0")
def revert_minted(document):
    """Remove the \\use_minted tag from the header."""
    pos = find_token(document.header, "\\use_minted", 0)
    if pos != -1:
        del document.header[pos]
##
|
|
# Conversion hub
|
|
#
|
|
|
|
# LyX versions this module can produce files for.
supported_versions = ["2.3.0", "2.3"]

# Conversion chain: each entry is [target format number, [functions to
# run when converting up to that format]].  An empty list means the
# format bump needs no document changes.
convert = [
           [509, [convert_microtype]],
           [510, [convert_dateinset]],
           [511, [convert_ibranches]],
           [512, [convert_beamer_article_styles]],
           [513, []],
           [514, []],
           [515, []],
           [516, [convert_inputenc]],
           [517, []],
           [518, [convert_iopart]],
           [519, [convert_quotestyle]],
           [520, []],
           [521, [convert_frenchquotes]],
           [522, []],
           [523, []],
           [524, []],
           [525, []],
           [526, []],
           [527, []],
           [528, []],
           [529, []],
           [530, []],
           [531, []],
           [532, [convert_literalparam]],
           [533, []],
           [534, []],
           [535, [convert_dashligatures]],
           [536, []],
           [537, []],
           [538, [convert_mathindent]],
           [539, []],
           [540, []],
           [541, [convert_allowbreak]],
           [542, [convert_mathnumberpos]],
           [543, [convert_mathnumberingname]],
           [544, [convert_minted]]
          ]
# Reversion chain: each entry is [target format number, [functions to
# run when reverting down to that format]].  Mirrors `convert` in
# reverse order; an empty list means no document changes are needed.
revert = [
          [543, [revert_minted]],
          [542, [revert_mathnumberingname]],
          [541, [revert_mathnumberpos]],
          [540, [revert_allowbreak]],
          [539, [revert_rotfloat]],
          [538, [revert_baselineskip]],
          [537, [revert_mathindent]],
          [536, [revert_xout]],
          [535, [revert_noto]],
          [534, [revert_dashligatures]],
          [533, [revert_chapterbib]],
          [532, [revert_multibib]],
          [531, [revert_literalparam]],
          [530, [revert_qualicites]],
          [529, [revert_bibpackopts]],
          [528, [revert_citekeyonly]],
          [527, [revert_biblatex]],
          [526, [revert_noprefix]],
          [525, [revert_plural_refs]],
          [524, [revert_labelonly]],
          [523, [revert_crimson, revert_cochinealmath]],
          [522, [revert_cjkquotes]],
          [521, [revert_dynamicquotes]],
          [520, [revert_britishquotes, revert_swedishgquotes, revert_frenchquotes, revert_frenchinquotes, revert_russianquotes, revert_swissquotes]],
          [519, [revert_plainquote]],
          [518, [revert_quotestyle]],
          [517, [revert_iopart]],
          [516, [revert_quotes]],
          [515, []],
          [514, [revert_urdu, revert_syriac]],
          [513, [revert_amharic, revert_asturian, revert_kannada, revert_khmer]],
          [512, [revert_bosnian, revert_friulan, revert_macedonian, revert_piedmontese, revert_romansh]],
          [511, [revert_beamer_article_styles]],
          [510, [revert_ibranches]],
          [509, []],
          [508, [revert_microtype]]
         ]
if __name__ == "__main__":
    # This module is only used through the lyx2lyx driver; there is
    # nothing to do when it is executed standalone.
    pass