2016-06-03 05:40:21 +00:00
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
# This file is part of lyx2lyx
|
|
|
|
# Copyright (C) 2016 The LyX team
|
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or
|
|
|
|
# modify it under the terms of the GNU General Public License
|
|
|
|
# as published by the Free Software Foundation; either version 2
|
|
|
|
# of the License, or (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program; if not, write to the Free Software
|
|
|
|
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
|
|
|
|
|
|
|
""" Convert files to the file format generated by lyx 2.3"""
|
|
|
|
|
|
|
|
import re, string
|
|
|
|
import unicodedata
|
|
|
|
import sys, os
|
|
|
|
|
|
|
|
# Uncomment only what you need to import, please.
|
|
|
|
|
2018-01-23 13:01:30 +00:00
|
|
|
from parser_tools import (del_token, del_value, del_complete_lines,
|
|
|
|
find_complete_lines, find_end_of, find_end_of_layout, find_end_of_inset,
|
2018-01-24 16:38:19 +00:00
|
|
|
find_re, find_token, find_token_backwards, get_containing_inset,
|
2018-01-23 13:01:30 +00:00
|
|
|
get_containing_layout, get_bool_value, get_value, get_quoted_value)
|
|
|
|
# find_tokens, find_token_exact, is_in_inset,
|
2016-06-18 23:29:15 +00:00
|
|
|
# check_token, get_option_value
|
2016-06-17 19:11:53 +00:00
|
|
|
|
2017-04-04 22:01:19 +00:00
|
|
|
from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert, revert_font_attrs, \
|
|
|
|
insert_to_preamble
|
2016-10-22 13:33:59 +00:00
|
|
|
# get_ert, lyx2latex, \
|
2016-06-03 05:40:21 +00:00
|
|
|
# lyx2verbatim, length_in_bp, convert_info_insets
|
2017-04-04 22:01:19 +00:00
|
|
|
# latex_length, revert_flex_inset, hex2ratio, str2bool
|
2016-06-03 05:40:21 +00:00
|
|
|
|
|
|
|
####################################################################
|
|
|
|
# Private helper functions
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
###############################################################################
|
|
|
|
###
|
|
|
|
### Conversion and reversion routines
|
|
|
|
###
|
|
|
|
###############################################################################
|
|
|
|
|
2016-06-17 19:11:53 +00:00
|
|
|
def convert_microtype(document):
    """Add the \\use_microtype header setting.

    The flag is true when the user preamble already loads the microtype
    package; in that case the preamble line is removed as well.
    """
    anchor = find_token(document.header, "\\font_tt_scale", 0)
    if anchor == -1:
        document.warning("Malformed LyX document: Can't find \\font_tt_scale.")
        # fall back to appending at the end of the header
        anchor = len(document.header) - 1

    pkg = find_token(document.preamble, "\\usepackage{microtype}", 0)
    if pkg == -1:
        document.header.insert(anchor + 1, "\\use_microtype false")
    else:
        document.header.insert(anchor + 1, "\\use_microtype true")
        # the native setting replaces the explicit package load
        del document.preamble[pkg]
|
|
|
|
|
|
|
|
|
2016-06-03 05:40:21 +00:00
|
|
|
def revert_microtype(document):
    """Remove the \\use_microtype header setting.

    If microtype was enabled, load the package via the user preamble
    instead.
    """
    pos = find_token(document.header, "\\use_microtype", 0)
    if pos == -1:
        return
    enabled = get_bool_value(document.header, "\\use_microtype", pos)
    del document.header[pos]
    if enabled:
        add_to_preamble(document, ["\\usepackage{microtype}"])
|
2016-06-03 05:40:21 +00:00
|
|
|
|
|
|
|
|
2016-06-19 19:23:25 +00:00
|
|
|
def convert_dateinset(document):
    """Replace "Date" external insets by an ERT containing \\today."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset External", pos)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset' in convert_dateinset.")
            pos += 1
            continue
        # only the "Date" template is converted; other external insets stay
        if get_value(document.body, 'template', pos, end) == "Date":
            document.body[pos : end + 1] = put_cmd_in_ert("\\today ")
        pos += 1
|
|
|
|
|
|
|
|
|
2016-12-07 17:38:41 +00:00
|
|
|
def convert_inputenc(document):
    """Replace the no longer supported pt254 input encoding by pt154."""
    # find_token matches the "\inputencoding <enc>" header line by prefix
    pos = find_token(document.header, "\\inputenc", 0)
    if pos == -1:
        return
    if get_value(document.header, "\\inputencoding", pos) == "pt254":
        document.header[pos] = "\\inputencoding pt154"
|
2017-03-27 09:16:31 +00:00
|
|
|
|
2016-12-07 17:38:41 +00:00
|
|
|
|
2016-07-12 03:56:32 +00:00
|
|
|
def convert_ibranches(document):
    """Add an "inverted 0" line to every branch inset."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Branch", pos)
        if pos == -1:
            return
        document.body.insert(pos + 1, "inverted 0")
        pos += 1
|
|
|
|
|
|
|
|
|
|
|
|
def revert_ibranches(document):
    """Convert inverted branch insets to explicit anti-branches.

    Older formats have no "inverted" flag on branch insets.  For every
    branch used inverted somewhere in the body, a new branch named
    "Anti-<branch>" (uniquified with "x" prefixes on collision) is added
    to the header with the opposite selection status, and the inverted
    insets are repointed at it.
    """
    # Get the list of branches declared in the header, with their
    # selection status (0 or 1).
    ourbranches = {}
    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        branch = document.header[i][8:].strip()
        if document.header[i+1].startswith("\\selected "):
            selected = int(document.header[i+1][10])
        else:
            document.warning("Malformed LyX document: No selection indicator for branch " + branch)
            selected = 1
        # the value tells us whether the branch is selected
        ourbranches[branch] = selected
        i += 1

    # Figure out what inverted branches, if any, have been used
    # and convert them to "Anti-OldBranch"
    ibranches = {}
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Branch", i)
        if i == -1:
            break
        if not document.body[i+1].startswith("inverted "):
            document.warning("Malformed LyX document: Missing 'inverted' tag!")
            i += 1
            continue
        inverted = document.body[i+1][9]
        if inverted == "1":
            branch = document.body[i][20:].strip()
            if branch not in ibranches:
                antibranch = "Anti-" + branch
                # make sure the generated name is unique
                while antibranch in ibranches:
                    antibranch = "x" + antibranch
                ibranches[branch] = antibranch
            else:
                antibranch = ibranches[branch]
            document.body[i] = "\\begin_inset Branch " + antibranch

        # remove "inverted" key
        del document.body[i+1]
        i += 1

    # now we need to add the new branches to the header
    for old, new in ibranches.items():
        i = find_token(document.header, "\\branch " + old, 0)
        if i == -1:
            document.warning("Can't find branch %s even though we found it before!" % (old))
            continue
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document! Can't find end of branch " + old)
            continue
        # The anti-branch gets the inverted selection status of the old
        # branch.  (Fixed: the previous `ourbranches[old] - 1` produced
        # "\selected -1" instead of "\selected 1" for an unselected branch.)
        lines = ["\\branch " + new,
                 "\\selected " + str(1 - ourbranches[old])]
        # these are the old lines telling us color, etc.
        lines += document.header[i+2 : j+1]
        document.header[i:i] = lines
|
2016-08-04 09:42:06 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_beamer_article_styles(document):
    """Include (scr)article styles in beamer article via local layout."""

    beamer_articles = ["article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_articles:
        return

    # pick the base layout matching the text class
    if document.textclass == "scrarticle-beamer":
        inclusion = "scrartcl.layout"
    else:
        inclusion = "article.layout"

    start = find_token(document.header, "\\begin_local_layout", 0)
    if start == -1:
        # no local layout yet: create an empty one before \language
        langpos = find_token(document.header, "\\language", 0)
        if langpos == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[langpos-1 : langpos-1] = ["\\begin_local_layout", "\\end_local_layout"]
        start = langpos - 1

    stop = find_end_of(document.header, start, "\\begin_local_layout", "\\end_local_layout")
    if stop == -1:
        # this should not happen
        document.warning("Malformed LyX document: Can't find end of local layout!")
        return

    document.header[start+1 : start+1] = [
        "### Inserted by lyx2lyx (more [scr]article styles) ###",
        "Input " + inclusion,
        "Input beamer.layout",
        "Provides geometry 0",
        "Provides hyperref 0",
        "DefaultFont",
        " Family Roman",
        " Series Medium",
        " Shape Up",
        " Size Normal",
        " Color None",
        "EndFont",
        "Preamble",
        " \\usepackage{beamerarticle,pgf}",
        " % this default might be overridden by plain title style",
        " \\newcommand\makebeamertitle{\\frame{\\maketitle}}%",
        " \\AtBeginDocument{",
        " \\let\\origtableofcontents=\\tableofcontents",
        " \\def\\tableofcontents{\\@ifnextchar[{\\origtableofcontents}{\\gobbletableofcontents}}",
        " \\def\\gobbletableofcontents#1{\\origtableofcontents}",
        " }",
        "EndPreamble",
        "### End of insertion by lyx2lyx (more [scr]article styles) ###"
        ]
|
|
|
|
|
2016-08-04 09:42:06 +00:00
|
|
|
|
|
|
|
def convert_beamer_article_styles(document):
    """Remove the (scr)article styles we inserted into beamer article
    local layout on reversion, if they are there."""

    beamer_articles = ["article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_articles:
        return

    start = find_token(document.header, "\\begin_local_layout", 0)
    if start == -1:
        return

    stop = find_end_of(document.header, start, "\\begin_local_layout", "\\end_local_layout")
    if stop == -1:
        # this should not happen
        document.warning("Malformed LyX document: Can't find end of local layout!")
        return

    mark = find_token(document.header, "### Inserted by lyx2lyx (more [scr]article styles) ###", start, stop)
    if mark == -1:
        return
    mark_end = find_token(document.header, "### End of insertion by lyx2lyx (more [scr]article styles) ###", start, stop)
    if mark_end == -1:
        # this should not happen
        document.warning("End of lyx2lyx local layout insertion not found!")
        return

    if mark == start + 1 and mark_end == stop - 1:
        # that was all the local layout there was: drop the whole block
        document.header[start : stop + 1] = []
    else:
        document.header[mark : mark_end + 1] = []
|
|
|
|
|
2016-06-03 05:40:21 +00:00
|
|
|
|
2016-10-16 13:33:23 +00:00
|
|
|
def revert_bosnian(document):
    """Set the document language to English but assure Bosnian output
    via babel class options."""
    if document.language != "bosnian":
        return
    document.language = "english"
    lang = find_token(document.header, "\\language bosnian", 0)
    if lang != -1:
        document.header[lang] = "\\language english"
    pkg = find_token(document.header, "\\language_package default", 0)
    if pkg != -1:
        document.header[pkg] = "\\language_package babel"
    opts = find_token(document.header, "\\options", 0)
    if opts != -1:
        document.header[opts] = document.header[opts].replace("\\options", "\\options bosnian,")
    else:
        anchor = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(anchor + 1, "\\options bosnian")
|
2016-10-16 13:33:23 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_friulan(document):
    """Set the document language to English but assure Friulan output
    via babel class options."""
    if document.language != "friulan":
        return
    document.language = "english"
    lang = find_token(document.header, "\\language friulan", 0)
    if lang != -1:
        document.header[lang] = "\\language english"
    pkg = find_token(document.header, "\\language_package default", 0)
    if pkg != -1:
        document.header[pkg] = "\\language_package babel"
    opts = find_token(document.header, "\\options", 0)
    if opts != -1:
        document.header[opts] = document.header[opts].replace("\\options", "\\options friulan,")
    else:
        anchor = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(anchor + 1, "\\options friulan")
|
2016-10-16 13:33:23 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_macedonian(document):
    """Set the document language to English but assure Macedonian output
    via babel class options."""
    if document.language != "macedonian":
        return
    document.language = "english"
    lang = find_token(document.header, "\\language macedonian", 0)
    if lang != -1:
        document.header[lang] = "\\language english"
    pkg = find_token(document.header, "\\language_package default", 0)
    if pkg != -1:
        document.header[pkg] = "\\language_package babel"
    opts = find_token(document.header, "\\options", 0)
    if opts != -1:
        document.header[opts] = document.header[opts].replace("\\options", "\\options macedonian,")
    else:
        anchor = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(anchor + 1, "\\options macedonian")
|
2016-10-16 13:33:23 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_piedmontese(document):
    """Set the document language to English but assure Piedmontese output
    via babel class options."""
    if document.language != "piedmontese":
        return
    document.language = "english"
    lang = find_token(document.header, "\\language piedmontese", 0)
    if lang != -1:
        document.header[lang] = "\\language english"
    pkg = find_token(document.header, "\\language_package default", 0)
    if pkg != -1:
        document.header[pkg] = "\\language_package babel"
    opts = find_token(document.header, "\\options", 0)
    if opts != -1:
        document.header[opts] = document.header[opts].replace("\\options", "\\options piedmontese,")
    else:
        anchor = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(anchor + 1, "\\options piedmontese")
|
2016-10-16 13:33:23 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_romansh(document):
    """Set the document language to English but assure Romansh output
    via babel class options."""
    if document.language != "romansh":
        return
    document.language = "english"
    lang = find_token(document.header, "\\language romansh", 0)
    if lang != -1:
        document.header[lang] = "\\language english"
    pkg = find_token(document.header, "\\language_package default", 0)
    if pkg != -1:
        document.header[pkg] = "\\language_package babel"
    opts = find_token(document.header, "\\options", 0)
    if opts != -1:
        document.header[opts] = document.header[opts].replace("\\options", "\\options romansh,")
    else:
        anchor = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(anchor + 1, "\\options romansh")
|
2016-10-16 13:33:23 +00:00
|
|
|
|
|
|
|
|
2016-10-22 13:33:59 +00:00
|
|
|
def revert_amharic(document):
    """Set the document language to English but assure Amharic output
    via polyglossia's \\setotherlanguage mechanism."""
    if document.language != "amharic":
        return
    document.language = "english"
    lang = find_token(document.header, "\\language amharic", 0)
    if lang != -1:
        document.header[lang] = "\\language english"
    pkg = find_token(document.header, "\\language_package default", 0)
    if pkg != -1:
        # the default package is kept (polyglossia does the work)
        document.header[pkg] = "\\language_package default"
    add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{amharic}}"])
    # switch back to Amharic right at the start of the body
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{amharic}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
|
|
|
|
|
|
|
|
|
|
|
|
def revert_asturian(document):
    """Set the document language to English but assure Asturian output
    via polyglossia's \\setotherlanguage mechanism."""
    if document.language != "asturian":
        return
    document.language = "english"
    lang = find_token(document.header, "\\language asturian", 0)
    if lang != -1:
        document.header[lang] = "\\language english"
    pkg = find_token(document.header, "\\language_package default", 0)
    if pkg != -1:
        # the default package is kept (polyglossia does the work)
        document.header[pkg] = "\\language_package default"
    add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{asturian}}"])
    # switch back to Asturian right at the start of the body
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{asturian}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
|
|
|
|
|
|
|
|
|
|
|
|
def revert_kannada(document):
    """Set the document language to English but assure Kannada output
    via polyglossia's \\setotherlanguage mechanism."""
    if document.language != "kannada":
        return
    document.language = "english"
    lang = find_token(document.header, "\\language kannada", 0)
    if lang != -1:
        document.header[lang] = "\\language english"
    pkg = find_token(document.header, "\\language_package default", 0)
    if pkg != -1:
        # the default package is kept (polyglossia does the work)
        document.header[pkg] = "\\language_package default"
    add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{kannada}}"])
    # switch back to Kannada right at the start of the body
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{kannada}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
|
|
|
|
|
|
|
|
|
|
|
|
def revert_khmer(document):
    """Set the document language to English but assure Khmer output
    via polyglossia's \\setotherlanguage mechanism."""
    if document.language != "khmer":
        return
    document.language = "english"
    lang = find_token(document.header, "\\language khmer", 0)
    if lang != -1:
        document.header[lang] = "\\language english"
    pkg = find_token(document.header, "\\language_package default", 0)
    if pkg != -1:
        # the default package is kept (polyglossia does the work)
        document.header[pkg] = "\\language_package default"
    add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{khmer}}"])
    # switch back to Khmer right at the start of the body
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{khmer}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
|
|
|
|
|
|
|
|
|
2016-10-27 22:21:58 +00:00
|
|
|
def revert_urdu(document):
    """Set the document language to English but assure Urdu output
    via polyglossia's \\setotherlanguage mechanism."""
    if document.language != "urdu":
        return
    document.language = "english"
    lang = find_token(document.header, "\\language urdu", 0)
    if lang != -1:
        document.header[lang] = "\\language english"
    pkg = find_token(document.header, "\\language_package default", 0)
    if pkg != -1:
        # the default package is kept (polyglossia does the work)
        document.header[pkg] = "\\language_package default"
    add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{urdu}}"])
    # switch back to Urdu right at the start of the body
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{urdu}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
|
|
|
|
|
|
|
|
|
|
|
|
def revert_syriac(document):
    """Set the document language to English but assure Syriac output
    via polyglossia's \\setotherlanguage mechanism."""
    if document.language != "syriac":
        return
    document.language = "english"
    lang = find_token(document.header, "\\language syriac", 0)
    if lang != -1:
        document.header[lang] = "\\language english"
    pkg = find_token(document.header, "\\language_package default", 0)
    if pkg != -1:
        # the default package is kept (polyglossia does the work)
        document.header[pkg] = "\\language_package default"
    add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{syriac}}"])
    # switch back to Syriac right at the start of the body
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{syriac}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
|
|
|
|
|
|
|
|
|
2016-12-10 10:53:42 +00:00
|
|
|
def revert_quotes(document):
    """Revert Quote insets in verbatim or Hebrew context to plain quotes."""

    # First handle verbatim insets
    pos = 0
    end = 0
    while pos < len(document.body):
        tokens = document.body[pos].split()
        if len(tokens) > 1 and tokens[0] == "\\begin_inset" and \
           ( tokens[1] in ["ERT", "listings"] or ( len(tokens) > 2 and tokens[2] in ["URL", "Chunk", "Sweave", "S/R"]) ):
            end = find_end_of_inset(document.body, pos)
            if end == -1:
                document.warning("Malformed LyX document: Can't find end of " + tokens[1] + " inset at line " + str(pos))
                pos += 1
                continue
            while True:
                quote = find_token(document.body, '\\begin_inset Quotes', pos, end)
                if quote == -1:
                    pos += 1
                    break
                quote_end = find_end_of_inset(document.body, quote)
                if quote_end == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(quote))
                    pos = quote
                    continue
                # double quotes by default, single for secondary quotes
                mark = "\""
                if document.body[quote].endswith("s"):
                    mark = "'"
                document.body[quote:quote_end+1] = [mark]
        else:
            pos += 1
            continue

    # Now verbatim layouts
    pos = 0
    end = 0
    while pos < len(document.body):
        tokens = document.body[pos].split()
        if len(tokens) > 1 and tokens[0] == "\\begin_layout" and \
           tokens[1] in ["Verbatim", "Verbatim*", "Code", "Author_Email", "Author_URL"]:
            end = find_end_of_layout(document.body, pos)
            if end == -1:
                document.warning("Malformed LyX document: Can't find end of " + tokens[1] + " layout at line " + str(pos))
                pos += 1
                continue
            while True:
                quote = find_token(document.body, '\\begin_inset Quotes', pos, end)
                if quote == -1:
                    pos += 1
                    break
                quote_end = find_end_of_inset(document.body, quote)
                if quote_end == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(quote))
                    pos = quote
                    continue
                mark = "\""
                if document.body[quote].endswith("s"):
                    mark = "'"
                document.body[quote:quote_end+1] = [mark]
        else:
            pos += 1
            continue

    # Now handle Hebrew
    if not document.language == "hebrew" and find_token(document.body, '\\lang hebrew', 0) == -1:
        return

    pos = 0
    while True:
        quote = find_token(document.body, '\\begin_inset Quotes', pos)
        if quote == -1:
            return
        quote_end = find_end_of_inset(document.body, quote)
        if quote_end == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(quote))
            pos = quote
            continue
        hebrew = False
        layout = get_containing_layout(document.body, quote)
        langpos = find_token_backwards(document.body, "\\lang", quote)
        if langpos == -1 or langpos < layout[1]:
            # no language switch in this paragraph: document language rules
            hebrew = document.language == "hebrew"
        elif document.body[langpos] == "\\lang hebrew":
            hebrew = True
        if hebrew:
            mark = "\""
            if document.body[quote].endswith("s"):
                mark = "'"
            document.body[quote:quote_end+1] = [mark]
        pos = quote_end
|
2017-03-27 09:16:31 +00:00
|
|
|
|
2016-12-10 10:53:42 +00:00
|
|
|
|
2016-12-14 02:49:04 +00:00
|
|
|
def revert_iopart(document):
    """Input the standard layouts via local layout for the iopart class."""
    if document.textclass != "iopart":
        return

    start = find_token(document.header, "\\begin_local_layout", 0)
    if start == -1:
        # no local layout yet: create an empty one before \language
        langpos = find_token(document.header, "\\language", 0)
        if langpos == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[langpos-1 : langpos-1] = ["\\begin_local_layout", "\\end_local_layout"]
        start = langpos - 1

    stop = find_end_of(document.header, start, "\\begin_local_layout", "\\end_local_layout")
    if stop == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    document.header[start+1 : start+1] = [
        "### Inserted by lyx2lyx (stdlayouts) ###",
        "Input stdlayouts.inc",
        "### End of insertion by lyx2lyx (stdlayouts) ###"
    ]
|
|
|
|
|
|
|
|
|
|
|
|
def convert_iopart(document):
    """Remove the stdlayouts local layout we added on reversion, if present."""
    if document.textclass != "iopart":
        return

    start = find_token(document.header, "\\begin_local_layout", 0)
    if start == -1:
        return

    stop = find_end_of(document.header, start, "\\begin_local_layout", "\\end_local_layout")
    if stop == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    mark = find_token(document.header, "### Inserted by lyx2lyx (stdlayouts) ###", start, stop)
    if mark == -1:
        return
    mark_end = find_token(document.header, "### End of insertion by lyx2lyx (stdlayouts) ###", start, stop)
    if mark_end == -1:
        # this should not happen
        document.warning("End of lyx2lyx local layout insertion not found!")
        return
    if mark == start + 1 and mark_end == stop - 1:
        # that was all the local layout there was: drop the whole block
        document.header[start : stop + 1] = []
    else:
        document.header[mark : mark_end + 1] = []
|
|
|
|
|
2016-12-10 10:53:42 +00:00
|
|
|
|
2016-12-20 17:54:07 +00:00
|
|
|
def convert_quotestyle(document):
    """Rename the \\quotes_language header to \\quotes_style."""
    pos = find_token(document.header, "\\quotes_language", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_language!")
        return
    style = get_value(document.header, "\\quotes_language", pos)
    document.header[pos] = "\\quotes_style " + style
|
|
|
|
|
|
|
|
|
|
|
|
def revert_quotestyle(document):
    """Rename the \\quotes_style header back to \\quotes_language."""
    pos = find_token(document.header, "\\quotes_style", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_style!")
        return
    style = get_value(document.header, "\\quotes_style", pos)
    document.header[pos] = "\\quotes_language " + style
|
|
|
|
|
|
|
|
|
2016-12-21 14:17:30 +00:00
|
|
|
def revert_plainquote(document):
    """Revert plain quote insets to literal quote characters."""

    # First, revert the style setting
    pos = find_token(document.header, "\\quotes_style plain", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style english"

    # now the insets
    pos = 0
    while True:
        quote = find_token(document.body, '\\begin_inset Quotes q', pos)
        if quote == -1:
            return
        quote_end = find_end_of_inset(document.body, quote)
        if quote_end == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(quote))
            pos = quote
            continue
        # double quotes by default, single for secondary quotes
        mark = "\""
        if document.body[quote].endswith("s"):
            mark = "'"
        document.body[quote:quote_end+1] = [mark]
        pos = quote_end
|
|
|
|
|
|
|
|
|
2016-12-24 13:27:00 +00:00
|
|
|
def convert_frenchquotes(document):
    """Convert french quote insets to swiss."""

    # First, convert the style setting
    pos = find_token(document.header, "\\quotes_style french", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style swiss"

    # now the insets
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes f', pos)
        if pos == -1:
            return
        # strip the leading "Quotes " from the inset argument
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        newcode = code.replace("f", "c", 1)
        document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
|
|
|
|
|
|
|
|
|
|
|
|
def revert_swissquotes(document):
    """Revert swiss quote insets to french."""

    # First, revert the style setting
    pos = find_token(document.header, "\\quotes_style swiss", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style french"

    # now the insets
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes c', pos)
        if pos == -1:
            return
        # strip the leading "Quotes " from the inset argument
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        newcode = code.replace("c", "f", 1)
        document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
|
|
|
|
|
|
|
|
|
|
|
|
def revert_britishquotes(document):
    """Revert british quote insets to english (pre-2.3 file format)."""

    # Reset the global quote style in the header, if set to british.
    hdr = find_token(document.header, "\\quotes_style british", 0)
    if hdr != -1:
        document.header[hdr] = "\\quotes_style english"

    # Rewrite every british quote inset to the english type.
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes b', pos)
        if pos == -1:
            return
        old = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        new = old.replace("b", "e", 1)
        # Third letter "d" denotes the opening, "s" the closing mark;
        # the english counterpart uses the swapped letter.
        new = new.replace("d", "s") if old[2] == "d" else new.replace("s", "d")
        document.body[pos] = document.body[pos].replace(old, new)
        pos += 1
|
|
|
|
def revert_swedishgquotes(document):
    """Revert swedishg quote insets (pre-2.3 file format)."""

    # Reset the global quote style in the header, if set to swedishg.
    hdr = find_token(document.header, "\\quotes_style swedishg", 0)
    if hdr != -1:
        document.header[hdr] = "\\quotes_style danish"

    # Rewrite every swedishg quote inset.
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes w', pos)
        if pos == -1:
            return
        old = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        if old[2] == "d":
            # outer marks: danish type, with the side letter mirrored (r -> l)
            new = old.replace("w", "a", 1).replace("r", "l")
        else:
            # inner marks: swedish type
            new = old.replace("w", "s", 1)
        document.body[pos] = document.body[pos].replace(old, new)
        pos += 1
|
|
|
|
def revert_frenchquotes(document):
    """Revert french inner quote insets (pre-2.3 file format).

    Only the inner (third letter "s") french insets are new in 2.3;
    other french insets are left untouched.
    """
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes f', i)
        if i == -1:
            return
        # Inset value, e.g. "fls" (type, side, style).
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        if val[2] == "s":
            # inner marks
            newval = val.replace("f", "e", 1).replace("s", "d")
            # FIX: this replacement used to run unconditionally outside the
            # "if"; for a non-inner french inset `newval` was then unbound
            # (NameError) or stale from a previous iteration.
            document.body[i] = document.body[i].replace(val, newval)
        i += 1
|
|
|
|
def revert_frenchinquotes(document):
    """Revert inner frenchin quote insets (pre-2.3 file format)."""

    # Reset the global quote style in the header, if set to frenchin.
    hdr = find_token(document.header, "\\quotes_style frenchin", 0)
    if hdr != -1:
        document.header[hdr] = "\\quotes_style french"

    # Rewrite every frenchin quote inset to the french type.
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes i', pos)
        if pos == -1:
            return
        old = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        new = old.replace("i", "f", 1)
        if old[2] == "s":
            # inner marks
            new = new.replace("s", "d")
        document.body[pos] = document.body[pos].replace(old, new)
        pos += 1
|
|
|
|
def revert_russianquotes(document):
    """Revert russian quote insets (pre-2.3 file format)."""

    # Reset the global quote style in the header, if set to russian.
    hdr = find_token(document.header, "\\quotes_style russian", 0)
    if hdr != -1:
        document.header[hdr] = "\\quotes_style french"

    # Rewrite every russian quote inset.
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes r', pos)
        if pos == -1:
            return
        old = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        if old[2] == "s":
            # inner marks: german type
            new = old.replace("r", "g", 1).replace("s", "d")
        else:
            # outer marks: french type
            new = old.replace("r", "f", 1)
        document.body[pos] = document.body[pos].replace(old, new)
        pos += 1
2016-12-25 11:19:02 +00:00
|
|
|
def revert_dynamicquotes(document):
    """Revert dynamic quote insets (pre-2.3 file format)."""

    # Drop the \dynamic_quotes header line, if present.
    i = find_token(document.header, "\\dynamic_quotes", 0)
    if i != -1:
        del document.header[i]

    # Determine the global quote style.
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    # Map the style name to its inset type letter; "e" (english) is the
    # fallback for unknown styles, as in the previous if/elif chain.
    type_letters = {
        "english": "e", "swedish": "s", "german": "g", "polish": "p",
        "swiss": "c", "danish": "a", "plain": "q", "british": "b",
        "swedishg": "w", "french": "f", "frenchin": "i", "russian": "r",
    }
    s = type_letters.get(style, "e")

    # Turn every dynamic quote inset into the document-style static one.
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes x', i)
        if i == -1:
            return
        document.body[i] = document.body[i].replace("x", s)
        i += 1
2016-12-26 13:03:48 +00:00
|
|
|
def revert_cjkquotes(document):
    """Revert CJK quote insets (corner and angle brackets).

    If the global quote style is a CJK one, the header is reset to
    ``english`` and dynamic quote insets become the matching static CJK
    insets.  Then every CJK quote inset is replaced by the literal CJK
    bracket character (in CJK language context) or by an equivalent math
    formula (elsewhere).
    """

    # Get the global quote style from the header.
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    global_cjk = style.find("cjk") != -1

    if global_cjk:
        document.header[i] = "\\quotes_style english"
        # Transform dynamic insets into the respective static CJK type:
        # "j" = corner brackets (cjk), "k" = angle brackets (cjkangle).
        s = "j"
        if style == "cjkangle":
            s = "k"
        i = 0
        while True:
            i = find_token(document.body, '\\begin_inset Quotes x', i)
            if i == -1:
                break
            document.body[i] = document.body[i].replace("x", s)
            i += 1

    cjk_langs = ["chinese-simplified", "chinese-traditional", "japanese", "japanese-cjk", "korean"]

    # Replacement table per quote type, keyed by (inner, opening):
    # value = (literal CJK character, math macro used outside CJK context).
    marks = {
        # corner brackets
        "j": {
            (True, True):   (u"\u300E", "\\llceil"),
            (True, False):  (u"\u300F", "\\rrfloor"),
            (False, True):  (u"\u300C", "\\lceil"),
            (False, False): (u"\u300D", "\\rfloor"),
        },
        # angle brackets
        "k": {
            (True, True):   (u"\u3008", "\\langle"),
            (True, False):  (u"\u3009", "\\rangle"),
            (False, True):  (u"\u300A", "\\langle\\kern -2.5pt\\langle"),
            (False, False): (u"\u300B", "\\rangle\\kern -2.5pt\\rangle"),
        },
    }

    # The two original loops (corner and angle brackets) were identical
    # except for the quote type letter and the replacement strings.
    for qtype in ["j", "k"]:
        i = 0
        while True:
            k = find_token(document.body, '\\begin_inset Quotes ' + qtype, i)
            if k == -1:
                break
            l = find_end_of_inset(document.body, k)
            if l == -1:
                document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
                # FIX: was `i = k`, which re-found the same malformed inset
                # forever (infinite loop).
                i = k + 1
                continue
            # Determine whether the inset sits in CJK language context:
            # the innermost \lang before the inset within the containing
            # layout wins, else fall back to the document language.
            # NOTE(review): assumes the inset lies inside a layout
            # (get_containing_layout succeeds), as the original did.
            cjk = False
            parent = get_containing_layout(document.body, k)
            ql = find_token_backwards(document.body, "\\lang", k)
            if ql == -1 or ql < parent[1]:
                cjk = document.language in cjk_langs
            elif document.body[ql].split()[1] in cjk_langs:
                cjk = True
            # Inset value, e.g. "jls" (type, side l/r, style s/d).
            # FIX: was get_value(..., i) with the stale loop cursor, which
            # could read a *different* Quotes inset lying between i and k.
            val = get_value(document.body, "\\begin_inset Quotes", k)[7:]
            uchar, formula = marks[qtype][(val[2] == "s", val[1] == "l")]
            if cjk:
                replace = [uchar]
            else:
                replace = ["\\begin_inset Formula $" + formula + "$", "\\end_inset"]
            document.body[k:l+1] = replace
            i = l
2016-12-29 15:45:19 +00:00
|
|
|
def revert_crimson(document):
    """Revert native Cochineal/Crimson font definition to LaTeX."""

    # Only relevant when TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    i = find_token(document.header, "\\font_roman \"cochineal\"", 0)
    if i == -1:
        return
    # Check for old-style figures; if set, move the option into the
    # preamble and reset the header flag.
    j = find_token(document.header, "\\font_osf true", 0)
    osf = (j != -1)
    preamble = "\\usepackage"
    if osf:
        document.header[j] = "\\font_osf false"
        preamble += "[proportional,osf]"
    preamble += "{cochineal}"
    add_to_preamble(document, [preamble])
    document.header[i] = document.header[i].replace("cochineal", "default")
|
|
|
|
def revert_cochinealmath(document):
    """Revert cochineal newtxmath definitions to LaTeX."""

    # Only relevant when TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    i = find_token(document.header, "\\font_math \"cochineal-ntxm\"", 0)
    if i == -1:
        return
    add_to_preamble(document, "\\usepackage[cochineal]{newtxmath}")
    document.header[i] = document.header[i].replace("cochineal-ntxm", "auto")
2016-06-18 22:38:24 +00:00
|
|
|
def revert_labelonly(document):
    """Revert the labelonly tag for InsetRef to plain ERT."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset ref", pos)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(pos))
            pos += 1
            continue
        # Only labelonly references are reverted; skip anything else.
        if find_token(document.body, "LatexCommand labelonly", pos, end) == -1:
            pos = end
            continue
        label = get_quoted_value(document.body, "reference", pos, end)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(pos))
            pos = end + 1
            continue
        # Replace the whole inset by the raw label text.
        document.body[pos:end+1] = put_cmd_in_ert([label])
        pos += 1
|
|
|
|
def revert_plural_refs(document):
    """Revert plural and capitalized formatted references to ERT.

    With refstyle enabled, a formatted reference with the plural or caps
    flag set is turned into a raw ``\\Prefref[s]{suffix}``-style command;
    otherwise the (new-in-2.3) flags are simply removed from the inset.
    """
    i = find_token(document.header, "\\use_refstyle 1", 0)
    # FIX: was (i != 0) -- a missing token returns -1, which compared
    # unequal to 0 and wrongly enabled refstyle handling.
    use_refstyle = (i != -1)

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue

        plural = caps = suffix = False
        # FIX: the token is spelled "LatexCommand" in LyX files (see the
        # other converters in this module); "LaTeXCommand" never matched,
        # so formatted references were never reverted.
        k = find_token(document.body, "LatexCommand formatted", i, j)
        if k != -1 and use_refstyle:
            plural = get_bool_value(document.body, "plural", i, j, False)
            caps   = get_bool_value(document.body, "caps", i, j, False)
            label  = get_quoted_value(document.body, "reference", i, j)
            if label:
                try:
                    (prefix, suffix) = label.split(":", 1)
                except ValueError:
                    document.warning("No `:' separator in formatted reference at line %d!" % (i))
            else:
                document.warning("Can't find label for reference at line %d!" % (i))

        # this effectively tests also for use_refstyle and a formatted reference
        # we do this complicated test because we would otherwise do this erasure
        # over and over and over
        if not ((plural or caps) and suffix):
            del_token(document.body, "plural", i, j)
            del_token(document.body, "caps", i, j - 1) # since we deleted a line
            i = j - 1
            continue

        if caps:
            prefix = prefix[0].title() + prefix[1:]
        cmd = "\\" + prefix + "ref"
        if plural:
            cmd += "[s]"
        cmd += "{" + suffix + "}"
        document.body[i:j+1] = put_cmd_in_ert([cmd])
        i += 1
|
|
|
|
def revert_noprefix(document):
    """Revert labelonly reference insets with 'noprefix' set."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset ref", pos)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(pos))
            pos += 1
            continue
        noprefix = False
        if find_token(document.body, "LatexCommand labelonly", pos, end) != -1:
            noprefix = get_bool_value(document.body, "noprefix", pos, end)
        if not noprefix:
            # either it was not a labelonly command, or else noprefix was not set.
            # in that case, we just delete the option.
            del_token(document.body, "noprefix", pos, end)
            pos = end
            continue
        label = get_quoted_value(document.body, "reference", pos, end)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(pos))
            pos = end + 1
            continue
        try:
            (prefix, suffix) = label.split(":", 1)
        except:
            document.warning("No `:' separator in formatted reference at line %d!" % (pos))
            # we'll leave this as an ordinary labelonly reference
            del_token(document.body, "noprefix", pos, end)
            pos = end
            continue
        # Replace the inset with the bare suffix (the part after the prefix).
        document.body[pos:end+1] = put_cmd_in_ert([suffix])
        pos += 1
2017-01-08 08:39:46 +00:00
|
|
|
def revert_biblatex(document):
    """Revert biblatex support to the pre-2.3 file format.

    Reverts the cite engine to natbib, moves biblatex-specific header
    settings into the LaTeX preamble, rewrites bibtex and citation
    insets, and installs a local layout that pretends natbib support so
    the old citation styles keep working.
    """

    #
    # Header
    #

    # 1. Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # 2. Store biblatex state and revert to natbib
    biblatex = False
    if engine in ["biblatex", "biblatex-natbib"]:
        biblatex = True
        # i is valid here: a biblatex engine implies the token was found
        document.header[i] = "\\cite_engine natbib"

    # 3. Store and remove new document headers
    # (each of these headers is new in 2.3; its value is preserved so it
    # can be re-emitted as a biblatex package option below)
    bibstyle = ""
    i = find_token(document.header, "\\biblatex_bibstyle", 0)
    if i != -1:
        bibstyle = get_value(document.header, "\\biblatex_bibstyle", i)
        del document.header[i]

    citestyle = ""
    i = find_token(document.header, "\\biblatex_citestyle", 0)
    if i != -1:
        citestyle = get_value(document.header, "\\biblatex_citestyle", i)
        del document.header[i]

    biblio_options = ""
    i = find_token(document.header, "\\biblio_options", 0)
    if i != -1:
        biblio_options = get_value(document.header, "\\biblio_options", i)
        del document.header[i]

    if biblatex:
        # Load biblatex manually, carrying over the stored settings.
        bbxopts = "[natbib=true"
        if bibstyle != "":
            bbxopts += ",bibstyle=" + bibstyle
        if citestyle != "":
            bbxopts += ",citestyle=" + citestyle
        if biblio_options != "":
            bbxopts += "," + biblio_options
        bbxopts += "]"
        add_to_preamble(document, "\\usepackage" + bbxopts + "{biblatex}")

    #
    # Body
    #

    # 1. Bibtex insets
    i = 0
    bibresources = []
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
            i += 1
            continue
        bibs = get_quoted_value(document.body, "bibfiles", i, j)
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        # store resources
        if bibs:
            bibresources += bibs.split(",")
        else:
            document.warning("Can't find bibfiles for bibtex inset at line %d!" %(i))
        # remove biblatexopts line
        k = find_token(document.body, "biblatexopts", i, j)
        if k != -1:
            del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        # Insert ERT \\printbibliography and wrap bibtex inset to a Note
        if biblatex:
            pcmd = "printbibliography"
            if opts:
                pcmd += "[" + opts + "]"
            repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
                    "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
                    "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
                    "status open", "", "\\begin_layout Plain Layout" ]
            repl += document.body[i:j+1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i:j+1] = repl
            # skip past the wrapper lines just inserted (20 before + 6
            # after the old inset; the constant 27 overshoots by one,
            # which is harmless here)
            j += 27

        i = j + 1

    if biblatex:
        for b in bibresources:
            add_to_preamble(document, "\\addbibresource{" + b + ".bib}")

    # 2. Citation insets

    # Specific citation insets used in biblatex that need to be reverted to ERT
    # (maps the LyX command name to the LaTeX command to emit)
    new_citations = {
        "Cite" : "Cite",
        "citebyear" : "citeyear",
        "citeyear" : "cite*",
        "Footcite" : "Smartcite",
        "footcite" : "smartcite",
        "Autocite" : "Autocite",
        "autocite" : "autocite",
        "citetitle" : "citetitle",
        "citetitle*" : "citetitle*",
        "fullcite" : "fullcite",
        "footfullcite" : "footfullcite",
        "supercite" : "supercite",
        "citeauthor" : "citeauthor",
        "citeauthor*" : "citeauthor*",
        "Citeauthor" : "Citeauthor",
        "Citeauthor*" : "Citeauthor*"
        }

    # All commands accepted by LyX < 2.3. Everything else throws an error.
    old_citations = [ "cite", "nocite", "citet", "citep", "citealt", "citealp",\
                      "citeauthor", "citeyear", "citeyearpar", "citet*", "citep*",\
                      "citealt*", "citealp*", "citeauthor*", "Citet", "Citep",\
                      "Citealt", "Citealp", "Citeauthor", "Citet*", "Citep*",\
                      "Citealt*", "Citealp*", "Citeauthor*", "fullcite", "footcite",\
                      "footcitet", "footcitep", "footcitealt", "footcitealp",\
                      "footciteauthor", "footciteyear", "footciteyearpar",\
                      "citefield", "citetitle", "cite*" ]

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        if biblatex and cmd in list(new_citations.keys()):
            # biblatex-only command: emit raw LaTeX instead of the inset
            pre = get_quoted_value(document.body, "before", i, j)
            post = get_quoted_value(document.body, "after", i, j)
            key = get_quoted_value(document.body, "key", i, j)
            if not key:
                document.warning("Citation inset at line %d does not have a key!" %(i))
                key = "???"
            # Replace known new commands with ERT
            res = "\\" + new_citations[cmd]
            if pre:
                res += "[" + pre + "]"
            if post:
                res += "[" + post + "]"
            elif pre:
                # a pre-note alone still needs the empty post-note bracket
                res += "[]"
            res += "{" + key + "}"
            document.body[i:j+1] = put_cmd_in_ert([res])
        elif cmd not in old_citations:
            # Reset unknown commands to cite. This is what LyX does as well
            # (but LyX 2.2 would break on unknown commands)
            document.body[k] = "LatexCommand cite"
            document.warning("Reset unknown cite command '%s' with cite" % cmd)
        i = j + 1

    # Emulate the old biblatex-workaround (pretend natbib in order to use the styles)
    if biblatex:
        i = find_token(document.header, "\\begin_local_layout", 0)
        if i == -1:
            # no local layout yet: create an empty one just before \language
            k = find_token(document.header, "\\language", 0)
            if k == -1:
                # this should not happen
                document.warning("Malformed LyX document! No \\language header found!")
                return
            document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
            i = k-1

        j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
        if j == -1:
            # this should not happen
            document.warning("Malformed LyX document! Can't find end of local layout!")
            return

        document.header[i+1 : i+1] = [
            "### Inserted by lyx2lyx (biblatex emulation) ###",
            "Provides natbib 1",
            "### End of insertion by lyx2lyx (biblatex emulation) ###"
        ]
|
|
|
|
2017-01-13 10:53:22 +00:00
|
|
|
def revert_citekeyonly(document):
    """Revert the keyonly cite command to ERT (the bare key)."""

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        if cmd != "keyonly":
            i = j + 1
            continue

        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" %(i))
            # FIX: fall back to a placeholder, consistent with
            # revert_biblatex(); previously an empty ERT was emitted.
            key = "???"
        # Replace the keyonly inset with the raw key as ERT
        document.body[i:j+1] = put_cmd_in_ert([key])
        i = j + 1
|
|
|
2017-01-13 17:23:42 +00:00
|
|
|
|
|
|
|
def revert_bibpackopts(document):
    """Revert support for natbib/jurabib package options.

    The 2.3 format stores bibliography package options in the
    ``\\biblio_options`` header; older LyX passes them through a
    ``PackageOptions`` statement in the local layout instead.
    """

    # Get the cite engine; only natbib and jurabib take package options.
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # (removed unused local `biblatex = False` from the original)
    if engine not in ["natbib", "jurabib"]:
        return

    i = find_token(document.header, "\\biblio_options", 0)
    if i == -1:
        # Nothing to do if we have no options
        return

    biblio_options = get_value(document.header, "\\biblio_options", i)
    del document.header[i]

    if not biblio_options:
        # Nothing to do for empty options
        return

    # Ensure a local layout block exists; create one before \language
    # if the document has none.
    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k - 1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    # Inject the PackageOptions statement into the local layout.
    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (bibliography package options) ###",
        "PackageOptions " + engine + " " + biblio_options,
        "### End of insertion by lyx2lyx (bibliography package options) ###"
    ]
|
|
|
|
Support for "qualified citation lists"
These are biblatex-specific multicite commands that allow for multiple
pre- and postnotes, as in:
\cites(pre)(post)[pre1][post1]{key1}[pre2][post2]{key2}...
with an optional general pre- and postnote, which applies to the whole
list (like [][] in normal cite commands) and an optional pre- and
postnotes for each item, so that pagination can actually be specified in
multi-cite references, as in:
(cf. Miller 2015, 2; furthermore Smith 2013, 23-23; Jenkins 2012, 103,
also refer to chapter 6 in this book)
See the biblatex manual, sec. 3.8.3., for details.
File format change.
2017-01-21 13:25:17 +00:00
|
|
|
def revert_qualicites(document):
    """Revert qualified citation list commands to ERT.

    Qualified lists are biblatex multicite commands with optional global
    pre/postnotes and per-key pre/postnotes:
    ``\\cites(pre)(post)[pre1][post1]{key1}[pre2][post2]{key2}...``
    With biblatex active, matching insets are turned into raw LaTeX;
    otherwise the (new-in-2.3) pretextlist/posttextlist parameters are
    simply dropped.
    """

    # Citation insets that support qualified lists, with their LaTeX code
    ql_citations = {
        "cite" : "cites",
        "Cite" : "Cites",
        "citet" : "textcites",
        "Citet" : "Textcites",
        "citep" : "parencites",
        "Citep" : "Parencites",
        "Footcite" : "Smartcites",
        "footcite" : "smartcites",
        "Autocite" : "Autocites",
        "autocite" : "autocites",
        }

    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    biblatex = engine in ["biblatex", "biblatex-natbib"]

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue
        pres = find_token(document.body, "pretextlist", i, j)
        posts = find_token(document.body, "posttextlist", i, j)
        if pres == -1 and posts == -1:
            # nothing to do.
            i = j + 1
            continue
        # FIX: only read a list that is actually present; a start index
        # of -1 made get_quoted_value scan from the wrong place.
        pretexts = get_quoted_value(document.body, "pretextlist", pres) if pres != -1 else ""
        posttexts = get_quoted_value(document.body, "posttextlist", posts) if posts != -1 else ""
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        if biblatex and cmd in list(ql_citations.keys()):
            pre = get_quoted_value(document.body, "before", i, j)
            post = get_quoted_value(document.body, "after", i, j)
            key = get_quoted_value(document.body, "key", i, j)
            if not key:
                document.warning("Citation inset at line %d does not have a key!" %(i))
                key = "???"
            keys = key.split(",")
            # The lists are tab-separated "<key> <text>" entries.
            premap = dict()
            for pp in pretexts.split("\t"):
                ppp = pp.split(" ", 1)
                # FIX: guard against empty/malformed entries, which raised
                # an IndexError (e.g. when only one list was present).
                if len(ppp) > 1:
                    premap[ppp[0]] = ppp[1]
            postmap = dict()
            for pp in posttexts.split("\t"):
                ppp = pp.split(" ", 1)
                if len(ppp) > 1:
                    postmap[ppp[0]] = ppp[1]
            # Replace known new commands with ERT
            # Parens inside the global notes must be protected by braces.
            if "(" in pre or ")" in pre:
                pre = "{" + pre + "}"
            if "(" in post or ")" in post:
                post = "{" + post + "}"
            res = "\\" + ql_citations[cmd]
            if pre:
                res += "(" + pre + ")"
                if post:
                    res += "(" + post + ")"
            elif pre:
                res += "()"
            for kk in keys:
                if premap.get(kk, "") != "":
                    res += "[" + premap[kk] + "]"
                if postmap.get(kk, "") != "":
                    res += "[" + postmap[kk] + "]"
                elif premap.get(kk, "") != "":
                    res += "[]"
                res += "{" + kk + "}"
            document.body[i:j+1] = put_cmd_in_ert([res])
        else:
            # just remove the params
            # FIX: guard the deletions; an index of -1 silently deleted
            # the *last* line of the document body.
            if posttexts != -1:
                del document.body[posttexts]
            if pretexts != -1:
                del document.body[pretexts]
            i += 1
|
|
|
|
2017-01-30 06:44:55 +00:00
|
|
|
# Command insets that gained the "literal" parameter in format 2.3;
# read by convert_literalparam() and revert_literalparam() below.
command_insets = ["bibitem", "citation", "href", "index_print", "nomenclature"]
|
|
|
|
def convert_literalparam(document):
    """Add the literal param to the command insets that support it."""

    for inset in command_insets:
        pos = 0
        while True:
            pos = find_token(document.body, '\\begin_inset CommandInset %s' % inset, pos)
            if pos == -1:
                break
            end = find_end_of_inset(document.body, pos)
            if end == -1:
                document.warning("Malformed LyX document: Can't find end of %s inset at line %d" % (inset, pos))
                pos += 1
                continue
            # Advance to the blank line that terminates the parameter list,
            # so the new parameter is appended after the existing ones.
            while pos < end and document.body[pos].strip() != '':
                pos += 1
            # href is already fully latexified. Here we can switch off literal.
            if inset == "href":
                document.body.insert(pos, "literal \"false\"")
            else:
                document.body.insert(pos, "literal \"true\"")
|
|
|
|
def revert_literalparam(document):
    """Remove the literal param from the command insets."""

    for inset in command_insets:
        pos = 0
        while True:
            pos = find_token(document.body, '\\begin_inset CommandInset %s' % inset, pos)
            if pos == -1:
                break
            end = find_end_of_inset(document.body, pos)
            if end == -1:
                document.warning("Malformed LyX document: Can't find end of %s inset at line %d" % (inset, pos))
                pos += 1
                continue
            lit = find_token(document.body, 'literal', pos, end)
            if lit == -1:
                pos += 1
                continue
            # Drop the parameter line; the same inset is revisited on the
            # next pass and skipped once no literal line remains.
            del document.body[lit]
2017-02-04 11:02:00 +00:00
|
|
|
|
|
|
|
def revert_multibib(document):
    """Revert multibib support.

    Emulates the \\multibib header setting either through biblatex's
    "refsection" option (biblatex engines) or through bibtopic's btUnit
    environments inserted as ERT around each sectioning unit (BibTeX).

    Fixes vs. the previous revision:
    * warning used the misspelled name `nultibib` (NameError when hit);
    * three string lists were missing a comma after "begin{btUnit}" /
      "end{btUnit}", so Python's implicit literal concatenation fused
      two lines into one (the `i += 21` / `i += 16` advances expect the
      full element counts restored here).
    """
    # 1. Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # 2. Do we use biblatex?
    biblatex = engine in ["biblatex", "biblatex-natbib"]

    # 3. Store and remove multibib document header
    multibib = ""
    i = find_token(document.header, "\\multibib", 0)
    if i != -1:
        multibib = get_value(document.header, "\\multibib", i)
        del document.header[i]

    if not multibib:
        return

    # 4. The easy part: Biblatex
    if biblatex:
        i = find_token(document.header, "\\biblio_options", 0)
        if i == -1:
            k = find_token(document.header, "\\use_bibtopic", 0)
            if k == -1:
                # this should not happen
                document.warning("Malformed LyX document! No \\use_bibtopic header found!")
                return
            document.header[k-1 : k-1] = ["\\biblio_options " + "refsection=" + multibib]
        else:
            biblio_options = get_value(document.header, "\\biblio_options", i)
            if biblio_options:
                biblio_options += ","
            biblio_options += "refsection=" + multibib
            document.header[i] = "\\biblio_options " + biblio_options

        # Bibtex insets
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Can't find end of bibtex inset at line %d!!" % (i))
                i += 1
                continue
            btprint = get_quoted_value(document.body, "btprint", i, j)
            if btprint != "bibbysection":
                i += 1
                continue
            opts = get_quoted_value(document.body, "biblatexopts", i, j)
            # change btprint line
            k = find_token(document.body, "btprint", i, j)
            if k != -1:
                document.body[k] = "btprint \"btPrintCited\""
            # Insert ERT \\bibbysection and wrap bibtex inset to a Note
            pcmd = "bibbysection"
            if opts:
                pcmd += "[" + opts + "]"
            repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",
                    "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",
                    "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",
                    "status open", "", "\\begin_layout Plain Layout"]
            repl += document.body[i:j+1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i:j+1] = repl
            j += 27
            i = j + 1
        return

    # 5. More tricky: Bibtex/Bibtopic
    k = find_token(document.header, "\\use_bibtopic", 0)
    if k == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\use_bibtopic header found!")
        return
    document.header[k] = "\\use_bibtopic true"

    # Possible units. This assumes that the LyX name follows the std,
    # which might not always be the case. But it's as good as we can get.
    units = {
        "part" : "Part",
        "chapter" : "Chapter",
        "section" : "Section",
        "subsection" : "Subsection",
        }

    if multibib not in units.keys():
        # FIX: was `nultibib` (undefined name) in the format string.
        document.warning("Unknown multibib value `%s'!" % multibib)
        return
    unit = units[multibib]
    btunit = False
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout " + unit, i)
        if i == -1:
            break
        if btunit:
            # Close the previous btUnit and open a new one (21 lines).
            document.body[i-1 : i-1] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "end{btUnit}", "\\end_layout",
                                "\\begin_layout Plain Layout", "",
                                "\\backslash",
                                "begin{btUnit}",  # FIX: comma was missing here
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
            i += 21
        else:
            # Open the first btUnit (16 lines).
            document.body[i-1 : i-1] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "begin{btUnit}",  # FIX: comma was missing here
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
            i += 16
        btunit = True
        i += 1

    if btunit:
        # Close the last btUnit before the end of the body.
        i = find_token(document.body, "\\end_body", i)
        document.body[i-1 : i-1] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "end{btUnit}",  # FIX: comma was missing here
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
|
2017-02-04 18:23:46 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_chapterbib(document):
    """Revert chapterbib support.

    Handles \\multibib "child": per-included-file bibliographies are
    emulated with biblatex's \\newrefsection, bibtopic's btUnit, or the
    chapterbib package, depending on the engine/bibtopic settings.

    Fix vs. the previous revision: three string lists were missing a
    comma after "newrefsection" / "begin{btUnit}" / "end{btUnit}", so
    implicit literal concatenation fused two ERT lines into one and the
    `j += len(...)` advances were off by one per list.
    """
    # 1. Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # 2. Do we use biblatex?
    biblatex = engine in ["biblatex", "biblatex-natbib"]

    # 3. Store multibib document header value
    multibib = ""
    i = find_token(document.header, "\\multibib", 0)
    if i != -1:
        multibib = get_value(document.header, "\\multibib", i)

    if not multibib or multibib != "child":
        # nothing to do
        return

    # 4. remove multibib header
    del document.header[i]

    # 5. Biblatex
    if biblatex:
        # find include insets
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset CommandInset include", i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Can't find end of bibtex inset at line %d!!" % (i))
                i += 1
                continue
            parent = get_containing_layout(document.body, i)
            parbeg = parent[1]

            # Insert ERT \\newrefsection before inset
            beg = ["\\begin_layout Standard",
                   "\\begin_inset ERT", "status open", "",
                   "\\begin_layout Plain Layout", "", "",
                   "\\backslash",
                   "newrefsection",  # FIX: comma was missing here
                   "\\end_layout", "", "\\end_inset", "", "",
                   "\\end_layout", ""]
            document.body[parbeg-1:parbeg-1] = beg
            j += len(beg)
            i = j + 1
        return

    # 6. Bibtex/Bibtopic
    i = find_token(document.header, "\\use_bibtopic", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\use_bibtopic header found!")
        return
    if get_value(document.header, "\\use_bibtopic", i) == "true":
        # find include insets
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset CommandInset include", i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Can't find end of bibtex inset at line %d!!" % (i))
                i += 1
                continue
            parent = get_containing_layout(document.body, i)
            parbeg = parent[1]
            parend = parent[2]

            # Insert wrap inset into \\begin{btUnit}...\\end{btUnit}
            beg = ["\\begin_layout Standard",
                   "\\begin_inset ERT", "status open", "",
                   "\\begin_layout Plain Layout", "", "",
                   "\\backslash",
                   "begin{btUnit}",  # FIX: comma was missing here
                   "\\end_layout", "", "\\end_inset", "", "",
                   "\\end_layout", ""]
            end = ["\\begin_layout Standard",
                   "\\begin_inset ERT", "status open", "",
                   "\\begin_layout Plain Layout", "", "",
                   "\\backslash",
                   "end{btUnit}",  # FIX: comma was missing here
                   "\\end_layout", "", "\\end_inset", "", "",
                   "\\end_layout", ""]
            # Insert the closing ERT first so parbeg stays valid.
            document.body[parend+1:parend+1] = end
            document.body[parbeg-1:parbeg-1] = beg
            j += len(beg) + len(end)
            i = j + 1
        return

    # 7. Chapterbib proper
    add_to_preamble(document, ["\\usepackage{chapterbib}"])
|
2017-03-19 19:50:34 +00:00
|
|
|
|
|
|
|
|
|
|
|
def convert_dashligatures(document):
    """Set 'use_dash_ligatures' according to content.

    Fix vs. the previous revision: the "lines without any dashes" filter
    used the pattern u"...|\\twohyphens|\\threehyphens", which the re
    module reads as TAB + "wohyphens" / "hreehyphens" (\\t is a regex
    escape), so body lines holding the interim representations
    \\twohyphens / \\threehyphens were never inspected and ligature
    dashes went undetected.  The backslash is now properly doubled.
    """
    # Look for and remove dashligatures workaround from 2.3->2.2 reversion,
    # set use_dash_ligatures to True if found, to None else.
    use_dash_ligatures = del_complete_lines(document.preamble,
                                ['% Added by lyx2lyx',
                                 r'\renewcommand{\textendash}{--}',
                                 r'\renewcommand{\textemdash}{---}']) or None

    if use_dash_ligatures is None:
        # Look for dashes (Documents by LyX 2.1 or older have "\twohyphens\n"
        # or "\threehyphens\n" as interim representation for -- an ---.)
        lines = document.body
        has_literal_dashes = has_ligature_dashes = False
        i = j = 0
        while i+1 < len(lines):
            i += 1
            line = lines[i]
            # skip lines without any dashes
            # (FIX: backslash doubled so the regex matches a literal
            # backslash instead of a TAB character):
            if not re.search(u"[\u2013\u2014]|\\\\twohyphens|\\\\threehyphens", line):
                continue
            # skip label width string (see bug 10243):
            if line.startswith("\\labelwidthstring"):
                continue
            # do not touch hyphens in some insets (cf. lyx_2_2.convert_dashes):
            try:
                value, start, end = get_containing_inset(lines, i)
            except TypeError: # no containing inset
                value, start, end = "no inset", -1, -1
            if (value.split()[0] in
                ["CommandInset", "ERT", "External", "Formula",
                 "FormulaMacro", "Graphics", "IPA", "listings"]
                or value == "Flex Code"):
                i = end
                continue
            try:
                layout, start, end, j = get_containing_layout(lines, i)
            except TypeError: # no (or malformed) containing layout
                document.warning("Malformed LyX document: "
                                 "Can't find layout at line %d" % i)
                continue
            if layout == "LyX-Code":
                i = end
                continue

            # literal dash followed by a word or no-break space:
            if re.search(u"[\u2013\u2014]([\w\u00A0]|$)", line,
                         flags=re.UNICODE):
                has_literal_dashes = True
            # ligature dash followed by word or no-break space on next line:
            if (re.search(r"(\\twohyphens|\\threehyphens)", line) and
                re.match(u"[\w\u00A0]", lines[i+1], flags=re.UNICODE)):
                has_ligature_dashes = True
            if has_literal_dashes and has_ligature_dashes:
                # TODO: insert a warning note in the document?
                document.warning('This document contained both literal and '
                                 '"ligature" dashes.\n Line breaks may have changed. '
                                 'See UserGuide chapter 3.9.1 for details.')
                break
        if has_literal_dashes:
            use_dash_ligatures = False
        elif has_ligature_dashes:
            use_dash_ligatures = True
    # insert the setting if there is a preferred value
    if use_dash_ligatures is not None:
        i = find_token(document.header, "\\graphics")
        document.header.insert(i, "\\use_dash_ligatures %s"
                               % str(use_dash_ligatures).lower())
|
2017-03-19 19:50:34 +00:00
|
|
|
|
2018-01-23 13:01:30 +00:00
|
|
|
|
2017-03-19 19:50:34 +00:00
|
|
|
def revert_dashligatures(document):
    """Remove font ligature settings for en- and em-dashes.

    Revert conversion of \\twodashes or \\threedashes to literal dashes.

    Fixes vs. the previous revision: both malformed-document warnings
    referenced the undefined name `words` (NameError when triggered),
    and a dead assignment `lines = line.split('\\n')` was removed.
    """
    use_dash_ligatures = del_value(document.header, "\\use_dash_ligatures")
    if use_dash_ligatures != "true" or document.backend != "latex":
        return
    j = 0
    new_body = []
    for i, line in enumerate(document.body):
        # Skip some document parts where dashes are not converted
        if (i < j) or line.startswith("\\labelwidthstring"):
            new_body.append(line)
            continue
        if (line.startswith("\\begin_inset ") and
            line[13:].split()[0] in ["CommandInset", "ERT", "External",
                "Formula", "FormulaMacro", "Graphics", "IPA", "listings"]
            or line == "\\begin_inset Flex Code"):
            j = find_end_of_inset(document.body, i)
            if j == -1:
                # FIX: the message used undefined `words[1]`.
                document.warning("Malformed LyX document: Can't find end of "
                                 + line[13:] + " inset at line " + str(i))
            new_body.append(line)
            continue
        if line == "\\begin_layout LyX-Code":
            j = find_end_of_layout(document.body, i)
            if j == -1:
                # FIX: the message used undefined `words[1]`.
                document.warning("Malformed LyX document: "
                    "Can't find end of LyX-Code layout at line %d" % i)
            new_body.append(line)
            continue
        # TODO: skip replacement in typewriter fonts
        line = line.replace(u'\u2013', '\\twohyphens\n')
        line = line.replace(u'\u2014', '\\threehyphens\n')
        new_body.extend(line.split('\n'))
    document.body = new_body
    # redefine the dash LICRs to use ligature dashes:
    add_to_preamble(document, [r'\renewcommand{\textendash}{--}',
                               r'\renewcommand{\textemdash}{---}'])
|
2017-03-27 09:16:31 +00:00
|
|
|
|
2017-02-04 11:02:00 +00:00
|
|
|
|
2017-04-04 21:02:47 +00:00
|
|
|
def revert_noto(document):
    """Revert Noto font definitions to LaTeX preamble code.

    Only applies to documents with non-TeX fonts disabled: each Noto
    family selected in the header is reset to "default" and the family
    is re-established via \\renewcommand in the preamble.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    noto_families = (
        ("\\font_roman", "rmdefault", "NotoSerif-TLF"),
        ("\\font_sans", "sfdefault", "NotoSans-TLF"),
        ("\\font_typewriter", "ttdefault", "NotoMono-TLF"),
    )
    for header_tag, latex_family, noto_name in noto_families:
        pos = find_token(document.header,
                         '%s "%s"' % (header_tag, noto_name), 0)
        if pos == -1:
            continue
        add_to_preamble(document,
                        ["\\renewcommand{\\%s}{%s}" % (latex_family, noto_name)])
        document.header[pos] = document.header[pos].replace(noto_name, "default")
|
|
|
|
|
|
|
|
|
2017-04-04 22:01:19 +00:00
|
|
|
def revert_xout(document):
    """Revert the \\xout font attribute to ulem's \\xout macro."""
    # revert_font_attrs rewrites the attribute in the body and reports
    # whether anything was changed; only then is the preamble needed.
    if not revert_font_attrs(document.body, "\\xout", "\\xout"):
        return
    insert_to_preamble(document,
                       ['% for proper cross-out',
                        '\\PassOptionsToPackage{normalem}{ulem}',
                        '\\usepackage{ulem}'])
|
|
|
|
|
|
|
|
|
2017-04-05 20:22:47 +00:00
|
|
|
def convert_mathindent(document):
    """Add the \\is_math_indent tag.

    The tag is 1 iff the class option "fleqn" is set; in that case the
    option is also removed from \\options.
    """
    # the new tag goes right before \quotes_style
    insert_at = find_token(document.header, "\\quotes_style")
    class_options = get_value(document.header, "\\options")
    if "fleqn" not in class_options:
        document.header.insert(insert_at, "\\is_math_indent 0")
        return
    document.header.insert(insert_at, "\\is_math_indent 1")
    # drop "fleqn" from the class options; re-locate the \options line
    # because the insertion above may have shifted indices
    opt_idx = find_token(document.header, "\\options")
    kept = [opt for opt in class_options.split(",")
            if opt.strip() != "fleqn"]
    if kept:
        document.header[opt_idx] = "\\options " + ",".join(kept)
    else:
        # fleqn was the only option: remove the whole line
        del document.header[opt_idx]
|
2017-04-05 20:22:47 +00:00
|
|
|
|
|
|
|
def revert_mathindent(document):
    """Emulate math indentation settings for older formats.

    \\math_indentation becomes a \\setlength in the preamble; the
    \\is_math_indent flag becomes the class option "fleqn".
    """
    # emulate and delete \math_indentation
    indent = get_value(document.header, "\\math_indentation",
                       default="default", delete=True)
    if indent != "default":
        add_to_preamble(document, [r"\setlength{\mathindent}{%s}" % indent])
    # delete \is_math_indent and emulate via document class option
    if not get_bool_value(document.header, "\\is_math_indent", delete=True):
        return
    opt_idx = find_token(document.header, "\\options")
    if opt_idx == -1:
        # no \options line yet: create one before \use_default_options
        anchor = find_token(document.header, "\\use_default_options")
        document.header.insert(anchor, "\\options fleqn")
    else:
        document.header[opt_idx] = document.header[opt_idx].replace(
            "\\options ", "\\options fleqn,")
|
2017-04-05 20:22:47 +00:00
|
|
|
|
|
|
|
|
2017-04-08 01:30:21 +00:00
|
|
|
def revert_baselineskip(document):
    """Revert baselineskip-relative VSpace/HSpace insets to TeX code.

    Finds every body line mentioning "baselineskip%" and, when it
    belongs to a VSpace inset or an \\hspace space inset, replaces the
    whole inset with an ERT \\vspace{...}/\\hspace{...} command whose
    length is expressed in multiples of \\baselineskip.
    """
    i = 0
    vspaceLine = 0
    hspaceLine = 0
    while True:
        # any line carrying a percent-of-baselineskip length
        regexp = re.compile(r'^.*baselineskip%.*$')
        i = find_re(document.body, regexp, i)
        if i == -1:
            return
        vspaceLine = find_token(document.body, "\\begin_inset VSpace", i)
        if vspaceLine == i:
            # output VSpace inset as TeX code
            # first read out the values
            beg = document.body[i].rfind("VSpace ");
            end = document.body[i].rfind("baselineskip%");
            # slice starts after "VSpace " (7 chars)
            baselineskip = float(document.body[i][beg + 7:end]);
            # we store the value in percent, thus divide by 100
            baselineskip = baselineskip/100;
            baselineskip = str(baselineskip);
            # check if it is the starred version
            if document.body[i].find('*') != -1:
                star = '*'
            else:
                star = ''
            # now output TeX code
            endInset = find_end_of_inset(document.body, i)
            if endInset == -1:
                document.warning("Malformed LyX document: Missing '\\end_inset' of VSpace inset.")
                return
            else:
                document.body[vspaceLine: endInset + 1] = put_cmd_in_ert("\\vspace" + star + '{' + baselineskip + "\\baselineskip}")
        # \hspace insets keep the length on the line after \begin_inset
        hspaceLine = find_token(document.body, "\\begin_inset space \\hspace", i - 1)
        # NOTE(review): the two warnings below look like leftover debug
        # output — consider removing them.
        document.warning("hspaceLine: " + str(hspaceLine))
        document.warning("i: " + str(i))
        if hspaceLine == i - 1:
            # output space inset as TeX code
            # first read out the values
            beg = document.body[i].rfind("\\length ");
            end = document.body[i].rfind("baselineskip%");
            # NOTE(review): "\\length " is 8 chars, so the slice starts
            # at the trailing space; float() tolerates the whitespace.
            baselineskip = float(document.body[i][beg + 7:end]);
            document.warning("baselineskip: " + str(baselineskip))
            # we store the value in percent, thus divide by 100
            baselineskip = baselineskip/100;
            baselineskip = str(baselineskip);
            # check if it is the starred version
            if document.body[i-1].find('*') != -1:
                star = '*'
            else:
                star = ''
            # now output TeX code
            endInset = find_end_of_inset(document.body, i)
            if endInset == -1:
                document.warning("Malformed LyX document: Missing '\\end_inset' of space inset.")
                return
            else:
                document.body[hspaceLine: endInset + 1] = put_cmd_in_ert("\\hspace" + star + '{' + baselineskip + "\\baselineskip}")

        i = i + 1
|
|
|
|
|
|
|
|
|
2017-04-15 04:57:52 +00:00
|
|
|
def revert_rotfloat(document):
    """Revert placement options for rotated (sideways) floats to TeX.

    Sideways floats with an explicit placement cannot be expressed in
    the older format, so the float inset is rewritten as ERT using
    rotfloat's sideways<type> environment.

    Layout assumption (derived from the fixed offsets used below): for a
    match at line i, i-3 is "\\begin_inset Float <type>", i-2 the
    "placement ..." line, i-1 the "wide ..." line, i the "sideways true"
    line.
    """
    i = 0
    j = 0
    k = 0  # NOTE(review): unused
    while True:
        i = find_token(document.body, "sideways true", i)
        if i != -1:
            regexp = re.compile(r'^.*placement.*$')
            j = find_re(document.body, regexp, i-2)
            if j == -1:
                return
            if j != i-2:
                # sideways float without explicit placement: skip it
                i = i + 1
                continue
        else:
            return
        # we found a sideways float with placement options
        # at first store the placement
        beg = document.body[i-2].rfind(" ");
        placement = document.body[i-2][beg+1:]
        # check if the option'H' is used
        if placement.find("H") != -1:
            add_to_preamble(document, ["\\usepackage{float}"])
        # now check if it is a starred type
        if document.body[i-1].find("wide true") != -1:
            star = '*'
        else:
            star = ''
        # store the float type
        beg = document.body[i-3].rfind(" ");
        fType = document.body[i-3][beg+1:]
        # now output TeX code
        endInset = find_end_of_inset(document.body, i-3)
        if endInset == -1:
            document.warning("Malformed LyX document: Missing '\\end_inset' of Float inset.")
            return
        else:
            # replace the inset tail first so the earlier indices stay valid
            document.body[endInset-2: endInset+1] = put_cmd_in_ert("\\end{sideways" + fType + star + '}')
            document.body[i-3: i+2] = put_cmd_in_ert("\\begin{sideways" + fType + star + "}[" + placement + ']')
            add_to_preamble(document, ["\\usepackage{rotfloat}"])

        i = i + 1
|
|
|
|
|
2017-04-25 00:28:10 +00:00
|
|
|
|
2018-01-23 13:01:30 +00:00
|
|
|
# Inset lines used to emulate "\SpecialChar allowbreak" with a
# zero-width horizontal space; written by revert_allowbreak() and
# matched/removed again by convert_allowbreak().
allowbreak_emulation = [r"\begin_inset space \hspace{}",
                        r"\length 0dd",
                        r"\end_inset",
                        r""]
|
|
|
|
|
2017-03-06 13:49:30 +00:00
|
|
|
def convert_allowbreak(document):
    """Zero-width Space inset -> \\SpecialChar allowbreak."""
    body = document.body
    hit = find_complete_lines(body, allowbreak_emulation, 2)
    while hit != -1:
        # Fold the four emulation lines into the preceding line and
        # append the special char there.
        body[hit-1:hit+4] = [body[hit-1] + r"\SpecialChar allowbreak"]
        hit = find_complete_lines(body, allowbreak_emulation, hit)
|
2017-03-06 13:49:30 +00:00
|
|
|
|
2017-04-25 00:28:10 +00:00
|
|
|
|
2017-03-06 13:49:30 +00:00
|
|
|
def revert_allowbreak(document):
    """\\SpecialChar allowbreak -> zero-width Space inset."""
    marker = r"\SpecialChar allowbreak"
    body = document.body
    n = 1
    while n < len(body):
        if not body[n].endswith(marker):
            n += 1
            continue
        # strip the marker and append the emulation inset lines
        body[n:n+1] = [body[n].replace(marker, "")] + allowbreak_emulation
        # jump past the four inserted lines plus the edited one
        n += 5
|
2017-03-06 13:49:30 +00:00
|
|
|
|
2017-04-15 04:57:52 +00:00
|
|
|
|
2017-04-25 00:28:10 +00:00
|
|
|
def convert_mathnumberpos(document):
    " add the \\math_number_before tag "
    # check if the document uses the class option "leqno"
    k = find_token(document.header, "\\quotes_style", 0)
    m = find_token(document.header, "\\options", 0)
    regexp = re.compile(r'^.*leqno.*')
    i = find_re(document.header, regexp, 0)
    # only act when "leqno" occurs on the \options line itself
    if i != -1 and i == m:
        # NOTE(review): inserting at k assumes \options precedes
        # \quotes_style in the header, otherwise i would be shifted.
        document.header.insert(k, "\\math_number_before 1")
        # delete the found option
        document.header[i] = document.header[i].replace(",leqno", "")
        document.header[i] = document.header[i].replace(", leqno", "")
        document.header[i] = document.header[i].replace("leqno,", "")
        # if the line still matches, "leqno" had no neighbours
        j = find_re(document.header, regexp, 0)
        if i == j:
            # then we have leqno as the only option
            del document.header[i]
    else:
        document.header.insert(k, "\\math_number_before 0")
|
|
|
|
|
|
|
|
|
|
|
|
def revert_mathnumberpos(document):
    """Revert \\math_number_before to the document class option "leqno".

    If numbers go before (value 1), "leqno" is added to \\options and
    the tag is removed; otherwise the tag is simply deleted.

    Fix vs. the previous revision: when \\math_number_before was absent
    altogether, `del document.header[j]` ran with j == -1 and silently
    deleted the last header line; the deletion is now guarded.
    """
    regexp = re.compile(r'(\\math_number_before 1)')
    i = find_re(document.header, regexp, 0)
    if i == -1:
        # no left-numbering: just drop the tag, if present at all
        regexp = re.compile(r'(\\math_number_before)')
        j = find_re(document.header, regexp, 0)
        if j != -1:
            del document.header[j]
    else:
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options leqno,")
            del document.header[i]
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l, "\\options leqno")
            # NOTE: the insert above shifts the tag from i to i+1
            # (\use_default_options precedes \math_number_before).
            del document.header[i + 1]
|
|
|
|
|
|
|
|
|
2017-05-13 18:39:45 +00:00
|
|
|
def convert_mathnumberingname(document):
    " rename the \\math_number_before tag to \\math_numbering_side "
    # \math_number_before 1 -> left
    regexp = re.compile(r'(\\math_number_before 1)')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        document.header[i] = "\\math_numbering_side left"
    # \math_number_before 0 -> default
    regexp = re.compile(r'(\\math_number_before 0)')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        document.header[i] = "\\math_numbering_side default"
    # check if the document uses the class option "reqno"
    k = find_token(document.header, "\\math_numbering_side", 0)
    m = find_token(document.header, "\\options", 0)
    regexp = re.compile(r'^.*reqno.*')
    i = find_re(document.header, regexp, 0)
    # only act when "reqno" occurs on the \options line itself
    if i != -1 and i == m:
        document.header[k] = "\\math_numbering_side right"
        # delete the found option
        document.header[i] = document.header[i].replace(",reqno", "")
        document.header[i] = document.header[i].replace(", reqno", "")
        document.header[i] = document.header[i].replace("reqno,", "")
        # if the line still matches, "reqno" had no neighbours
        j = find_re(document.header, regexp, 0)
        if i == j:
            # then we have reqno as the only option
            del document.header[i]
|
|
|
|
|
|
|
|
|
|
|
|
def revert_mathnumberingname(document):
    " rename the \\math_numbering_side tag back to \\math_number_before "
    # just rename
    regexp = re.compile(r'(\\math_numbering_side left)')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        document.header[i] = "\\math_number_before 1"
    # add the option reqno and delete the tag
    regexp = re.compile(r'(\\math_numbering_side right)')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        document.header[i] = "\\math_number_before 0"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options reqno,")
        else:
            # no \options line yet: create one before \use_default_options
            l = find_token(document.header, "\\use_default_options")
            document.header.insert(l, "\\options reqno")
    # add the math_number_before tag
    regexp = re.compile(r'(\\math_numbering_side default)')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        document.header[i] = "\\math_number_before 0"
|
|
|
|
|
|
|
|
|
2017-06-06 22:55:23 +00:00
|
|
|
def convert_minted(document):
    """Add the \\use_minted tag (disabled by default)."""
    # place the new tag right before the first \index entry
    anchor = find_token(document.header, "\\index ")
    document.header.insert(anchor, "\\use_minted 0")
|
2017-06-06 22:55:23 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_minted(document):
    " remove the \\use_minted tag "

    # Older formats do not know the tag, so simply drop the line if present.
    pos = find_token(document.header, "\\use_minted", 0)
    if pos != -1:
        del document.header[pos]
|
|
|
|
|
|
|
|
|
2016-06-03 05:40:21 +00:00
|
|
|
##
|
|
|
|
# Conversion hub
|
|
|
|
#
|
|
|
|
|
|
|
|
# LyX versions this conversion module targets (read by the lyx2lyx driver).
supported_versions = ["2.3.0", "2.3"]
|
|
|
|
# Forward-conversion table: each entry maps a target file-format number to
# the routines that bring a document up to that format (an empty list is a
# pure format-number bump with no content change).
convert = [
           [509, [convert_microtype]],
           [510, [convert_dateinset]],
           [511, [convert_ibranches]],
           [512, [convert_beamer_article_styles]],
           [513, []],
           [514, []],
           [515, []],
           [516, [convert_inputenc]],
           [517, []],
           [518, [convert_iopart]],
           [519, [convert_quotestyle]],
           [520, []],
           [521, [convert_frenchquotes]],
           [522, []],
           [523, []],
           [524, []],
           [525, []],
           [526, []],
           [527, []],
           [528, []],
           [529, []],
           [530, []],
           [531, []],
           [532, [convert_literalparam]],
           [533, []],
           [534, []],
           [535, [convert_dashligatures]],
           [536, []],
           [537, []],
           [538, [convert_mathindent]],
           [539, []],
           [540, []],
           [541, [convert_allowbreak]],
           [542, [convert_mathnumberpos]],
           [543, [convert_mathnumberingname]],
           [544, [convert_minted]]
          ]
|
2016-06-03 05:40:21 +00:00
|
|
|
|
|
|
|
# Backward-conversion table: mirror of `convert`, in reverse order; each
# entry maps a source format number to the routines that take a document
# back down to the previous format.
revert = [
           [543, [revert_minted]],
           [542, [revert_mathnumberingname]],
           [541, [revert_mathnumberpos]],
           [540, [revert_allowbreak]],
           [539, [revert_rotfloat]],
           [538, [revert_baselineskip]],
           [537, [revert_mathindent]],
           [536, [revert_xout]],
           [535, [revert_noto]],
           [534, [revert_dashligatures]],
           [533, [revert_chapterbib]],
           [532, [revert_multibib]],
           [531, [revert_literalparam]],
           [530, [revert_qualicites]],
           [529, [revert_bibpackopts]],
           [528, [revert_citekeyonly]],
           [527, [revert_biblatex]],
           [526, [revert_noprefix]],
           [525, [revert_plural_refs]],
           [524, [revert_labelonly]],
           [523, [revert_crimson, revert_cochinealmath]],
           [522, [revert_cjkquotes]],
           [521, [revert_dynamicquotes]],
           [520, [revert_britishquotes, revert_swedishgquotes, revert_frenchquotes, revert_frenchinquotes, revert_russianquotes, revert_swissquotes]],
           [519, [revert_plainquote]],
           [518, [revert_quotestyle]],
           [517, [revert_iopart]],
           [516, [revert_quotes]],
           [515, []],
           [514, [revert_urdu, revert_syriac]],
           [513, [revert_amharic, revert_asturian, revert_kannada, revert_khmer]],
           [512, [revert_bosnian, revert_friulan, revert_macedonian, revert_piedmontese, revert_romansh]],
           [511, [revert_beamer_article_styles]],
           [510, [revert_ibranches]],
           [509, []],
           [508, [revert_microtype]]
          ]
|
|
|
|
|
|
|
|
|
|
|
|
# This module is meant to be imported by the lyx2lyx driver; it has no
# standalone command-line behavior.
if __name__ == "__main__":
    pass
|