# -*- coding: utf-8 -*-
# This file is part of lyx2lyx
# Copyright (C) 2016 The LyX team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

""" Convert files to the file format generated by lyx 2.3"""

import re, string
import unicodedata
import sys, os

# Uncomment only what you need to import, please.

from parser_tools import find_end_of, find_token_backwards, find_end_of_layout, \
    find_token, find_end_of_inset, get_value, get_bool_value, \
    get_containing_layout, get_quoted_value, del_token
#    find_tokens, find_token_exact, is_in_inset, \
#    check_token, get_option_value

from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert
#    get_ert, lyx2latex, \
#    lyx2verbatim, length_in_bp, convert_info_insets
#    insert_to_preamble, latex_length, revert_flex_inset, \
#    revert_font_attrs, hex2ratio, str2bool

####################################################################
# Private helper functions


###############################################################################
###
### Conversion and reversion routines
###
###############################################################################

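# Format 509 introduces the \use_microtype header setting.  On conversion its
# value is derived from whether \usepackage{microtype} is already present in
# the user preamble; on reversion the package call is put back there.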
def convert_microtype(document):
    " Add microtype settings. "
    i = find_token(document.header, "\\font_tt_scale", 0)
    if i == -1:
        document.warning("Malformed LyX document: Can't find \\font_tt_scale.")
        i = len(document.header) - 1

    j = find_token(document.preamble, "\\usepackage{microtype}", 0)
    if j == -1:
        document.header.insert(i + 1, "\\use_microtype false")
    else:
        document.header.insert(i + 1, "\\use_microtype true")
        del document.preamble[j]


def revert_microtype(document):
    " Remove microtype settings. "
    i = find_token(document.header, "\\use_microtype", 0)
    if i == -1:
        return
    use_microtype = get_bool_value(document.header, "\\use_microtype", i)
    del document.header[i]
    if use_microtype:
        add_to_preamble(document, ["\\usepackage{microtype}"])


def convert_dateinset(document):
    ' Convert date external inset to ERT '
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset External", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset' in convert_dateinset.")
            i += 1
            continue
        if get_value(document.body, 'template', i, j) == "Date":
            document.body[i : j + 1] = put_cmd_in_ert("\\today ")
        i += 1
        continue


def convert_inputenc(document):
    " Replace no longer supported input encoding settings. "
    i = find_token(document.header, "\\inputenc", 0)
    if i == -1:
        return
    if get_value(document.header, "\\inputencoding", i) == "pt254":
        document.header[i] = "\\inputencoding pt154"


def convert_ibranches(document):
    ' Add "inverted 0" to branch insets'
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Branch", i)
        if i == -1:
            return
        document.body.insert(i + 1, "inverted 0")
        i += 1


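# Inverted branch insets (shown when their branch is *not* selected) are new
# in this format.  To revert, each inverted inset is rewritten to use an
# explicit "Anti-<branch>" branch whose selection status is the opposite of
# the original branch's.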
def revert_ibranches(document):
    ' Convert inverted branches to explicit anti-branches'
    # Get list of branches
    ourbranches = {}
    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        branch = document.header[i][8:].strip()
        if document.header[i+1].startswith("\\selected "):
            #document.warning(document.header[i+1])
            #document.warning(document.header[i+1][10])
            selected = int(document.header[i+1][10])
        else:
            document.warning("Malformed LyX document: No selection indicator for branch " + branch)
            selected = 1

        # the value tells us whether the branch is selected
        ourbranches[document.header[i][8:].strip()] = selected
        i += 1

    # Figure out what inverted branches, if any, have been used
    # and convert them to "Anti-OldBranch"
    ibranches = {}
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Branch", i)
        if i == -1:
            break
        if not document.body[i+1].startswith("inverted "):
            document.warning("Malformed LyX document: Missing 'inverted' tag!")
            i += 1
            continue
        inverted = document.body[i+1][9]
        #document.warning(document.body[i+1])

        if inverted == "1":
            branch = document.body[i][20:].strip()
            #document.warning(branch)
            if not branch in ibranches:
                antibranch = "Anti-" + branch
                while antibranch in ibranches:
                    antibranch = "x" + antibranch
                ibranches[branch] = antibranch
            else:
                antibranch = ibranches[branch]
            #document.warning(antibranch)
            document.body[i] = "\\begin_inset Branch " + antibranch

        # remove "inverted" key
        del document.body[i+1]
        i += 1

    # now we need to add the new branches to the header
    for old, new in ibranches.items():
        i = find_token(document.header, "\\branch " + old, 0)
        if i == -1:
            document.warning("Can't find branch %s even though we found it before!" % (old))
            continue
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document! Can't find end of branch " + old)
            continue
        # 1 - ourbranches[old] inverts the selection status of the old branch
        lines = ["\\branch " + new,
                 "\\selected " + str(1 - ourbranches[old])]
        # these are the old lines telling us color, etc.
        lines += document.header[i+2 : j+1]
        document.header[i:i] = lines


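# revert_beamer_article_styles re-adds the (scr)article style definitions to
# beamer-article documents through the local layout; the matching convert
# routine strips that lyx2lyx-inserted block again.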
def revert_beamer_article_styles(document):
    " Include (scr)article styles in beamer article "

    beamer_articles = ["article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_articles:
        return

    inclusion = "article.layout"
    if document.textclass == "scrarticle-beamer":
        inclusion = "scrartcl.layout"

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k - 1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document: Can't find end of local layout!")
        return

    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (more [scr]article styles) ###",
        "Input " + inclusion,
        "Input beamer.layout",
        "Provides geometry 0",
        "Provides hyperref 0",
        "DefaultFont",
        " Family Roman",
        " Series Medium",
        " Shape Up",
        " Size Normal",
        " Color None",
        "EndFont",
        "Preamble",
        " \\usepackage{beamerarticle,pgf}",
        " % this default might be overridden by plain title style",
        " \\newcommand\\makebeamertitle{\\frame{\\maketitle}}%",
        " \\AtBeginDocument{",
        " \\let\\origtableofcontents=\\tableofcontents",
        " \\def\\tableofcontents{\\@ifnextchar[{\\origtableofcontents}{\\gobbletableofcontents}}",
        " \\def\\gobbletableofcontents#1{\\origtableofcontents}",
        " }",
        "EndPreamble",
        "### End of insertion by lyx2lyx (more [scr]article styles) ###"
    ]


def convert_beamer_article_styles(document):
    " Remove included (scr)article styles in beamer article "

    beamer_articles = ["article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_articles:
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        return

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document: Can't find end of local layout!")
        return

    k = find_token(document.header, "### Inserted by lyx2lyx (more [scr]article styles) ###", i, j)
    if k != -1:
        l = find_token(document.header, "### End of insertion by lyx2lyx (more [scr]article styles) ###", i, j)
        if l == -1:
            # this should not happen
            document.warning("End of lyx2lyx local layout insertion not found!")
            return

        if k == i + 1 and l == j - 1:
            # that was all the local layout there was
            document.header[i : j + 1] = []
        else:
            document.header[k : l + 1] = []


def revert_bosnian(document):
    "Set the document language to English but assure Bosnian output"

    if document.language == "bosnian":
        document.language = "english"
        i = find_token(document.header, "\\language bosnian", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options bosnian,")
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options bosnian")


def revert_friulan(document):
    "Set the document language to English but assure Friulan output"

    if document.language == "friulan":
        document.language = "english"
        i = find_token(document.header, "\\language friulan", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options friulan,")
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options friulan")


def revert_macedonian(document):
    "Set the document language to English but assure Macedonian output"

    if document.language == "macedonian":
        document.language = "english"
        i = find_token(document.header, "\\language macedonian", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options macedonian,")
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options macedonian")


def revert_piedmontese(document):
    "Set the document language to English but assure Piedmontese output"

    if document.language == "piedmontese":
        document.language = "english"
        i = find_token(document.header, "\\language piedmontese", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options piedmontese,")
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options piedmontese")


def revert_romansh(document):
    "Set the document language to English but assure Romansh output"

    if document.language == "romansh":
        document.language = "english"
        i = find_token(document.header, "\\language romansh", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options romansh,")
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options romansh")


def revert_amharic(document):
    "Set the document language to English but assure Amharic output"

    if document.language == "amharic":
        document.language = "english"
        i = find_token(document.header, "\\language amharic", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{amharic}}"])
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{amharic}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]


def revert_asturian(document):
    "Set the document language to English but assure Asturian output"

    if document.language == "asturian":
        document.language = "english"
        i = find_token(document.header, "\\language asturian", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{asturian}}"])
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{asturian}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]


def revert_kannada(document):
    "Set the document language to English but assure Kannada output"

    if document.language == "kannada":
        document.language = "english"
        i = find_token(document.header, "\\language kannada", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{kannada}}"])
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{kannada}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]


def revert_khmer(document):
    "Set the document language to English but assure Khmer output"

    if document.language == "khmer":
        document.language = "english"
        i = find_token(document.header, "\\language khmer", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{khmer}}"])
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{khmer}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]


def revert_urdu(document):
    "Set the document language to English but assure Urdu output"

    if document.language == "urdu":
        document.language = "english"
        i = find_token(document.header, "\\language urdu", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{urdu}}"])
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{urdu}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]


def revert_syriac(document):
    "Set the document language to English but assure Syriac output"

    if document.language == "syriac":
        document.language = "english"
        i = find_token(document.header, "\\language syriac", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{syriac}}"])
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{syriac}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]


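# Quote insets carry a three-letter code, e.g. "eld": the first letter is the
# style (e=english, f=french, ...), the second the side (l=opening,
# r=closing), the third the type (d=double, s=single).  This is why the quote
# routines below inspect val[1], val[2] and lines ending in "s".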
def revert_quotes(document):
    " Revert Quote Insets in verbatim or Hebrew context to plain quotes "

    # First handle verbatim insets
    i = 0
    j = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           ( words[1] in ["ERT", "listings"] or ( len(words) > 2 and words[2] in ["URL", "Chunk", "Sweave", "S/R"]) ):
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " + words[1] + " inset at line " + str(i))
                i += 1
                continue
            while True:
                k = find_token(document.body, '\\begin_inset Quotes', i, j)
                if k == -1:
                    i += 1
                    break
                l = find_end_of_inset(document.body, k)
                if l == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
                    i = k
                    continue
                replace = "\""
                if document.body[k].endswith("s"):
                    replace = "'"
                document.body[k:l+1] = [replace]
        else:
            i += 1
            continue

    # Now verbatim layouts
    i = 0
    j = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_layout" and \
           words[1] in ["Verbatim", "Verbatim*", "Code", "Author_Email", "Author_URL"]:
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " + words[1] + " layout at line " + str(i))
                i += 1
                continue
            while True:
                k = find_token(document.body, '\\begin_inset Quotes', i, j)
                if k == -1:
                    i += 1
                    break
                l = find_end_of_inset(document.body, k)
                if l == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
                    i = k
                    continue
                replace = "\""
                if document.body[k].endswith("s"):
                    replace = "'"
                document.body[k:l+1] = [replace]
        else:
            i += 1
            continue

    # Now handle Hebrew
    if not document.language == "hebrew" and find_token(document.body, '\\lang hebrew', 0) == -1:
        return

    i = 0
    j = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        hebrew = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            hebrew = document.language == "hebrew"
        elif document.body[ql] == "\\lang hebrew":
            hebrew = True
        if hebrew:
            replace = "\""
            if document.body[k].endswith("s"):
                replace = "'"
            document.body[k:l+1] = [replace]
        i = l


def revert_iopart(document):
    " Input new styles via local layout "
    if document.textclass != "iopart":
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k-1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (stdlayouts) ###",
        "Input stdlayouts.inc",
        "### End of insertion by lyx2lyx (stdlayouts) ###"
    ]


def convert_iopart(document):
    " Remove local layout we added, if it is there "
    if document.textclass != "iopart":
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        return

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    k = find_token(document.header, "### Inserted by lyx2lyx (stdlayouts) ###", i, j)
    if k != -1:
        l = find_token(document.header, "### End of insertion by lyx2lyx (stdlayouts) ###", i, j)
        if l == -1:
            # this should not happen
            document.warning("End of lyx2lyx local layout insertion not found!")
            return
        if k == i + 1 and l == j - 1:
            # that was all the local layout there was
            document.header[i : j + 1] = []
        else:
            document.header[k : l + 1] = []


def convert_quotestyle(document):
    " Convert \\quotes_language to \\quotes_style "
    i = find_token(document.header, "\\quotes_language", 0)
    if i == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_language!")
        return
    val = get_value(document.header, "\\quotes_language", i)
    document.header[i] = "\\quotes_style " + val


def revert_quotestyle(document):
    " Revert \\quotes_style to \\quotes_language "
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_style!")
        return
    val = get_value(document.header, "\\quotes_style", i)
    document.header[i] = "\\quotes_language " + val


def revert_plainquote(document):
    " Revert plain quote insets "

    # First, revert style setting
    i = find_token(document.header, "\\quotes_style plain", 0)
    if i != -1:
        document.header[i] = "\\quotes_style english"

    # now the insets
    i = 0
    j = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes q', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        replace = "\""
        if document.body[k].endswith("s"):
            replace = "'"
        document.body[k:l+1] = [replace]
        i = l


def convert_frenchquotes(document):
    " Convert french quote insets to swiss "

    # First, convert the style setting
    i = find_token(document.header, "\\quotes_style french", 0)
    if i != -1:
        document.header[i] = "\\quotes_style swiss"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes f', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("f", "c", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1


def revert_swissquotes(document):
    " Revert swiss quote insets to french "

    # First, revert style setting
    i = find_token(document.header, "\\quotes_style swiss", 0)
    if i != -1:
        document.header[i] = "\\quotes_style french"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes c', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("c", "f", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1


def revert_britishquotes(document):
    " Revert british quote insets to english "

    # First, revert style setting
    i = find_token(document.header, "\\quotes_style british", 0)
    if i != -1:
        document.header[i] = "\\quotes_style english"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes b', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("b", "e", 1)
        if val[2] == "d":
            # double mark becomes single
            newval = newval.replace("d", "s")
        else:
            # single mark becomes double
            newval = newval.replace("s", "d")
        document.body[i] = document.body[i].replace(val, newval)
        i += 1


def revert_swedishgquotes(document):
    " Revert swedish quote insets "

    # First, revert style setting
    i = find_token(document.header, "\\quotes_style swedishg", 0)
    if i != -1:
        document.header[i] = "\\quotes_style danish"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes w', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        if val[2] == "d":
            # outer marks
            newval = val.replace("w", "a", 1).replace("r", "l")
        else:
            # inner marks
            newval = val.replace("w", "s", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1


def revert_frenchquotes(document):
    " Revert french inner quote insets "

    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes f', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        if val[2] == "s":
            # inner marks
            newval = val.replace("f", "e", 1).replace("s", "d")
            document.body[i] = document.body[i].replace(val, newval)
        i += 1


def revert_frenchinquotes(document):
    " Revert inner frenchin quote insets "

    # First, revert style setting
    i = find_token(document.header, "\\quotes_style frenchin", 0)
    if i != -1:
        document.header[i] = "\\quotes_style french"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes i', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("i", "f", 1)
        if val[2] == "s":
            # inner marks
            newval = newval.replace("s", "d")
        document.body[i] = document.body[i].replace(val, newval)
        i += 1


def revert_russianquotes(document):
    " Revert russian quote insets "

    # First, revert style setting
    i = find_token(document.header, "\\quotes_style russian", 0)
    if i != -1:
        document.header[i] = "\\quotes_style french"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes r', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val
        if val[2] == "s":
            # inner marks
            newval = val.replace("r", "g", 1).replace("s", "d")
        else:
            # outer marks
            newval = val.replace("r", "f", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1


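# Dynamic quote insets use the pseudo-style letter "x" and take their actual
# style from \quotes_style at export time; reverting hard-codes them to the
# document's global style letter.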
def revert_dynamicquotes(document):
    " Revert dynamic quote insets "

    # First, revert header
    i = find_token(document.header, "\\dynamic_quotes", 0)
    if i != -1:
        del document.header[i]

    # Get global style
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    s = "e"
    if style == "english":
        s = "e"
    elif style == "swedish":
        s = "s"
    elif style == "german":
        s = "g"
    elif style == "polish":
        s = "p"
    elif style == "swiss":
        s = "c"
    elif style == "danish":
        s = "a"
    elif style == "plain":
        s = "q"
    elif style == "british":
        s = "b"
    elif style == "swedishg":
        s = "w"
    elif style == "french":
        s = "f"
    elif style == "frenchin":
        s = "i"
    elif style == "russian":
        s = "r"

    # now transform the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes x', i)
        if i == -1:
            return
        document.body[i] = document.body[i].replace("x", s)
        i += 1


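# CJK quote insets: "j" denotes corner brackets, "k" angle brackets.  Inside
# CJK text the reversion below emits the corresponding Unicode characters
# directly; in other contexts it falls back to math-mode approximations.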
def revert_cjkquotes(document):
    " Revert cjk quote insets "

    # Get global style
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    global_cjk = style.find("cjk") != -1

    if global_cjk:
        document.header[i] = "\\quotes_style english"
        # transform dynamic insets
        s = "j"
        if style == "cjkangle":
            s = "k"
        i = 0
        while True:
            i = find_token(document.body, '\\begin_inset Quotes x', i)
            if i == -1:
                break
            document.body[i] = document.body[i].replace("x", s)
            i += 1

    cjk_langs = ["chinese-simplified", "chinese-traditional", "japanese", "japanese-cjk", "korean"]

    i = 0
    j = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes j', i)
        if k == -1:
            break
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        cjk = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[ql].split()[1] in cjk_langs:
            cjk = True
        val = get_value(document.body, "\\begin_inset Quotes", k)[7:]
        replace = []
        if val[2] == "s":
            # inner marks
            if val[1] == "l":
                # inner opening mark
                if cjk:
                    replace = [u"\u300E"]
                else:
                    replace = ["\\begin_inset Formula $\\llceil$", "\\end_inset"]
            else:
                # inner closing mark
                if cjk:
                    replace = [u"\u300F"]
                else:
                    replace = ["\\begin_inset Formula $\\rrfloor$", "\\end_inset"]
        else:
            # outer marks
            if val[1] == "l":
                # outer opening mark
                if cjk:
                    replace = [u"\u300C"]
                else:
                    replace = ["\\begin_inset Formula $\\lceil$", "\\end_inset"]
            else:
                # outer closing mark
                if cjk:
                    replace = [u"\u300D"]
                else:
                    replace = ["\\begin_inset Formula $\\rfloor$", "\\end_inset"]

        document.body[k:l+1] = replace
        i = l

    i = 0
    j = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes k', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        cjk = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[ql].split()[1] in cjk_langs:
            cjk = True
        val = get_value(document.body, "\\begin_inset Quotes", k)[7:]
        replace = []
        if val[2] == "s":
            # inner marks
            if val[1] == "l":
                # inner opening mark
                if cjk:
                    replace = [u"\u3008"]
                else:
                    replace = ["\\begin_inset Formula $\\langle$", "\\end_inset"]
            else:
                # inner closing mark
                if cjk:
                    replace = [u"\u3009"]
                else:
                    replace = ["\\begin_inset Formula $\\rangle$", "\\end_inset"]
        else:
            # outer marks
            if val[1] == "l":
                # outer opening mark
                if cjk:
                    replace = [u"\u300A"]
                else:
                    replace = ["\\begin_inset Formula $\\langle\\kern -2.5pt\\langle$", "\\end_inset"]
            else:
                # outer closing mark
                if cjk:
                    replace = [u"\u300B"]
                else:
                    replace = ["\\begin_inset Formula $\\rangle\\kern -2.5pt\\rangle$", "\\end_inset"]

        document.body[k:l+1] = replace
        i = l


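# With TeX fonts, the native Cochineal/Crimson selection is reverted to an
# explicit \usepackage{cochineal} in the preamble, adding the old-style-figure
# options when \font_osf was set.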
def revert_crimson(document):
    " Revert native Cochineal/Crimson font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        preamble = ""
        i = find_token(document.header, "\\font_roman \"cochineal\"", 0)
        if i != -1:
            osf = False
            j = find_token(document.header, "\\font_osf true", 0)
            if j != -1:
                osf = True
            preamble = "\\usepackage"
            if osf:
                document.header[j] = "\\font_osf false"
                preamble += "[proportional,osf]"
            preamble += "{cochineal}"
            add_to_preamble(document, [preamble])
            document.header[i] = document.header[i].replace("cochineal", "default")


def revert_cochinealmath(document):
    " Revert cochineal newtxmath definitions to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_math \"cochineal-ntxm\"", 0)
        if i != -1:
            add_to_preamble(document, "\\usepackage[cochineal]{newtxmath}")
            document.header[i] = document.header[i].replace("cochineal-ntxm", "auto")


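# "labelonly" references print only the label string; reverting replaces the
# whole reference inset with the bare label in ERT.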
def revert_labelonly(document):
    " Revert labelonly tag for InsetRef "
    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand labelonly", i, j)
        if k == -1:
            i = j
            continue
        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(i))
            i = j + 1
            continue
        document.body[i:j+1] = put_cmd_in_ert([label])
        i += 1


def revert_plural_refs(document):
    " Revert plural and capitalized references "
    i = find_token(document.header, "\\use_refstyle 1", 0)
    use_refstyle = (i != -1)

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue

        plural = caps = suffix = False
        k = find_token(document.body, "LatexCommand formatted", i, j)
        if k != -1 and use_refstyle:
            plural = get_bool_value(document.body, "plural", i, j, False)
            caps = get_bool_value(document.body, "caps", i, j, False)
            label = get_quoted_value(document.body, "reference", i, j)
            if label:
                try:
                    (prefix, suffix) = label.split(":", 1)
                except:
                    document.warning("No `:' separator in formatted reference at line %d!" % (i))
            else:
                document.warning("Can't find label for reference at line %d!" % (i))

        # this effectively tests also for use_refstyle and a formatted reference
        # we do this complicated test because we would otherwise do this erasure
        # over and over and over
        if not ((plural or caps) and suffix):
            del_token(document.body, "plural", i, j)
            del_token(document.body, "caps", i, j - 1) # since we deleted a line
            i = j - 1
            continue

        if caps:
            prefix = prefix[0].title() + prefix[1:]
        cmd = "\\" + prefix + "ref"
        if plural:
            cmd += "[s]"
        cmd += "{" + suffix + "}"
        document.body[i:j+1] = put_cmd_in_ert([cmd])
        i += 1


def revert_noprefix(document):
    " Revert labelonly tags with 'noprefix' set "
    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand labelonly", i, j)
        if k == -1:
            i = j
            continue
        noprefix = get_bool_value(document.body, "noprefix", i, j)
        if not noprefix:
            del_token(document.body, "noprefix", i, j)
            i = j
            continue
        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(i))
            i = j + 1
            continue
        try:
            (prefix, suffix) = label.split(":", 1)
        except:
            document.warning("No `:' separator in formatted reference at line %d!" % (i))
            # we'll leave this as an ordinary labelonly reference
            del_token(document.body, "noprefix", i, j)
            i = j
            continue
        document.body[i:j+1] = put_cmd_in_ert([suffix])
        i += 1


##
# Conversion hub
#

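# Each entry below pairs a file format number with the list of routines that
# perform that conversion or reversion step; an empty list means the format
# bump requires no changes to the document itself.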
supported_versions = ["2.3.0", "2.3"]
convert = [
           [509, [convert_microtype]],
           [510, [convert_dateinset]],
           [511, [convert_ibranches]],
           [512, [convert_beamer_article_styles]],
           [513, []],
           [514, []],
           [515, []],
           [516, [convert_inputenc]],
           [517, []],
           [518, [convert_iopart]],
           [519, [convert_quotestyle]],
           [520, []],
           [521, [convert_frenchquotes]],
           [522, []],
           [523, []],
           [524, []],
           [525, []],
           [526, []],
           [527, []]
          ]

revert = [
          [526, [revert_noprefix]],
          [525, [revert_plural_refs]],
          [524, [revert_labelonly]],
          [523, [revert_crimson, revert_cochinealmath]],
          [522, [revert_cjkquotes]],
          [521, [revert_dynamicquotes]],
          [520, [revert_britishquotes, revert_swedishgquotes, revert_frenchquotes, revert_frenchinquotes, revert_russianquotes, revert_swissquotes]],
          [519, [revert_plainquote]],
          [518, [revert_quotestyle]],
          [517, [revert_iopart]],
          [516, [revert_quotes]],
          [515, []],
          [514, [revert_urdu, revert_syriac]],
          [513, [revert_amharic, revert_asturian, revert_kannada, revert_khmer]],
          [512, [revert_bosnian, revert_friulan, revert_macedonian, revert_piedmontese, revert_romansh]],
          [511, [revert_beamer_article_styles]],
          [510, [revert_ibranches]],
          [509, []],
          [508, [revert_microtype]]
         ]


if __name__ == "__main__":
    pass