# -*- coding: utf-8 -*-
# This file is part of lyx2lyx
# Copyright (C) 2011 The LyX team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

""" Convert files to the file format generated by LyX 2.1"""
|
2011-05-03 13:12:55 +00:00
|
|
|
|
|
|
|
import re, string
|
|
|
|
import unicodedata
|
|
|
|
import sys, os
|
|
|
|
|
|
|
|
# Uncomment only what you need to import, please.
|
|
|
|
|
2012-12-09 16:19:21 +00:00
|
|
|
from parser_tools import count_pars_in_inset, del_token, find_token, find_token_exact, \
|
2012-12-19 18:33:39 +00:00
|
|
|
find_token_backwards, find_end_of, find_end_of_inset, find_end_of_layout, \
|
|
|
|
find_end_of_sequence, find_re, get_option_value, get_containing_layout, \
|
2015-02-13 09:15:29 +00:00
|
|
|
get_containing_inset, get_value, get_quoted_value, set_option_value
|
2011-07-23 18:40:21 +00:00
|
|
|
|
2011-05-03 13:12:55 +00:00
|
|
|
#from parser_tools import find_token, find_end_of, find_tokens, \
|
2012-12-09 10:04:56 +00:00
|
|
|
#find_end_of_inset, find_end_of_layout, \
|
2012-11-29 14:34:20 +00:00
|
|
|
#is_in_inset, del_token, check_token
|
2011-07-23 18:40:21 +00:00
|
|
|
|
2012-04-16 19:40:59 +00:00
|
|
|
from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert, get_ert
|
2011-07-23 18:40:21 +00:00
|
|
|
|
2012-01-05 20:53:48 +00:00
|
|
|
#from lyx2lyx_tools import insert_to_preamble, \
|
2012-04-16 19:40:59 +00:00
|
|
|
# lyx2latex, latex_length, revert_flex_inset, \
|
2011-05-03 13:12:55 +00:00
|
|
|
# revert_font_attrs, hex2ratio, str2bool
|
|
|
|
|
|
|
|
####################################################################
|
|
|
|
# Private helper functions
|
|
|
|
|
|
|
|
#def remove_option(lines, m, option):
|
|
|
|
#''' removes option from line m. returns whether we did anything '''
|
|
|
|
#l = lines[m].find(option)
|
|
|
|
#if l == -1:
|
|
|
|
#return False
|
|
|
|
#val = lines[m][l:].split('"')[1]
|
|
|
|
#lines[m] = lines[m][:l - 1] + lines[m][l+len(option + '="' + val + '"'):]
|
|
|
|
#return True
|
|
|
|
|
|
|
|
|
2013-07-24 13:47:14 +00:00
|
|
|
def revert_Argument_to_TeX_brace(document, line, endline, n, nmax, environment, opt):
    '''
    Reverts an InsetArgument to TeX-code
    usage:
    revert_Argument_to_TeX_brace(document, LineOfBegin, LineOfEnd, StartArgument, EndArgument, isEnvironment, isOpt)
    LineOfBegin is the line of the \begin_layout or \begin_inset statement
    LineOfEnd is the line of the \end_layout or \end_inset statement, if "0" is given, the end of the file is used instead
    StartArgument is the number of the first argument that needs to be converted
    EndArgument is the number of the last argument that needs to be converted or the last defined one
    isEnvironment must be true, if the layout is for a LaTeX environment
    isOpt must be true, if the argument is an optional one

    Returns True if the last reverted argument was an optional one.
    '''
    lineArg = 0
    wasOpt = False
    while lineArg != -1 and n < nmax + 1:
        lineArg = find_token(document.body, "\\begin_inset Argument " + str(n), line)
        # stop as soon as a match lies beyond the given end line
        # (endline == 0 means: scan to the end of the file)
        if lineArg > endline and endline != 0:
            return wasOpt
        if lineArg != -1:
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", lineArg)
            # we have to assure that no other inset is in the Argument
            beginInset = find_token(document.body, "\\begin_inset", beginPlain)
            endInset = find_token(document.body, "\\end_inset", beginPlain)
            k = beginPlain + 1
            l = k
            # step over any nested insets so that endInset ends up at the
            # Argument inset's own \end_inset line
            while beginInset < endInset and beginInset != -1:
                beginInset = find_token(document.body, "\\begin_inset", k)
                endInset = find_token(document.body, "\\end_inset", l)
                k = beginInset + 1
                l = endInset + 1
            if environment == False:
                if opt == False:
                    # mandatory command argument: separate from the next one with "}{"
                    document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}{")
                    del(document.body[lineArg : beginPlain + 1])
                    wasOpt = False
                else:
                    # optional argument: wrap the content in "[" ... "]"
                    document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("]")
                    document.body[lineArg : beginPlain + 1] = put_cmd_in_ert("[")
                    wasOpt = True
            else:
                # environment argument: wrap the content in "{" ... "}"
                document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}")
                document.body[lineArg : beginPlain + 1] = put_cmd_in_ert("{")
                wasOpt = False
        n += 1
    return wasOpt
|
|
|
|
|
|
|
|
|
2013-09-03 17:02:28 +00:00
|
|
|
def convert_TeX_brace_to_Argument(document, line, n, nmax, inset, environment, opt):
    '''
    Converts TeX code for mandatory arguments to an InsetArgument
    The conversion of TeX code for optional arguments must be done with another routine
    !!! Be careful if the braces are different in your case as expected here:
    - "}{" separates mandatory arguments of commands
    - "}" + "{" separates mandatory arguments of commands
    - "}" + " " + "{" separates mandatory arguments of commands
    - { and } surround a mandatory argument of an environment
    usage:
    convert_TeX_brace_to_Argument(document, LineOfBeginLayout/Inset, StartArgument, EndArgument, isInset, isEnvironment, isOpt)
    LineOfBeginLayout/Inset is the line of the \begin_layout or \begin_inset statement
    StartArgument is the number of the first ERT that needs to be converted
    EndArgument is the number of the last ERT that needs to be converted
    isInset must be true, if braces inside an InsetLayout needs to be converted
    isEnvironment must be true, if the layout is for a LaTeX environment
    isOpt must be true, if the argument is an optional one

    Todo: this routine can currently handle only one mandatory argument of environments
    '''

    end_layout = find_end_of_layout(document.body, line)
    lineERT = line
    endn = line
    loop = 1
    while n < nmax + 1:
        lineERT = find_token(document.body, "\\begin_inset ERT", lineERT, end_layout)
        if lineERT == -1:
            break
        if environment == False:
            end_ERT = find_end_of_inset(document.body, lineERT)
            if end_ERT == -1:
                document.warning("Can't find end of ERT!!")
                break
            # Note that this only checks for ][ or }{ at the beginning of a line
            if opt:
                bracePair = find_token(document.body, "][", lineERT, end_ERT)
            else:
                bracePair = find_token(document.body, "}{", lineERT, end_ERT)
            if bracePair != -1:
                # both braces are in the same ERT inset
                end = find_token(document.body, "\\end_inset", bracePair)
                document.body[lineERT : end_ERT + 1] = ["\\end_layout", "", "\\end_inset"]
                if loop == 1:
                    # in the case that n > 1 we have optional arguments before
                    # therefore detect them if any
                    if n > 1:
                        # first check if there is an argument
                        lineArg = find_token(document.body, "\\begin_inset Argument", line)
                        if lineArg < lineERT and lineArg != -1:
                            # we have an argument, so now search backwards for its end
                            # we must now assure that we don't find other insets like e.g. a newline
                            endInsetArg = lineERT
                            endLayoutArg = endInsetArg
                            while endInsetArg != endLayoutArg + 2 and endInsetArg != -1:
                                endInsetArg = endInsetArg - 1
                                endLayoutArg = endInsetArg
                                endInsetArg = find_token_backwards(document.body, "\\end_inset", endInsetArg)
                                endLayoutArg = find_token_backwards(document.body, "\\end_layout", endLayoutArg)
                            line = endInsetArg + 1
                    if inset == False:
                        document.body[line + 1 : line + 1] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                    else:
                        document.body[line + 4 : line + 4] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                else: # if loop != 1
                    document.body[endn : endn] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                n += 1
                endn = end
                loop += 1
            else:
                # no brace pair found
                # now check the case that we have "}" + "{" in two ERTs
                if opt:
                    endBrace = find_token(document.body, "]", lineERT, end_layout)
                else:
                    endBrace = find_token(document.body, "}", lineERT, end_layout)
                if endBrace == lineERT + 5:
                    if opt:
                        beginBrace = find_token(document.body, "[", endBrace, end_layout)
                    else:
                        beginBrace = find_token(document.body, "{", endBrace, end_layout)
                    # assure that the ERTs are consecutive (11 or 12 depending if there is a space between the ERTs or not)
                    if beginBrace == endBrace + 11 or beginBrace == endBrace + 12:
                        end = find_token(document.body, "\\end_inset", beginBrace)
                        document.body[lineERT : end + 1] = ["\\end_layout", "", "\\end_inset"]
                        if loop == 1:
                            # in the case that n > 1 we have optional arguments before
                            # therefore detect them if any
                            if n > 1:
                                # first check if there is an argument
                                lineArg = find_token(document.body, "\\begin_inset Argument", line)
                                if lineArg < lineERT and lineArg != -1:
                                    # we have an argument, so now search backwards for its end
                                    # we must now assure that we don't find other insets like e.g. a newline
                                    endInsetArg = lineERT
                                    endLayoutArg = endInsetArg
                                    while endInsetArg != endLayoutArg + 2 and endInsetArg != -1:
                                        endInsetArg = endInsetArg - 1
                                        endLayoutArg = endInsetArg
                                        endInsetArg = find_token_backwards(document.body, "\\end_inset", endInsetArg)
                                        endLayoutArg = find_token_backwards(document.body, "\\end_layout", endLayoutArg)
                                    line = endInsetArg + 1
                            if inset == False:
                                document.body[line + 1 : line + 1] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                            else:
                                document.body[line + 4 : line + 4] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                        else:
                            document.body[endn : endn] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                        n += 1
                        loop += 1
                        # set the line where the next argument will be inserted
                        if beginBrace == endBrace + 11:
                            endn = end - 11
                        else:
                            endn = end - 12
                    else:
                        lineERT += 1
                else:
                    lineERT += 1
        if environment == True:
            end_ERT = find_end_of_inset(document.body, lineERT)
            if end_ERT == -1:
                document.warning("Can't find end of ERT!!")
                break
            # Note that this only checks for [ or { at the beginning of a line
            if opt:
                opening = find_token(document.body, "[", lineERT, end_ERT)
            else:
                opening = find_token(document.body, "{", lineERT, end_ERT)
            if opening != -1:
                lineERT2 = find_token(document.body, "\\begin_inset ERT", end_ERT, end_layout)
                if lineERT2 == -1:
                    # argument in a single ERT
                    # strip off the opening bracket
                    document.body[opening] = document.body[opening][1:]
                    ertcontlastline = end_ERT - 3
                    if (opt and document.body[ertcontlastline].endswith("]")) or document.body[ertcontlastline].endswith("}"):
                        # strip off the closing bracket
                        document.body[ertcontlastline] = document.body[ertcontlastline][:-1]
                        # NOTE(review): end2 is computed but never used in this branch
                        end2 = find_token(document.body, "\\end_inset", ertcontlastline)
                        document.body[lineERT : lineERT + 1] = ["\\begin_inset Argument " + str(n)]
                else:
                    end_ERT2 = find_end_of_inset(document.body, lineERT2)
                    if end_ERT2 == -1:
                        document.warning("Can't find end of second ERT!!")
                        break
                    if opt:
                        closing = find_token(document.body, "]", lineERT2, end_ERT2)
                    else:
                        closing = find_token(document.body, "}", lineERT2, end_ERT2)
                    if closing != -1: # assure that the "}" is in this ERT
                        end2 = find_token(document.body, "\\end_inset", closing)
                        document.body[lineERT2 : end2 + 1] = ["\\end_layout", "", "\\end_inset"]
                    # turn the first ERT into the Argument inset proper
                    document.body[lineERT : end_ERT + 1] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                n += 1
|
2013-07-24 13:47:14 +00:00
|
|
|
|
|
|
|
|
2011-05-03 13:12:55 +00:00
|
|
|
###############################################################################
|
|
|
|
###
|
|
|
|
### Conversion and reversion routines
|
|
|
|
###
|
|
|
|
###############################################################################
|
|
|
|
|
2011-07-23 18:40:21 +00:00
|
|
|
def revert_visible_space(document):
    "Revert InsetSpace visible into its ERT counterpart"
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset space \\textvisiblespace{}", pos)
        if pos == -1:
            return
        inset_end = find_end_of_inset(document.body, pos)
        # Swap the whole inset for the raw LaTeX command wrapped in ERT.
        document.body[pos:inset_end + 1] = put_cmd_in_ert("\\textvisiblespace{}")
|
2011-05-03 13:12:55 +00:00
|
|
|
|
|
|
|
|
2014-04-25 20:39:22 +00:00
|
|
|
# Math commands provided by the undertilde package.
undertilde_commands = ["utilde"]
def convert_undertilde(document):
    " Load undertilde automatically "
    # Find an anchor line in the header after which \use_undertilde is
    # inserted; try the \use_* tokens in order of likelihood.
    i = find_token(document.header, "\\use_mathdots" , 0)
    if i == -1:
        i = find_token(document.header, "\\use_mhchem" , 0)
        if i == -1:
            i = find_token(document.header, "\\use_esint" , 0)
            if i == -1:
                document.warning("Malformed LyX document: Can't find \\use_mathdots.")
                return;
    j = find_token(document.preamble, "\\usepackage{undertilde}", 0)
    if j != -1:
        # package was loaded in the preamble, convert this to header setting for round trip
        document.header.insert(i + 1, "\\use_undertilde 2") # on
        del document.preamble[j]
    else:
        # scan all formulas for commands the package defines
        j = 0
        while True:
            j = find_token(document.body, '\\begin_inset Formula', j)
            if j == -1:
                break
            k = find_end_of_inset(document.body, j)
            if k == -1:
                document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(j))
                j += 1
                continue
            code = "\n".join(document.body[j:k])
            for c in undertilde_commands:
                if code.find("\\%s" % c) != -1:
                    # at least one of the commands was found - need to switch package off
                    document.header.insert(i + 1, "\\use_undertilde 0") # off
                    return
            j = k
        # no command was found - set to auto (bug 9069)
        document.header.insert(i + 1, "\\use_undertilde 1") # auto
|
|
|
|
|
2011-08-10 03:37:33 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_undertilde(document):
    " Load undertilde if used in the document "
    regexp = re.compile(r'(\\use_undertilde)')
    i = find_re(document.header, regexp, 0)
    value = "1" # default is auto
    if i != -1:
        value = get_value(document.header, "\\use_undertilde" , i).split()[0]
        # the header setting does not exist in the target format
        del document.header[i]
    if value == "2": # on
        add_to_preamble(document, ["\\usepackage{undertilde}"])
    elif value == "1": # auto
        # load the package only if one of its commands occurs in a formula
        i = 0
        while True:
            i = find_token(document.body, '\\begin_inset Formula', i)
            if i == -1:
                return
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
                i += 1
                continue
            code = "\n".join(document.body[i:j])
            for c in undertilde_commands:
                if code.find("\\%s" % c) != -1:
                    add_to_preamble(document, ["\\usepackage{undertilde}"])
                    return
            i = j
|
2011-08-10 03:37:33 +00:00
|
|
|
|
|
|
|
|
2011-08-29 14:07:30 +00:00
|
|
|
def revert_negative_space(document):
    "Revert InsetSpace negmedspace and negthickspace into its TeX-code counterpart"
    i = 0
    j = 0
    reverted = False
    while True:
        i = find_token(document.body, "\\begin_inset space \\negmedspace{}", i)
        if i == -1:
            j = find_token(document.body, "\\begin_inset space \\negthickspace{}", j)
            if j == -1:
                # load amsmath in the preamble if not already loaded if we are at the end of checking
                if reverted == True:
                    i = find_token(document.header, "\\use_amsmath 2", 0)
                    if i == -1:
                        add_to_preamble(document, ["\\@ifundefined{negthickspace}{\\usepackage{amsmath}}"])
                return
        # NOTE(review): if negmedspace is exhausted but a negthickspace was
        # just found above, this return still ends the scan — confirm intended
        if i == -1:
            return
        end = find_end_of_inset(document.body, i)
        subst = put_cmd_in_ert("\\negmedspace{}")
        document.body[i:end + 1] = subst
        j = find_token(document.body, "\\begin_inset space \\negthickspace{}", j)
        if j == -1:
            return
        end = find_end_of_inset(document.body, j)
        subst = put_cmd_in_ert("\\negthickspace{}")
        document.body[j:end + 1] = subst
        reverted = True
|
|
|
|
|
|
|
|
|
|
|
|
def revert_math_spaces(document):
    "Revert formulas with protected custom space and protected hfills to TeX-code"
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Formula", i)
        if i == -1:
            return
        j = document.body[i].find("\\hspace*")
        if j != -1:
            end = find_end_of_inset(document.body, i)
            # 21 == len("\\begin_inset Formula "): keep only the formula code
            subst = put_cmd_in_ert(document.body[i][21:])
            document.body[i:end + 1] = subst
        i += 1
|
2011-08-29 14:07:30 +00:00
|
|
|
|
|
|
|
|
2011-11-07 18:36:56 +00:00
|
|
|
def convert_japanese_encodings(document):
    " Rename the japanese encodings to names understood by platex "
    # Map LyX 2.0 encoding names to the names platex understands.
    jap_enc_dict = {
        "EUC-JP-pLaTeX": "euc",
        "JIS-pLaTeX": "jis",
        "SJIS-pLaTeX": "sjis"
    }
    i = find_token(document.header, "\\inputencoding" , 0)
    if i == -1:
        # no \inputencoding header line: nothing to rename
        return
    val = get_value(document.header, "\\inputencoding", i)
    # Test membership on the dict directly instead of materializing
    # list(jap_enc_dict.keys()) — same semantics, no throwaway list.
    if val in jap_enc_dict:
        document.header[i] = "\\inputencoding %s" % jap_enc_dict[val]
|
|
|
|
|
|
|
|
|
|
|
|
def revert_japanese_encodings(document):
    " Revert the japanese encodings name changes "
    # Inverse of the mapping applied by convert_japanese_encodings.
    jap_enc_dict = {
        "euc": "EUC-JP-pLaTeX",
        "jis": "JIS-pLaTeX",
        "sjis": "SJIS-pLaTeX"
    }
    i = find_token(document.header, "\\inputencoding" , 0)
    if i == -1:
        # no \inputencoding header line: nothing to revert
        return
    val = get_value(document.header, "\\inputencoding", i)
    # Test membership on the dict directly instead of materializing
    # list(jap_enc_dict.keys()) — same semantics, no throwaway list.
    if val in jap_enc_dict:
        document.header[i] = "\\inputencoding %s" % jap_enc_dict[val]
|
|
|
|
|
|
|
|
|
2014-04-24 19:52:32 +00:00
|
|
|
def convert_justification(document):
    " Add the \\justification buffer param"
    # Probe a list of header tokens in priority order and insert the new
    # parameter right after the first one that exists.
    anchor = -1
    for token in ("\\suppress_date", "\\paperorientation",
                  "\\use_indices", "\\use_bibtopic"):
        anchor = find_token(document.header, token, 0)
        if anchor != -1:
            break
    if anchor == -1:
        document.warning("Malformed LyX document: Missing \\suppress_date.")
        return
    document.header.insert(anchor + 1, "\\justification true")
|
|
|
|
|
|
|
|
|
2011-12-07 22:33:25 +00:00
|
|
|
def revert_justification(document):
    " Revert the \\justification buffer param"
    # del_token removes the header line and reports whether it was present.
    removed = del_token(document.header, '\\justification', 0)
    if not removed:
        document.warning("Malformed LyX document: Missing \\justification.")
|
|
|
|
|
2011-12-08 23:58:30 +00:00
|
|
|
|
|
|
|
def revert_australian(document):
    "Set English language variants Australian and Newzealand to English"

    if document.language == "australian" or document.language == "newzealand":
        document.language = "english"
        i = find_token(document.header, "\\language", 0)
        if i != -1:
            document.header[i] = "\\language english"
    j = 0
    while True:
        j = find_token(document.body, "\\lang australian", j)
        if j == -1:
            # no more australian switches; fall back to scanning for
            # newzealand (restarting from the top each time)
            j = find_token(document.body, "\\lang newzealand", 0)
            if j == -1:
                return
            else:
                document.body[j] = document.body[j].replace("\\lang newzealand", "\\lang english")
        else:
            document.body[j] = document.body[j].replace("\\lang australian", "\\lang english")
        j += 1
|
2011-12-18 21:27:17 +00:00
|
|
|
|
2011-12-07 22:33:25 +00:00
|
|
|
|
2011-12-12 14:40:34 +00:00
|
|
|
def convert_biblio_style(document):
    "Add a sensible default for \\biblio_style based on the citation engine."
    pos = find_token(document.header, "\\cite_engine", 0)
    if pos == -1:
        return
    # "natbib_authoryear" etc. -> engine family "natbib"
    engine_family = get_value(document.header, "\\cite_engine", pos).split("_")[0]
    default_styles = {"basic": "plain", "natbib": "plainnat", "jurabib": "jurabib"}
    document.header.insert(pos + 1, "\\biblio_style " + default_styles[engine_family])
|
|
|
|
|
|
|
|
|
|
|
|
def revert_biblio_style(document):
    "BibTeX insets with default option use the style defined by \\biblio_style."
    i = find_token(document.header, "\\biblio_style" , 0)
    if i == -1:
        document.warning("No \\biblio_style line. Nothing to do.")
        return

    default_style = get_value(document.header, "\\biblio_style", i)
    del document.header[i]

    # We are looking for bibtex insets having the default option
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of bibtex inset at line " + str(i))
            # NOTE(review): the i += 1 is dead since return follows — other
            # loops in this file use continue here; confirm intent
            i += 1
            return
        k = find_token(document.body, "options", i, j)
        if k != -1:
            options = get_quoted_value(document.body, "options", k)
            if "default" in options.split(","):
                # substitute the now-removed header default into the inset
                document.body[k] = 'options "%s"' \
                    % options.replace("default", default_style)
        i = j
|
|
|
|
|
|
|
|
|
2011-12-18 21:27:17 +00:00
|
|
|
def handle_longtable_captions(document, forward):
    # Shared worker for convert/revert of longtable caption rows.
    # forward == True: tag plain caption rows additionally as endfirsthead.
    # forward == False: strip the head/foot flags from caption rows again.
    begin_table = 0
    while True:
        begin_table = find_token(document.body, '<lyxtabular version=', begin_table)
        if begin_table == -1:
            break
        end_table = find_end_of(document.body, begin_table, '<lyxtabular', '</lyxtabular>')
        if end_table == -1:
            document.warning("Malformed LyX document: Could not find end of table.")
            begin_table += 1
            continue
        fline = find_token(document.body, "<features", begin_table, end_table)
        if fline == -1:
            document.warning("Can't find features for inset at line " + str(begin_table))
            begin_table += 1
            continue
        p = document.body[fline].find("islongtable")
        if p == -1:
            # no longtable
            begin_table += 1
            continue
        numrows = get_option_value(document.body[begin_table], "rows")
        try:
            numrows = int(numrows)
        except:
            document.warning(document.body[begin_table])
            document.warning("Unable to determine rows!")
            begin_table = end_table
            continue
        begin_row = begin_table
        for row in range(numrows):
            begin_row = find_token(document.body, '<row', begin_row, end_table)
            if begin_row == -1:
                document.warning("Can't find row " + str(row + 1))
                break
            end_row = find_end_of(document.body, begin_row, '<row', '</row>')
            if end_row == -1:
                document.warning("Can't find end of row " + str(row + 1))
                break
            if forward:
                # a caption row with no head/foot flag yet gets endfirsthead
                if (get_option_value(document.body[begin_row], 'caption') == 'true' and
                    get_option_value(document.body[begin_row], 'endfirsthead') != 'true' and
                    get_option_value(document.body[begin_row], 'endhead') != 'true' and
                    get_option_value(document.body[begin_row], 'endfoot') != 'true' and
                    get_option_value(document.body[begin_row], 'endlastfoot') != 'true'):
                    document.body[begin_row] = set_option_value(document.body[begin_row], 'caption', 'true", endfirsthead="true')
            elif get_option_value(document.body[begin_row], 'caption') == 'true':
                # reverting: clear every head/foot flag on the caption row
                if get_option_value(document.body[begin_row], 'endfirsthead') == 'true':
                    document.body[begin_row] = set_option_value(document.body[begin_row], 'endfirsthead', 'false')
                if get_option_value(document.body[begin_row], 'endhead') == 'true':
                    document.body[begin_row] = set_option_value(document.body[begin_row], 'endhead', 'false')
                if get_option_value(document.body[begin_row], 'endfoot') == 'true':
                    document.body[begin_row] = set_option_value(document.body[begin_row], 'endfoot', 'false')
                if get_option_value(document.body[begin_row], 'endlastfoot') == 'true':
                    document.body[begin_row] = set_option_value(document.body[begin_row], 'endlastfoot', 'false')
            begin_row = end_row
        # since there could be a tabular inside this one, we
        # cannot jump to end.
        begin_table += 1
|
|
|
|
|
|
|
|
|
|
|
|
def convert_longtable_captions(document):
    "Mark longtable caption rows with the firsthead flag"
    handle_longtable_captions(document, True)
|
|
|
|
|
|
|
|
|
|
|
|
def revert_longtable_captions(document):
    "Strip the head/foot flags from longtable caption rows"
    handle_longtable_captions(document, False)
|
|
|
|
|
|
|
|
|
2012-01-03 21:26:09 +00:00
|
|
|
def convert_use_packages(document):
    "use_xxx yyy => use_package xxx yyy"
    for pkg in ("amsmath", "esint", "mathdots", "mhchem", "undertilde"):
        token = "\\use_%s" % pkg
        pos = find_token(document.header, token, 0)
        if pos == -1:
            continue
        # carry the old on/off/auto value over to the new syntax
        setting = get_value(document.header, token, pos)
        document.header[pos] = "\\use_package %s %s" % (pkg, setting)
|
|
|
|
|
|
|
|
|
|
|
|
def revert_use_packages(document):
    "use_package xxx yyy => use_xxx yyy"
    packages = ["amsmath", "esint", "mhchem", "mathdots", "undertilde"]
    # the order is arbitrary for the use_package version, and not all packages need to be given.
    # Ensure a complete list and correct order (important for older LyX versions and especially lyx2lyx)
    # first loop: find line with first package
    j = -1
    for p in packages:
        regexp = re.compile(r'(\\use_package\s+%s)' % p)
        i = find_re(document.header, regexp, 0)
        if i != -1 and (j < 0 or i < j):
            j = i
    # second loop: replace or insert packages in front of all existing ones
    for p in packages:
        regexp = re.compile(r'(\\use_package\s+%s)' % p)
        i = find_re(document.header, regexp, 0)
        if i != -1:
            # keep the existing on/off/auto value
            value = get_value(document.header, "\\use_package %s" % p, i).split()[1]
            del document.header[i]
            document.header.insert(j, "\\use_%s %s" % (p, value))
        else:
            # package line missing: insert it with the "auto" default
            document.header.insert(j, "\\use_%s 1" % p)
        j += 1
|
2012-01-05 20:53:48 +00:00
|
|
|
|
|
|
|
|
2014-04-25 20:39:22 +00:00
|
|
|
def convert_use_package(document, pkg, commands, oldauto):
    # Generic worker: turn a preamble \usepackage{pkg} into the
    # corresponding \use_package header setting.
    # oldauto defines how the version we are converting from behaves:
    # if it is true, the old version uses the package automatically.
    # if it is false, the old version never uses the package.
    i = find_token(document.header, "\\use_package", 0)
    if i == -1:
        document.warning("Malformed LyX document: Can't find \\use_package.")
        return;
    j = find_token(document.preamble, "\\usepackage{" + pkg + "}", 0)
    if j != -1:
        # package was loaded in the preamble, convert this to header setting for round trip
        document.header.insert(i + 1, "\\use_package " + pkg + " 2") # on
        del document.preamble[j]
    # If oldauto is true we have two options:
    # We can either set the package to auto - this is correct for files in
    # format 425 to 463, and may create a conflict for older files which use
    # any command in commands with a different definition.
    # Or we can look whether any command in commands is used, and set it to
    # auto if not and to off if yes. This will not create a conflict, but will
    # create uncompilable documents for files in format 425 to 463, which use
    # any command in commands.
    # We choose the first option since its error is less likely.
    elif oldauto:
        document.header.insert(i + 1, "\\use_package " + pkg + " 1") # auto
    else:
        # scan all formulas for any of the package's commands
        j = 0
        while True:
            j = find_token(document.body, '\\begin_inset Formula', j)
            if j == -1:
                break
            k = find_end_of_inset(document.body, j)
            if k == -1:
                document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(j))
                j += 1
                continue
            code = "\n".join(document.body[j:k])
            for c in commands:
                if code.find("\\%s" % c) != -1:
                    # at least one of the commands was found - need to switch package off
                    document.header.insert(i + 1, "\\use_package " + pkg + " 0") # off
                    return
            j = k
        # no command was found - set to auto (bug 9069)
        document.header.insert(i + 1, "\\use_package " + pkg + " 1") # auto
|
2012-01-05 20:53:48 +00:00
|
|
|
|
|
|
|
|
2013-02-28 20:03:07 +00:00
|
|
|
def revert_use_package(document, pkg, commands, oldauto):
    """Revert a \\use_package header line for `pkg` to a preamble call.

    `commands` lists the math macros provided by `pkg`; they decide
    whether an "auto"-loaded package must be loaded explicitly.
    """
    # oldauto defines how the version we are reverting to behaves:
    # if it is true, the old version uses the package automatically.
    # if it is false, the old version never uses the package.
    regexp = re.compile(r'(\\use_package\s+%s)' % pkg)
    i = find_re(document.header, regexp, 0)
    value = "1" # default is auto
    if i != -1:
        # header line looks like "\use_package <pkg> <value>"
        value = get_value(document.header, "\\use_package" , i).split()[1]
        del document.header[i]
    if value == "2": # on
        add_to_preamble(document, ["\\usepackage{" + pkg + "}"])
    elif value == "1" and not oldauto: # auto
        # The old version never loads the package by itself, so load it
        # explicitly iff one of its commands occurs in some formula.
        i = 0
        while True:
            i = find_token(document.body, '\\begin_inset Formula', i)
            if i == -1:
                return
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
                i += 1
                continue
            code = "\n".join(document.body[i:j])
            for c in commands:
                if code.find("\\%s" % c) != -1:
                    add_to_preamble(document, ["\\usepackage{" + pkg + "}"])
                    return
            i = j
|
2012-01-03 21:26:09 +00:00
|
|
|
|
|
|
|
|
2014-04-25 20:39:22 +00:00
|
|
|
# math commands provided by mathtools.sty (used to decide whether the
# package must be loaded when reverting/converting \use_package)
mathtools_commands = ["mathclap", "mathllap", "mathrlap", \
                      "lgathered", "rgathered", "vcentcolon", "dblcolon", \
                      "coloneqq", "Coloneqq", "coloneq", "Coloneq", "eqqcolon", \
                      "Eqqcolon", "eqcolon", "Eqcolon", "colonapprox", \
                      "Colonapprox", "colonsim", "Colonsim"]
|
2014-04-25 20:39:22 +00:00
|
|
|
def convert_use_mathtools(document):
    "insert use_package mathtools"
    # delegate to the generic converter (oldauto=False)
    convert_use_package(document, "mathtools", mathtools_commands, False)
|
2013-02-27 12:12:03 +00:00
|
|
|
|
|
|
|
|
2014-04-25 20:39:22 +00:00
|
|
|
def revert_use_mathtools(document):
    "remove use_package mathtools"
    # delegate to the generic reverter (oldauto=False)
    revert_use_package(document, "mathtools", mathtools_commands, False)
|
2012-12-15 12:02:40 +00:00
|
|
|
|
|
|
|
|
2014-04-25 20:39:22 +00:00
|
|
|
# commands provided by stmaryrd.sty but LyX uses other packages:
# boxdot lightning, bigtriangledown, bigtriangleup
#
# NOTE: a missing comma between "Mapsfrom" and "Longmapsto" used to
# concatenate them into the bogus entry "MapsfromLongmapsto", so neither
# command triggered loading of stmaryrd; fixed below.
stmaryrd_commands = ["shortleftarrow", "shortrightarrow", "shortuparrow",
                     "shortdownarrow", "Yup", "Ydown", "Yleft", "Yright",
                     "varcurlyvee", "varcurlywedge", "minuso", "baro",
                     "sslash", "bbslash", "moo", "varotimes", "varoast",
                     "varobar", "varodot", "varoslash", "varobslash",
                     "varocircle", "varoplus", "varominus", "boxast",
                     "boxbar", "boxslash", "boxbslash", "boxcircle",
                     "boxbox", "boxempty", "merge", "vartimes",
                     "fatsemi", "sswarrow", "ssearrow", "curlywedgeuparrow",
                     "curlywedgedownarrow", "fatslash", "fatbslash", "lbag",
                     "rbag", "varbigcirc", "leftrightarroweq",
                     "curlyveedownarrow", "curlyveeuparrow", "nnwarrow",
                     "nnearrow", "leftslice", "rightslice", "varolessthan",
                     "varogreaterthan", "varovee", "varowedge", "talloblong",
                     "interleave", "obar", "obslash", "olessthan",
                     "ogreaterthan", "ovee", "owedge", "oblong", "inplus",
                     "niplus", "nplus", "subsetplus", "supsetplus",
                     "subsetpluseq", "supsetpluseq", "Lbag", "Rbag",
                     "llbracket", "rrbracket", "llparenthesis",
                     "rrparenthesis", "binampersand", "bindnasrepma",
                     "trianglelefteqslant", "trianglerighteqslant",
                     "ntrianglelefteqslant", "ntrianglerighteqslant",
                     "llfloor", "rrfloor", "llceil", "rrceil", "arrownot",
                     "Arrownot", "Mapstochar", "mapsfromchar", "Mapsfromchar",
                     "leftrightarrowtriangle", "leftarrowtriangle",
                     "rightarrowtriangle",
                     "bigcurlyvee", "bigcurlywedge", "bigsqcap", "bigbox",
                     "bigparallel", "biginterleave", "bignplus",
                     "varcopyright", "longarrownot", "Longarrownot",
                     "Mapsto", "mapsfrom", "Mapsfrom", "Longmapsto",
                     "longmapsfrom", "Longmapsfrom"]
|
2014-04-25 20:39:22 +00:00
|
|
|
def convert_use_stmaryrd(document):
    "insert use_package stmaryrd"
    # delegate to the generic converter (oldauto=False)
    convert_use_package(document, "stmaryrd", stmaryrd_commands, False)
|
|
|
|
|
2012-12-15 12:02:40 +00:00
|
|
|
|
2014-04-25 20:39:22 +00:00
|
|
|
def revert_use_stmaryrd(document):
    "remove use_package stmaryrd"
    # delegate to the generic reverter (oldauto=False)
    revert_use_package(document, "stmaryrd", stmaryrd_commands, False)
|
2012-12-15 12:02:40 +00:00
|
|
|
|
|
|
|
|
2014-04-25 20:39:22 +00:00
|
|
|
# math command provided by stackrel.sty
stackrel_commands = ["stackrel"]
|
2012-12-28 18:51:28 +00:00
|
|
|
def convert_use_stackrel(document):
    "insert use_package stackrel"
    # delegate to the generic converter (oldauto=False)
    convert_use_package(document, "stackrel", stackrel_commands, False)
|
2012-12-28 18:51:28 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_use_stackrel(document):
    "remove use_package stackrel"
    # delegate to the generic reverter (oldauto=False)
    revert_use_package(document, "stackrel", stackrel_commands, False)
|
2012-12-28 18:51:28 +00:00
|
|
|
|
|
|
|
|
2012-01-09 13:16:38 +00:00
|
|
|
def convert_cite_engine_type(document):
    "Determine the \\cite_engine_type from the citation engine."
    pos = find_token(document.header, "\\cite_engine", 0)
    if pos == -1:
        return
    engine = get_value(document.header, "\\cite_engine", pos)
    if "_" in engine:
        # e.g. "natbib_authoryear": the type is appended with an underscore
        engine, engine_type = engine.split("_")
    else:
        # legacy engines imply their type
        engine_type = {"basic": "numerical", "jurabib": "authoryear"}[engine]
    # rewrite the engine line and add the new separate type line after it
    document.header[pos] = "\\cite_engine " + engine
    document.header.insert(pos + 1, "\\cite_engine_type " + engine_type)
|
|
|
|
|
|
|
|
|
|
|
|
def revert_cite_engine_type(document):
    "Natbib had the type appended with an underscore."
    pos = find_token(document.header, "\\cite_engine_type" , 0)
    if pos == -1:
        document.warning("No \\cite_engine_type line. Assuming numerical.")
        engine_type = "numerical"
    else:
        engine_type = get_value(document.header, "\\cite_engine_type", pos)
        del document.header[pos]

    # We are looking for the natbib citation engine
    pos = find_token(document.header, "\\cite_engine natbib", 0)
    if pos == -1:
        return
    # re-append the type with an underscore, old-style
    document.header[pos] = "\\cite_engine natbib_" + engine_type
|
|
|
|
|
|
|
|
|
2013-05-16 14:00:54 +00:00
|
|
|
def convert_cite_engine_type_default(document):
    "Convert \\cite_engine_type to default for the basic citation engine."
    # only documents using the basic engine are affected
    if find_token(document.header, "\\cite_engine basic", 0) == -1:
        return
    pos = find_token(document.header, "\\cite_engine_type" , 0)
    if pos == -1:
        return
    document.header[pos] = "\\cite_engine_type default"
|
|
|
|
|
|
|
|
|
|
|
|
def revert_cite_engine_type_default(document):
    """Revert \\cite_engine_type default.

    Revert to numerical for the basic cite engine, otherwise to authoryear."""
    pos = find_token(document.header, "\\cite_engine_type default" , 0)
    if pos == -1:
        return
    uses_basic = find_token(document.header, "\\cite_engine basic", 0) != -1
    engine_type = "numerical" if uses_basic else "authoryear"
    document.header[pos] = "\\cite_engine_type " + engine_type
|
|
|
|
|
|
|
|
|
2014-04-25 20:39:22 +00:00
|
|
|
# math commands provided by cancel.sty
cancel_commands = ["cancel", "bcancel", "xcancel", "cancelto"]
|
2013-02-28 20:03:07 +00:00
|
|
|
# this is the same, as revert_use_cancel() except for the default
def revert_cancel(document):
    "add cancel to the preamble if necessary"
    # oldauto=False here (revert_use_cancel passes True)
    revert_use_package(document, "cancel", cancel_commands, False)
|
2012-01-23 01:49:49 +00:00
|
|
|
|
|
|
|
|
2015-11-24 23:05:44 +00:00
|
|
|
def revert_verbatim(document, starred = False):
    " Revert verbatim environments completely to TeX-code. "
    # Each Verbatim(-*) layout is replaced by an ERT inset containing
    # \begin{verbatim(*)}...\end{verbatim(*)}; consecutive verbatim
    # paragraphs are merged into a single environment.
    i = 0
    consecutive = False

    layout_name = "Verbatim"
    latex_name = "verbatim"
    if starred:
        layout_name = "Verbatim*"
        latex_name = "verbatim*"

    # lines inserted after a verbatim paragraph (closes the ERT)
    subst_end = ['\\end_layout', '', '\\begin_layout Plain Layout',
                 '\\end_layout', '',
                 '\\begin_layout Plain Layout', '', '',
                 '\\backslash', '',
                 'end{%s}' % (latex_name),
                 '\\end_layout', '', '\\end_inset',
                 '', '', '\\end_layout']
    # lines inserted in place of the layout opening (opens the ERT)
    subst_begin = ['\\begin_layout Standard', '\\noindent',
                   '\\begin_inset ERT', 'status open', '',
                   '\\begin_layout Plain Layout', '', '', '\\backslash',
                   'begin{%s}' % (latex_name),
                   '\\end_layout', '', '\\begin_layout Plain Layout', '']

    while 1:
        i = find_token(document.body, "\\begin_layout %s" % (layout_name), i)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of %s layout" \
              % (layout_name))
            i += 1
            continue
        # delete all line breaks insets (there are no other insets)
        l = i
        while 1:
            n = find_token(document.body, "\\begin_inset Newline newline", l, j)
            if n == -1:
                n = find_token(document.body, "\\begin_inset Newline linebreak", l, j)
                if n == -1:
                    break
            m = find_end_of_inset(document.body, n)
            del(document.body[m:m+1])
            # replace the newline inset by a paragraph break
            document.body[n:n+1] = ['\\end_layout', '', '\\begin_layout Plain Layout']
            l += 1
            # we deleted a line, so the end of the inset moved forward.
            # FIXME But we also added some lines, didn't we? I think this
            # should be j += 1.
            j -= 1
        # consecutive verbatim environments need to be connected
        k = find_token(document.body, "\\begin_layout %s" % (layout_name), j)
        if k == j + 2 and consecutive == False:
            # first paragraph of a run: open the ERT
            consecutive = True
            document.body[j:j+1] = ['\\end_layout', '', '\\begin_layout Plain Layout']
            document.body[i:i+1] = subst_begin
            continue
        if k == j + 2 and consecutive == True:
            # middle paragraph of a run: just continue the open ERT
            document.body[j:j+1] = ['\\end_layout', '', '\\begin_layout Plain Layout']
            del(document.body[i:i+1])
            continue
        if k != j + 2 and consecutive == True:
            # last paragraph of a run: close the ERT
            document.body[j:j+1] = subst_end
            # the next paragraph must not be indented
            # FIXME This seems to be causing problems, because of the
            # hardcoded use of 19. We should figure out exactly where
            # this needs to go by searching for the right tag.
            document.body[j+19:j+19] = ['\\noindent']
            del(document.body[i:i+1])
            consecutive = False
            continue
        else:
            # isolated verbatim paragraph: open and close in one go
            document.body[j:j+1] = subst_end
            # the next paragraph must not be indented
            # FIXME This seems to be causing problems, because of the
            # hardcoded use of 19. We should figure out exactly where
            # this needs to go by searching for the right tag.
            document.body[j+19:j+19] = ['\\noindent']
            document.body[i:i+1] = subst_begin
|
|
|
|
|
|
|
|
|
2012-03-06 07:54:22 +00:00
|
|
|
def revert_tipa(document):
    " Revert native TIPA insets to mathed or ERT. "
    i = 0
    while 1:
        i = find_token(document.body, "\\begin_inset IPA", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of IPA inset")
            i += 1
            continue
        Multipar = False
        n = find_token(document.body, "\\begin_layout", i, j)
        if n == -1:
            document.warning("Malformed LyX document: IPA inset has no embedded layout")
            i += 1
            continue
        m = find_end_of_layout(document.body, n)
        if m == -1:
            document.warning("Malformed LyX document: Can't find end of embedded layout")
            i += 1
            continue
        content = document.body[n+1:m]
        # a second layout or a multi-line first layout means multiple pars
        p = find_token(document.body, "\\begin_layout", m, j)
        if p != -1 or len(content) > 1:
            Multipar = True
            content = document.body[i+1:j]
        if Multipar:
            # IPA insets with multiple pars need to be wrapped by \begin{IPA}...\end{IPA}
            document.body[i:j+1] = ['\\end_layout', '', '\\begin_layout Standard'] + put_cmd_in_ert("\\begin{IPA}") + ['\\end_layout'] + content + ['\\begin_layout Standard'] + put_cmd_in_ert("\\end{IPA}")
            add_to_preamble(document, ["\\usepackage{tipa,tipx}"])
        else:
            # single-par IPA insets can be reverted to mathed
            document.body[i:j+1] = ["\\begin_inset Formula $\\text{\\textipa{" + content[0] + "}}$", "\\end_inset"]
        i = j
|
|
|
|
|
|
|
|
|
2012-03-16 01:29:37 +00:00
|
|
|
def revert_cell_rotation(document):
    """Revert cell rotations to TeX-code.

    rotate="0" is dropped, rotate="90" becomes the old rotate="true",
    and any other angle is removed and wrapped in a \\begin{turn}{angle}
    ERT pair (which requires the rotating package).
    """
    load_rotating = False
    i = 0
    # all three branches strip the same attribute; compile once
    rgx = re.compile(r' rotate="[^"]+?"')
    try:
        while True:
            # first, let's find out if we need to do anything
            i = find_token(document.body, '<cell ', i)
            if i == -1:
                return
            j = document.body[i].find('rotate="')
            if j != -1:
                k = document.body[i].find('"', j + 8)
                value = document.body[i][j + 8 : k]
                if value == "0":
                    # remove rotate option
                    document.body[i] = rgx.sub('', document.body[i])
                elif value == "90":
                    # the old format only knows rotate="true" (i.e. 90 deg)
                    document.body[i] = rgx.sub(' rotate="true"', document.body[i])
                else:
                    load_rotating = True
                    # remove rotate option
                    document.body[i] = rgx.sub('', document.body[i])
                    # write ERT (end first so the start offset stays valid)
                    document.body[i + 5 : i + 5] = \
                        put_cmd_in_ert("\\end{turn}")
                    document.body[i + 4 : i + 4] = \
                        put_cmd_in_ert("\\begin{turn}{" + value + "}")
            i += 1
    finally:
        if load_rotating:
            # BUGFIX: was "{\usepackage{...}}" — an invalid \u escape in a
            # non-raw string (SyntaxError on Python 3); backslash now doubled.
            add_to_preamble(document, ["\\@ifundefined{turnbox}{\\usepackage{rotating}}{}"])
|
|
|
|
|
|
|
|
|
|
|
|
def convert_cell_rotation(document):
    'Convert cell rotation statements from "true" to "90"'
    rotate_re = re.compile(r'rotate="[^"]+?"')
    pos = 0
    while True:
        # first, let's find out if we need to do anything
        pos = find_token(document.body, '<cell ', pos)
        if pos == -1:
            return
        line = document.body[pos]
        if 'rotate="true"' in line:
            # convert "true" to "90"
            document.body[pos] = rotate_re.sub('rotate="90"', line)
        pos += 1
|
|
|
|
|
|
|
|
|
2012-03-21 22:04:45 +00:00
|
|
|
def revert_table_rotation(document):
    """Revert table rotations to TeX-code.

    rotate="0" is dropped, rotate="90" becomes the old rotate="true",
    and any other angle is removed and the whole table wrapped in a
    \\begin{turn}{angle} ERT pair (requires the rotating package).
    """
    load_rotating = False
    i = 0
    try:
        while True:
            # first, let's find out if we need to do anything
            i = find_token(document.body, '<features ', i)
            if i == -1:
                return
            j = document.body[i].find('rotate="')
            if j != -1:
                end_table = find_token(document.body, '</lyxtabular>', j)
                k = document.body[i].find('"', j + 8)
                value = document.body[i][j + 8 : k]
                if value == "0":
                    rgx = re.compile(r' rotate="[^"]+?"')
                    # remove rotate option
                    document.body[i] = rgx.sub('', document.body[i])
                elif value == "90":
                    rgx = re.compile(r'rotate="[^"]+?"')
                    document.body[i] = rgx.sub('rotate="true"', document.body[i])
                else:
                    rgx = re.compile(r' rotate="[^"]+?"')
                    load_rotating = True
                    # remove rotate option
                    document.body[i] = rgx.sub('', document.body[i])
                    # write ERT around the table (end first, start second)
                    document.body[end_table + 3 : end_table + 3] = \
                        put_cmd_in_ert("\\end{turn}")
                    document.body[i - 2 : i - 2] = \
                        put_cmd_in_ert("\\begin{turn}{" + value + "}")
            i += 1
    finally:
        if load_rotating:
            # BUGFIX: was "{\usepackage{...}}" — an invalid \u escape in a
            # non-raw string (SyntaxError on Python 3); backslash now doubled.
            add_to_preamble(document, ["\\@ifundefined{turnbox}{\\usepackage{rotating}}{}"])
|
|
|
|
|
|
|
|
|
|
|
|
def convert_table_rotation(document):
    'Convert table rotation statements from "true" to "90"'
    rotate_re = re.compile(r'rotate="[^"]+?"')
    pos = 0
    while True:
        # first, let's find out if we need to do anything
        pos = find_token(document.body, '<features ', pos)
        if pos == -1:
            return
        line = document.body[pos]
        if 'rotate="true"' in line:
            # convert "true" to "90"
            document.body[pos] = rotate_re.sub('rotate="90"', line)
        pos += 1
|
|
|
|
|
|
|
|
|
2012-04-16 19:40:59 +00:00
|
|
|
def convert_listoflistings(document):
    r'Convert ERT \lstlistoflistings to TOC lstlistoflistings inset'
    # We can support roundtrip because the command is so simple
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset ERT", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of ERT inset")
            i += 1
            continue
        ert = get_ert(document.body, i)
        if ert == "\\lstlistoflistings{}":
            # replace the whole ERT by the native TOC inset
            document.body[i:j] = ["\\begin_inset CommandInset toc", "LatexCommand lstlistoflistings", ""]
            i = i + 4
        else:
            i = j + 1
|
|
|
|
|
|
|
|
|
|
|
|
def revert_listoflistings(document):
    'Convert TOC lstlistoflistings inset to ERT lstlistoflistings'
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset toc", i)
        if i == -1:
            return
        if document.body[i+1] == "LatexCommand lstlistoflistings":
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of TOC inset")
                i += 1
                continue
            subst = put_cmd_in_ert("\\lstlistoflistings{}")
            document.body[i:j+1] = subst
            # the command needs the listings package
            add_to_preamble(document, ["\\usepackage{listings}"])
        i += 1
|
2012-04-16 19:40:59 +00:00
|
|
|
|
|
|
|
|
2012-05-06 18:48:04 +00:00
|
|
|
def convert_use_amssymb(document):
    """Insert a \\use_package amssymb header line.

    The new line inherits the amsmath setting, except that an explicit
    \\usepackage{amssymb} in the preamble forces it to "on" (2) and is
    removed from the preamble.
    """
    regexp = re.compile(r'(\\use_package\s+amsmath)')
    i = find_re(document.header, regexp, 0)
    if i == -1:
        document.warning("Malformed LyX document: Can't find \\use_package amsmath.")
        return
    value = get_value(document.header, "\\use_package" , i).split()[1]
    useamsmath = 0
    try:
        useamsmath = int(value)
    except ValueError:
        # BUGFIX: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; int() only raises ValueError here.
        document.warning("Invalid \\use_package amsmath: " + value + ". Assuming auto.")
        useamsmath = 1
    j = find_token(document.preamble, "\\usepackage{amssymb}", 0)
    if j == -1:
        # no explicit load: mirror the amsmath setting
        document.header.insert(i + 1, "\\use_package amssymb %d" % useamsmath)
    else:
        # explicitly loaded in the preamble: turn the package on and
        # drop the preamble line (round-trip support)
        document.header.insert(i + 1, "\\use_package amssymb 2")
        del document.preamble[j]
|
|
|
|
|
|
|
|
|
|
|
|
def revert_use_amssymb(document):
    "remove use_package amssymb"
    amsmath_pos = find_re(document.header, re.compile(r'(\\use_package\s+amsmath)'), 0)
    amssymb_pos = find_re(document.header, re.compile(r'(\\use_package\s+amssymb)'), 0)
    amsmath_val = "1" # default is auto
    amssymb_val = "1" # default is auto
    if amsmath_pos != -1:
        amsmath_val = get_value(document.header, "\\use_package", amsmath_pos).split()[1]
    if amssymb_pos != -1:
        amssymb_val = get_value(document.header, "\\use_package", amssymb_pos).split()[1]
        del document.header[amssymb_pos]
    # amssymb must be loaded explicitly only if it was "on" while
    # amsmath had a different setting
    if amssymb_val == "2" and amsmath_val != amssymb_val:
        add_to_preamble(document, ["\\usepackage{amssymb}"])
|
|
|
|
|
|
|
|
|
2013-02-27 12:12:03 +00:00
|
|
|
def convert_use_cancel(document):
    "insert use_package cancel"
    # delegate to the generic converter (oldauto=True)
    convert_use_package(document, "cancel", cancel_commands, True)
|
2013-02-27 12:12:03 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_use_cancel(document):
    "remove use_package cancel"
    # delegate to the generic reverter (oldauto=True)
    revert_use_package(document, "cancel", cancel_commands, True)
|
2013-02-27 12:12:03 +00:00
|
|
|
|
|
|
|
|
2012-06-08 00:37:36 +00:00
|
|
|
def revert_ancientgreek(document):
    "Set the document language for ancientgreek to greek"
    # downgrade the document language itself
    if document.language == "ancientgreek":
        document.language = "greek"
        pos = find_token(document.header, "\\language", 0)
        if pos != -1:
            document.header[pos] = "\\language greek"
    # downgrade any \lang switches in the body as well
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang ancientgreek", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\lang ancientgreek", "\\lang greek")
        pos += 1
|
|
|
|
|
|
|
|
|
|
|
|
def revert_languages(document):
    "Set the document language for new supported languages to English"
    new_languages = [
        "coptic", "divehi", "hindi", "kurmanji", "lao", "marathi", "occitan", "sanskrit",
        "syriac", "tamil", "telugu", "urdu"
    ]
    for lang in new_languages:
        # downgrade the document language itself
        if document.language == lang:
            document.language = "english"
            pos = find_token(document.header, "\\language", 0)
            if pos != -1:
                document.header[pos] = "\\language english"
        # downgrade every \lang switch for this language in the body,
        # regardless of the document language
        old_tag = "\\lang " + lang
        pos = 0
        while True:
            pos = find_token(document.body, old_tag, pos)
            if pos == -1:
                break
            document.body[pos] = document.body[pos].replace(old_tag, "\\lang english")
            pos += 1
|
|
|
|
|
|
|
|
|
2012-06-21 23:12:43 +00:00
|
|
|
def convert_armenian(document):
    "Use polyglossia and thus non-TeX fonts for Armenian"
    if document.language != "armenian":
        return
    pos = find_token(document.header, "\\use_non_tex_fonts", 0)
    if pos != -1:
        document.header[pos] = "\\use_non_tex_fonts true"
|
|
|
|
|
|
|
|
|
|
|
|
def revert_armenian(document):
    "Use ArmTeX and thus TeX fonts for Armenian"
    if document.language != "armenian":
        return
    pos = find_token(document.header, "\\use_non_tex_fonts", 0)
    if pos != -1:
        document.header[pos] = "\\use_non_tex_fonts false"
|
|
|
|
|
|
|
|
|
2012-08-17 12:11:02 +00:00
|
|
|
def revert_libertine(document):
    " Revert native libertine font definition to LaTeX "
    # only applies when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_roman libertine", 0)
        if i != -1:
            osf = False
            j = find_token(document.header, "\\font_osf true", 0)
            if j != -1:
                osf = True
            preamble = "\\usepackage"
            if osf:
                # old-style figures are handled by the package option
                document.header[j] = "\\font_osf false"
                preamble += "[osf]"
            else:
                preamble += "[lining]"
            preamble += "{libertine-type1}"
            add_to_preamble(document, [preamble])
            document.header[i] = "\\font_roman default"
|
|
|
|
|
|
|
|
|
2012-08-17 16:24:18 +00:00
|
|
|
def revert_txtt(document):
    " Revert native txtt font definition to LaTeX "
    # only applies when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    pos = find_token(document.header, "\\font_typewriter txtt", 0)
    if pos == -1:
        return
    add_to_preamble(document, ["\\renewcommand{\\ttdefault}{txtt}"])
    document.header[pos] = "\\font_typewriter default"
|
|
|
|
|
|
|
|
|
2012-08-18 12:45:41 +00:00
|
|
|
def revert_mathdesign(document):
    " Revert native mathdesign font definition to LaTeX "
    # only applies when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # maps LyX font names to mathdesign package options
        mathdesign_dict = {
        "mdbch": "charter",
        "mdput": "utopia",
        "mdugm": "garamond"
        }
        i = find_token(document.header, "\\font_roman", 0)
        if i == -1:
            return
        val = get_value(document.header, "\\font_roman", i)
        if val in list(mathdesign_dict.keys()):
            preamble = "\\usepackage[%s" % mathdesign_dict[val]
            expert = False
            # osf and small caps both require the "expert" package option
            j = find_token(document.header, "\\font_osf true", 0)
            if j != -1:
                expert = True
                document.header[j] = "\\font_osf false"
            l = find_token(document.header, "\\font_sc true", 0)
            if l != -1:
                expert = True
                document.header[l] = "\\font_sc false"
            if expert:
                preamble += ",expert"
            preamble += "]{mathdesign}"
            add_to_preamble(document, [preamble])
            document.header[i] = "\\font_roman default"
|
2012-08-19 09:57:48 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_texgyre(document):
    """Revert native TeXGyre font definitions to LaTeX.

    For each font-class header (roman/sans/typewriter) that names a
    TeXGyre family, emit the matching \\usepackage line and reset the
    header to "default". Only applies when TeX fonts are in use.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    texgyre_fonts = ["tgadventor", "tgbonum", "tgchorus", "tgcursor",
                     "tgheros", "tgpagella", "tgschola", "tgtermes"]
    # identical handling for all three font classes (previously the
    # same code was pasted three times)
    for token in ("\\font_roman", "\\font_sans", "\\font_typewriter"):
        i = find_token(document.header, token, 0)
        if i == -1:
            continue
        val = get_value(document.header, token, i)
        if val in texgyre_fonts:
            add_to_preamble(document, ["\\usepackage{%s}" % val])
            document.header[i] = token + " default"
|
2012-08-23 15:42:53 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_ipadeco(document):
    " Revert IPA decorations to ERT "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset IPADeco", i)
        if i == -1:
            return
        end = find_end_of_inset(document.body, i)
        if end == -1:
            document.warning("Can't find end of inset at line " + str(i))
            i += 1
            continue
        line = document.body[i]
        rx = re.compile(r'\\begin_inset IPADeco (.*)$')
        m = rx.match(line)
        decotype = m.group(1)
        if decotype != "toptiebar" and decotype != "bottomtiebar":
            document.warning("Invalid IPADeco type: " + decotype)
            i = end
            continue
        blay = find_token(document.body, "\\begin_layout Plain Layout", i, end)
        if blay == -1:
            document.warning("Can't find layout for inset at line " + str(i))
            i = end
            continue
        bend = find_end_of_layout(document.body, blay)
        if bend == -1:
            document.warning("Malformed LyX document: Could not find end of IPADeco inset's layout.")
            i = end
            continue
        # ERT opening "\<decotype>{" replaces the inset header + layout start
        substi = ["\\begin_inset ERT", "status collapsed", "",
                  "\\begin_layout Plain Layout", "", "", "\\backslash",
                  decotype + "{", "\\end_layout", "", "\\end_inset"]
        # ERT closing "}" replaces the layout end + inset end
        substj = ["\\size default", "", "\\begin_inset ERT", "status collapsed", "",
                  "\\begin_layout Plain Layout", "", "}", "\\end_layout", "", "\\end_inset"]
        # do the later one first so as not to mess up the numbering
        document.body[bend:end + 1] = substj
        document.body[i:blay + 1] = substi
        # skip past the rewritten region, accounting for the size change
        i = end + len(substi) + len(substj) - (end - bend) - (blay - i) - 2
        add_to_preamble(document, "\\usepackage{tipa}")
|
|
|
|
|
|
|
|
|
|
|
|
def revert_ipachar(document):
    ' Revert \\IPAChar to ERT '
    i = 0
    found = False
    while i < len(document.body):
        # matches e.g. "...\IPAChar \tone{55}..." in a body line
        m = re.match(r'(.*)\\IPAChar \\(\w+\{\w+\})(.*)', document.body[i])
        if m:
            found = True
            before = m.group(1)
            ipachar = m.group(2)
            after = m.group(3)
            subst = [before,
                     '\\begin_inset ERT',
                     'status collapsed', '',
                     '\\begin_layout Standard',
                     '', '', '\\backslash',
                     ipachar,
                     '\\end_layout', '',
                     '\\end_inset', '',
                     after]
            document.body[i: i+1] = subst
            # continue after the inserted lines
            i = i + len(subst)
        else:
            i += 1
    if found:
        add_to_preamble(document, "\\usepackage{tone}")
|
2012-09-19 15:46:55 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_minionpro(document):
    " Revert native MinionPro font definition to LaTeX "
    # only applies when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_roman minionpro", 0)
        if i != -1:
            osf = False
            j = find_token(document.header, "\\font_osf true", 0)
            if j != -1:
                osf = True
            preamble = "\\usepackage"
            if osf:
                # old-style figures are MinionPro's default
                document.header[j] = "\\font_osf false"
            else:
                preamble += "[lf]"
            preamble += "{MinionPro}"
            add_to_preamble(document, [preamble])
            document.header[i] = "\\font_roman default"
|
2012-08-18 12:45:41 +00:00
|
|
|
|
2012-09-22 15:44:00 +00:00
|
|
|
|
|
|
|
def revert_mathfonts(document):
    " Revert native math font definitions to LaTeX "

    i = find_token(document.header, "\\font_math", 0)
    if i == -1:
        return
    # Only act when TeX fonts are in use; the \font_math line itself is
    # removed at the end in either case.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        val = get_value(document.header, "\\font_math", i)
        if val == "eulervm":
            add_to_preamble(document, "\\usepackage{eulervm}")
        elif val == "default":
            # Preamble code needed to keep the roman text font while the
            # math font stays at its default.
            mathfont_dict = {
            "lmodern": "\\renewcommand{\\rmdefault}{lmr}",
            "minionpro": "\\usepackage[onlytext,lf]{MinionPro}",
            "minionpro-osf": "\\usepackage[onlytext]{MinionPro}",
            "palatino": "\\renewcommand{\\rmdefault}{ppl}",
            "palatino-osf": "\\renewcommand{\\rmdefault}{pplj}",
            "times": "\\renewcommand{\\rmdefault}{ptm}",
            "utopia": "\\renewcommand{\\rmdefault}{futs}",
            "utopia-osf": "\\renewcommand{\\rmdefault}{futj}",
            }
            j = find_token(document.header, "\\font_roman", 0)
            if j != -1:
                rm = get_value(document.header, "\\font_roman", j)
                # The "-osf" variant keys select the old-style-figures form.
                k = find_token(document.header, "\\font_osf true", 0)
                if k != -1:
                    rm += "-osf"
                if rm in list(mathfont_dict.keys()):
                    add_to_preamble(document, mathfont_dict[rm])
                    document.header[j] = "\\font_roman default"
                    if k != -1:
                        document.header[k] = "\\font_osf false"
    # Drop the \font_math header line; i is still valid because only
    # in-place rewrites (no insertions/deletions) happened above.
    del document.header[i]
|
|
|
|
|
2012-09-23 10:30:19 +00:00
|
|
|
|
|
|
|
def revert_mdnomath(document):
    " Revert mathdesign and fourier without math "

    # Only relevant when TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    # Map LyX mathdesign font names to LaTeX font family codes.
    mathdesign_dict = {
    "md-charter": "mdbch",
    "md-utopia": "mdput",
    "md-garamond": "mdugm"
    }
    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        return
    val = get_value(document.header, "\\font_roman", i)
    if val not in mathdesign_dict:
        return
    j = find_token(document.header, "\\font_math", 0)
    if j == -1:
        # No \font_math header line: just rename the roman font and stop.
        # (Bug fix: the old code fell through and called get_value with
        # j == -1, i.e. an invalid search start index; the net assignment
        # was the same, but via out-of-range indexing.)
        document.header[i] = "\\font_roman %s" % mathdesign_dict[val]
        return
    mval = get_value(document.header, "\\font_math", j)
    if mval == "default":
        # Math stays default: revert the roman font via \rmdefault in
        # the preamble instead of a font header entry.
        document.header[i] = "\\font_roman default"
        add_to_preamble(document, "\\renewcommand{\\rmdefault}{%s}" % mathdesign_dict[val])
    else:
        document.header[i] = "\\font_roman %s" % mathdesign_dict[val]
|
|
|
|
|
|
|
|
|
|
|
|
def convert_mdnomath(document):
    " Change mathdesign font name "

    # Only relevant when TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    # Old mathdesign family codes -> new LyX font names.
    renames = {
        "mdbch": "md-charter",
        "mdput": "md-utopia",
        "mdugm": "md-garamond",
    }
    pos = find_token(document.header, "\\font_roman", 0)
    if pos == -1:
        return
    old_name = get_value(document.header, "\\font_roman", pos)
    if old_name in renames:
        document.header[pos] = "\\font_roman %s" % renames[old_name]
|
|
|
|
|
|
|
|
|
2012-09-23 16:33:04 +00:00
|
|
|
def revert_newtxmath(document):
    " Revert native newtxmath definitions to LaTeX "

    math_line = find_token(document.header, "\\font_math", 0)
    if math_line == -1:
        return
    # Only relevant when TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    # Preamble snippet for each native newtxmath variant.
    packages = {
        "libertine-ntxm": "\\usepackage[libertine]{newtxmath}",
        "minion-ntxm": "\\usepackage[minion]{newtxmath}",
        "newtxmath": "\\usepackage{newtxmath}",
    }
    variant = get_value(document.header, "\\font_math", math_line)
    if variant in packages:
        add_to_preamble(document, packages[variant])
        document.header[math_line] = "\\font_math auto"
|
|
|
|
|
|
|
|
|
2012-09-25 09:07:33 +00:00
|
|
|
def revert_biolinum(document):
    " Revert native biolinum font definition to LaTeX "

    # Only relevant when TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    sans = find_token(document.header, "\\font_sans biolinum", 0)
    if sans == -1:
        return
    # Lining figures ([lf]) are requested unless old-style figures are on.
    # (Unlike MinionPro, the \font_osf flag itself is left untouched.)
    has_osf = find_token(document.header, "\\font_osf true", 0) != -1
    options = "" if has_osf else "[lf]"
    add_to_preamble(document, ["\\usepackage%s{biolinum-type1}" % options])
    document.header[sans] = "\\font_sans default"
|
|
|
|
|
|
|
|
|
2012-10-21 16:55:24 +00:00
|
|
|
def revert_uop(document):
    " Revert native URW Classico (Optima) font definition to LaTeX "

    # Only relevant when TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    sans = find_token(document.header, "\\font_sans uop", 0)
    if sans == -1:
        return
    add_to_preamble(document, ["\\renewcommand{\\sfdefault}{uop}"])
    document.header[sans] = "\\font_sans default"
|
|
|
|
|
|
|
|
|
2012-11-19 13:21:02 +00:00
|
|
|
def convert_latexargs(document):
    " Convert InsetArgument to new syntax "

    if find_token(document.body, "\\begin_inset Argument", 0) == -1:
        # nothing to do.
        return

    # A list of layouts (document classes) with only optional or no arguments.
    # These can be safely converted to the new syntax
    # (I took the liberty to add some of my personal layouts/modules here; JSP)
    safe_layouts = ["aa", "aapaper", "aastex", "achemso", "acmsiggraph", "AEA",
                    "agu-dtd", "agums", "agutex", "amsart", "amsbook", "apa",
                    "arab-article", "armenian-article", "article-beamer", "article",
                    "beamer", "book", "broadway", "chess", "cl2emult", "ctex-article",
                    "ctex-book", "ctex-report", "dinbrief", "docbook-book", "docbook-chapter",
                    "docbook", "docbook-section", "doublecol-new", "dtk", "ectaart", "egs",
                    "elsarticle", "elsart", "entcs", "europecv", "extarticle", "extbook",
                    "extletter", "extreport", "foils", "frletter", "g-brief2", "g-brief",
                    "heb-article", "heb-letter", "hollywood", "IEEEtran", "ijmpc", "ijmpd",
                    "iopart", "isprs", "jarticle", "jasatex", "jbook", "jgrga", "jreport",
                    "jsarticle", "jsbeamer", "jsbook", "jss", "kluwer", "latex8", "letter", "lettre",
                    "literate-article", "literate-book", "literate-report", "llncs", "ltugboat",
                    "memoir", "moderncv", "mwart", "mwbk", "mwrep", "paper", "powerdot",
                    "recipebook", "report", "revtex4", "revtex", "scrartcl", "scrarticle-beamer",
                    "scrbook", "scrlettr", "scrlttr2", "scrreprt", "seminar", "siamltex",
                    "sigplanconf", "simplecv", "singlecol", "singlecol-new", "slides", "spie",
                    "svglobal3", "svglobal", "svjog", "svmono", "svmult", "svprobth", "tarticle",
                    "tbook", "treport", "tufte-book", "tufte-handout"]
    # A list of "safe" modules, same as above
    safe_modules = ["biblatex", "beameraddons", "beamer-resenumerate", "beamersession", "braille",
                    "customHeadersFooters", "endnotes", "enumitem", "eqs-within-sections", "figs-within-sections",
                    "fix-cm", "fixltx2e", "foottoend", "hanging", "jscharstyles", "knitr", "lilypond",
                    "linguistics", "linguisticx", "logicalmkup", "minimalistic", "nomindex", "noweb",
                    "pdfcomment", "sweave", "tabs-within-sections", "theorems-ams-bytype",
                    "theorems-ams-extended-bytype", "theorems-ams-extended", "theorems-ams", "theorems-bytype",
                    "theorems-chap-bytype", "theorems-chap", "theorems-named", "theorems-sec-bytype",
                    "theorems-sec", "theorems-starred", "theorems-std", "todonotes"]
    # Modules we need to take care of
    caveat_modules = ["initials"]
    # information about the relevant styles in caveat_modules (number of opt and req args)
    # use this if we get more caveat_modules. For now, use hard coding (see below).
    # initials = [{'Layout' : 'Initial', 'opt' : 1, 'req' : 1}]

    # Is this a known safe layout?
    safe_layout = document.textclass in safe_layouts
    if not safe_layout:
        document.warning("Lyx2lyx knows nothing about textclass '%s'. "
                         "Please check if short title insets have been converted correctly."
                         % document.textclass)
    # Do we use unsafe or unknown modules
    mods = document.get_module_list()
    unknown_modules = False
    used_caveat_modules = list()
    for mod in mods:
        if mod in safe_modules:
            continue
        if mod in caveat_modules:
            used_caveat_modules.append(mod)
            continue
        unknown_modules = True
        document.warning("Lyx2lyx knows nothing about module '%s'. "
                         "Please check if short title insets have been converted correctly."
                         % mod)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Argument", i)
        if i == -1:
            return

        if not safe_layout or unknown_modules:
            # We cannot do more here since we have no access to this layout.
            # InsetArgument itself will do the real work
            # (see InsetArgument::updateBuffer())
            document.body[i] = "\\begin_inset Argument 999"
            i += 1
            continue

        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            i += 1
            continue
        parbeg = parent[1]
        parend = parent[2]
        # allowed_opts / first_req encode the caveat-module special case;
        # -1 means "no special handling, number arguments sequentially".
        allowed_opts = -1
        first_req = -1
        if len(used_caveat_modules) > 0:
            # We know for now that this must be the initials module with the Initial layout
            # If we get more such modules, we need some automating.
            if parent[0] == "Initial":
                # Layout has 1 opt and 1 req arg.
                # Count the actual arguments
                actualargs = 0
                for p in range(parbeg, parend):
                    if document.body[p] == "\\begin_inset Argument":
                        actualargs += 1
                if actualargs == 1:
                    # Single argument present: it must be the required one (2).
                    allowed_opts = 0
                    first_req = 2
        # Collect all arguments in this paragraph and number them in order
        # of appearance.
        argnr = 0
        for p in range(parbeg, parend):
            if document.body[p] == "\\begin_inset Argument":
                argnr += 1
                if allowed_opts != -1:
                    # We have less arguments than opt + required.
                    # required must take precedence.
                    if argnr > allowed_opts and argnr < first_req:
                        argnr = first_req
                document.body[p] = "\\begin_inset Argument %d" % argnr
        # Continue after this paragraph; in-place rewrites above did not
        # shift any indices.
        i = parend + 1
|
2012-11-19 13:21:02 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_latexargs(document):
    " Revert InsetArgument to old syntax "

    i = 0
    # Numbered form introduced by the new syntax: "\begin_inset Argument N".
    rx = re.compile(r'^\\begin_inset Argument (\d+)$')
    # Collected argument insets of the current paragraph, keyed by number.
    args = dict()
    while True:
        # Search for Argument insets
        i = find_token(document.body, "\\begin_inset Argument", i)
        if i == -1:
            return
        m = rx.match(document.body[i])
        if not m:
            # No ID: inset already reverted
            i += 1
            continue
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            i += 1
            continue
        parbeg = parent[1]
        parend = parent[2]
        # Do not set realparbeg to parent[3], since this does not work if we
        # have another inset (e.g. label or index) before the first argument
        # inset (this is the case in the user guide of LyX 2.0.8)
        realparbeg = -1
        # Collect all arguments in this paragraph
        # realparend tracks the paragraph end while insets are deleted below.
        realparend = parend
        for p in range(parbeg, parend):
            m = rx.match(document.body[p])
            if m:
                if realparbeg < 0:
                    # This is the first argument inset
                    realparbeg = p
                val = int(m.group(1))
                j = find_end_of_inset(document.body, p)
                # Revert to old syntax
                document.body[p] = "\\begin_inset Argument"
                if j == -1:
                    document.warning("Malformed LyX document: Can't find end of Argument inset")
                    continue
                if val > 0:
                    # Stash the whole inset so it can be re-inserted in
                    # numeric order below.
                    args[val] = document.body[p : j + 1]
                # Adjust range end
                realparend = realparend - len(document.body[p : j + 1])
                # Remove arg inset at this position
                del document.body[p : j + 1]
            if p >= realparend:
                break
        if realparbeg < 0:
            # No argument inset found
            realparbeg = parent[3]
        # Now sort the arg insets
        subst = []
        # sorted() yields a snapshot of the keys, so deleting while
        # iterating is safe here.
        for f in sorted(args):
            subst += args[f]
            del args[f]
        # Insert the sorted arg insets at paragraph begin
        document.body[realparbeg : realparbeg] = subst

        # Continue right after the re-inserted insets.
        i = realparbeg + 1 + len(subst)
|
2012-11-19 13:21:02 +00:00
|
|
|
|
|
|
|
|
2013-01-27 04:25:00 +00:00
|
|
|
def revert_IEEEtran(document):
    '''
    Reverts InsetArgument of
    Page headings
    Biography
    Biography without photo
    to TeX-code
    '''
    if document.textclass == "IEEEtran":
        # One independent search cursor per layout/inset kind; a cursor is
        # set to -1 by find_token once its token no longer occurs, and the
        # loop ends when all cursors are exhausted.
        i = 0
        i2 = 0
        j = 0
        k = 0
        while True:
            if i != -1:
                i = find_token(document.body, "\\begin_layout Page headings", i)
                if i != -1:
                    revert_Argument_to_TeX_brace(document, i, 0, 1, 1, False, False)
                    i += 1
            if i2 != -1:
                i2 = find_token(document.body, "\\begin_inset Flex Paragraph Start", i2)
                if i2 != -1:
                    revert_Argument_to_TeX_brace(document, i2, 0, 1, 1, False, False)
                    i2 = i2 + 1
            if j != -1:
                j = find_token(document.body, "\\begin_layout Biography without photo", j)
                if j != -1:
                    revert_Argument_to_TeX_brace(document, j, 0, 1, 1, True, False)
                    j += 1
            if k != -1:
                k = find_token(document.body, "\\begin_layout Biography", k)
                # "Biography" is a prefix of "Biography without photo"; if
                # both searches land on the same line this hit belongs to
                # the j-branch above, so skip it.
                kA = find_token(document.body, "\\begin_layout Biography without photo", k)
                if k == kA and k != -1:
                    k += 1
                    continue
                if k != -1:
                    # start with the second argument, therefore 2
                    revert_Argument_to_TeX_brace(document, k, 0, 2, 2, True, False)
                    k += 1
            if i == -1 and i2 == -1 and j == -1 and k == -1:
                return
|
|
|
|
|
|
|
|
|
|
|
|
def revert_IEEEtran_2(document):
    '''
    Reverts Flex Paragraph Start to TeX-code
    '''
    if document.textclass == "IEEEtran":
        begin = 0
        while True:
            begin = find_token(document.body, "\\begin_inset Flex Paragraph Start", begin)
            if begin == -1:
                return
            end1 = find_end_of_inset(document.body, begin)
            # Replace the inset's closing lines with "}" and its opening
            # lines with "\IEEEPARstart{" as ERT.
            # NOTE(review): the fixed offsets (end1 - 2, begin + 4) assume
            # the exact standard line layout of a Flex inset — confirm
            # against the .lyx format before changing.
            document.body[end1 - 2 : end1 + 1] = put_cmd_in_ert("}")
            document.body[begin : begin + 4] = put_cmd_in_ert("\\IEEEPARstart{")
            begin = begin + 5
|
2013-01-27 04:25:00 +00:00
|
|
|
|
|
|
|
|
2012-11-26 01:50:53 +00:00
|
|
|
def convert_IEEEtran(document):
    '''
    Converts ERT of
    Page headings
    Biography
    Biography without photo
    to InsetArgument
    '''
    if document.textclass == "IEEEtran":
        # One independent search cursor per layout; each becomes -1 when
        # its token is exhausted, and the loop ends when all are -1.
        i = 0
        j = 0
        k = 0
        while True:
            if i != -1:
                i = find_token(document.body, "\\begin_layout Page headings", i)
                if i != -1:
                    convert_TeX_brace_to_Argument(document, i, 1, 1, False, False, False)
                    i += 1
            if j != -1:
                j = find_token(document.body, "\\begin_layout Biography without photo", j)
                if j != -1:
                    convert_TeX_brace_to_Argument(document, j, 1, 1, False, True, False)
                    j += 1
            if k != -1:
                # assure that we don't handle Biography Biography without photo
                k = find_token(document.body, "\\begin_layout Biography", k)
                kA = find_token(document.body, "\\begin_layout Biography without photo", k - 1)
                if k == kA and k != -1:
                    k += 1
                    continue
                if k != -1:
                    # the argument we want to convert is the second one
                    convert_TeX_brace_to_Argument(document, k, 2, 2, False, True, False)
                    k += 1
            if i == -1 and j == -1 and k == -1:
                return
|
|
|
|
|
|
|
|
|
2012-11-26 02:39:40 +00:00
|
|
|
def revert_AASTeX(document):
    " Reverts InsetArgument of Altaffilation to TeX-code "
    if document.textclass != "aastex":
        return
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_layout Altaffilation", pos)
        if pos == -1:
            return
        # Turn argument 1 of the Altaffilation layout back into ERT braces.
        revert_Argument_to_TeX_brace(document, pos, 0, 1, 1, False, False)
        pos += 1
|
2012-11-26 02:39:40 +00:00
|
|
|
|
|
|
|
|
|
|
|
def convert_AASTeX(document):
    " Converts ERT of Altaffilation to InsetArgument "
    if document.textclass != "aastex":
        return
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_layout Altaffilation", pos)
        if pos == -1:
            return
        # Convert the braced ERT argument 1 into an Argument inset.
        convert_TeX_brace_to_Argument(document, pos, 1, 1, False, False, False)
        pos += 1
|
2012-11-26 02:39:40 +00:00
|
|
|
|
|
|
|
|
2012-11-26 03:21:23 +00:00
|
|
|
def revert_AGUTeX(document):
    " Reverts InsetArgument of Author affiliation to TeX-code "
    if document.textclass != "agutex":
        return
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_layout Author affiliation", pos)
        if pos == -1:
            return
        # Turn argument 1 of the layout back into ERT braces.
        revert_Argument_to_TeX_brace(document, pos, 0, 1, 1, False, False)
        pos += 1
|
2012-11-26 03:21:23 +00:00
|
|
|
|
|
|
|
|
|
|
|
def convert_AGUTeX(document):
    " Converts ERT of Author affiliation to InsetArgument "
    if document.textclass != "agutex":
        return
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_layout Author affiliation", pos)
        if pos == -1:
            return
        # Convert the braced ERT argument 1 into an Argument inset.
        convert_TeX_brace_to_Argument(document, pos, 1, 1, False, False, False)
        pos += 1
|
2012-11-26 03:21:23 +00:00
|
|
|
|
|
|
|
|
2012-11-26 04:19:47 +00:00
|
|
|
def revert_IJMP(document):
    " Reverts InsetArgument of MarkBoth to TeX-code "
    if document.textclass not in ("ijmpc", "ijmpd"):
        return
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_layout MarkBoth", pos)
        if pos == -1:
            return
        # Turn argument 1 of MarkBoth back into ERT braces.
        revert_Argument_to_TeX_brace(document, pos, 0, 1, 1, False, False)
        pos += 1
|
2012-11-26 04:19:47 +00:00
|
|
|
|
|
|
|
|
|
|
|
def convert_IJMP(document):
    " Converts ERT of MarkBoth to InsetArgument "
    if document.textclass not in ("ijmpc", "ijmpd"):
        return
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_layout MarkBoth", pos)
        if pos == -1:
            return
        # Convert the braced ERT argument 1 into an Argument inset.
        convert_TeX_brace_to_Argument(document, pos, 1, 1, False, False, False)
        pos += 1
|
2012-11-26 04:19:47 +00:00
|
|
|
|
2012-11-30 00:54:57 +00:00
|
|
|
|
|
|
|
def revert_SIGPLAN(document):
    " Reverts InsetArguments of SIGPLAN to TeX-code "
    if document.textclass == "sigplanconf":
        # Independent cursors for the two layouts; find_token sets a cursor
        # to -1 when its token is exhausted, ending the loop when both are.
        i = 0
        j = 0
        while True:
            if i != -1:
                i = find_token(document.body, "\\begin_layout Conference", i)
                if i != -1:
                    # Conference has a single argument (1).
                    revert_Argument_to_TeX_brace(document, i, 0, 1, 1, False, False)
                    i += 1
            if j != -1:
                j = find_token(document.body, "\\begin_layout Author", j)
                if j != -1:
                    # Author has arguments 1..2.
                    revert_Argument_to_TeX_brace(document, j, 0, 1, 2, False, False)
                    j += 1
            if i == -1 and j == -1:
                return
|
|
|
|
|
|
|
|
|
|
|
|
def convert_SIGPLAN(document):
    " Converts ERT of SIGPLAN to InsetArgument "
    if document.textclass == "sigplanconf":
        # Independent cursors for the two layouts; find_token sets a cursor
        # to -1 when its token is exhausted, ending the loop when both are.
        i = 0
        j = 0
        while True:
            if i != -1:
                i = find_token(document.body, "\\begin_layout Conference", i)
                if i != -1:
                    # Conference has a single argument (1).
                    convert_TeX_brace_to_Argument(document, i, 1, 1, False, False, False)
                    i += 1
            if j != -1:
                j = find_token(document.body, "\\begin_layout Author", j)
                if j != -1:
                    # Author has arguments 1..2.
                    convert_TeX_brace_to_Argument(document, j, 1, 2, False, False, False)
                    j += 1
            if i == -1 and j == -1:
                return
|
|
|
|
|
|
|
|
|
2012-12-02 14:58:14 +00:00
|
|
|
def revert_SIGGRAPH(document):
    " Reverts InsetArgument of Flex CRcat to TeX-code "
    if document.textclass != "acmsiggraph":
        return
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Flex CRcat", pos)
        if pos == -1:
            return
        # The CRcat flex inset carries arguments 1..3.
        revert_Argument_to_TeX_brace(document, pos, 0, 1, 3, False, False)
        pos += 1
|
2012-12-02 14:58:14 +00:00
|
|
|
|
|
|
|
|
|
|
|
def convert_SIGGRAPH(document):
    " Converts ERT of Flex CRcat to InsetArgument "
    if document.textclass != "acmsiggraph":
        return
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Flex CRcat", pos)
        if pos == -1:
            return
        # Convert the braced ERT arguments 1..3 into Argument insets.
        convert_TeX_brace_to_Argument(document, pos, 1, 3, True, False, False)
        pos += 1
|
2012-12-02 14:58:14 +00:00
|
|
|
|
|
|
|
|
2012-12-03 23:55:39 +00:00
|
|
|
def revert_EuropeCV(document):
    " Reverts InsetArguments of europeCV to TeX-code "
    if document.textclass == "europecv":
        # Independent cursors for the four layouts; find_token sets a
        # cursor to -1 when its token is exhausted, ending the loop when
        # all four are.
        i = 0
        j = 0
        k = 0
        m = 0
        while True:
            if i != -1:
                i = find_token(document.body, "\\begin_layout Item", i)
                if i != -1:
                    # Item: argument 2 only.
                    revert_Argument_to_TeX_brace(document, i, 0, 2, 2, False, False)
                    i += 1
            if j != -1:
                j = find_token(document.body, "\\begin_layout BulletedItem", j)
                if j != -1:
                    # BulletedItem: argument 2 only.
                    revert_Argument_to_TeX_brace(document, j, 0, 2, 2, False, False)
                    j += 1
            if k != -1:
                k = find_token(document.body, "\\begin_layout Language", k)
                if k != -1:
                    # Language: arguments 2..6.
                    revert_Argument_to_TeX_brace(document, k, 0, 2, 6, False, False)
                    k += 1
            if m != -1:
                m = find_token(document.body, "\\begin_layout LastLanguage", m)
                if m != -1:
                    # LastLanguage: arguments 2..6.
                    revert_Argument_to_TeX_brace(document, m, 0, 2, 6, False, False)
                    m += 1
            if i == -1 and j == -1 and k == -1 and m == -1:
                return
|
|
|
|
|
|
|
|
|
|
|
|
def convert_EuropeCV(document):
    " Converts ERT of europeCV to InsetArgument "
    if document.textclass == "europecv":
        # Independent cursors for the four layouts; find_token sets a
        # cursor to -1 when its token is exhausted, ending the loop when
        # all four are.
        i = 0
        j = 0
        k = 0
        m = 0
        while True:
            if i != -1:
                i = find_token(document.body, "\\begin_layout Item", i)
                if i != -1:
                    # Item: argument 2 only.
                    convert_TeX_brace_to_Argument(document, i, 2, 2, False, False, False)
                    i += 1
            if j != -1:
                j = find_token(document.body, "\\begin_layout BulletedItem", j)
                if j != -1:
                    # BulletedItem: argument 2 only.
                    convert_TeX_brace_to_Argument(document, j, 2, 2, False, False, False)
                    j += 1
            if k != -1:
                k = find_token(document.body, "\\begin_layout Language", k)
                if k != -1:
                    # Language: arguments 2..6.
                    convert_TeX_brace_to_Argument(document, k, 2, 6, False, False, False)
                    k += 1
            if m != -1:
                m = find_token(document.body, "\\begin_layout LastLanguage", m)
                if m != -1:
                    # LastLanguage: arguments 2..6.
                    convert_TeX_brace_to_Argument(document, m, 2, 6, False, False, False)
                    m += 1
            if i == -1 and j == -1 and k == -1 and m == -1:
                return
|
|
|
|
|
|
|
|
|
2013-01-27 04:25:00 +00:00
|
|
|
def revert_ModernCV(document):
    " Reverts InsetArguments of modernCV to TeX-code "
    if document.textclass == "moderncv":
        # Independent cursors for the five layouts; find_token sets a
        # cursor to -1 when its token is exhausted, ending the loop when
        # all five are.
        j = 0
        k = 0
        m = 0
        o = 0
        p = 0
        while True:
            if j != -1:
                j = find_token(document.body, "\\begin_layout Entry", j)
                if j != -1:
                    # Entry: arguments 1..5.
                    revert_Argument_to_TeX_brace(document, j, 0, 1, 5, False, False)
                    j += 1
            if k != -1:
                k = find_token(document.body, "\\begin_layout Item", k)
                if k != -1:
                    # Item: argument 1 only.
                    revert_Argument_to_TeX_brace(document, k, 0, 1, 1, False, False)
                    k += 1
            if m != -1:
                m = find_token(document.body, "\\begin_layout ItemWithComment", m)
                if m != -1:
                    # ItemWithComment: arguments 1..2, then rename the
                    # layout to Language.
                    revert_Argument_to_TeX_brace(document, m, 0, 1, 2, False, False)
                    document.body[m] = document.body[m].replace("\\begin_layout ItemWithComment", "\\begin_layout Language")
                    m += 1
            if o != -1:
                o = find_token(document.body, "\\begin_layout DoubleItem", o)
                if o != -1:
                    # DoubleItem: arguments 1..3, then rename the layout
                    # to Computer.
                    revert_Argument_to_TeX_brace(document, o, 0, 1, 3, False, False)
                    document.body[o] = document.body[o].replace("\\begin_layout DoubleItem", "\\begin_layout Computer")
                    o = o + 1
            if p != -1:
                p = find_token(document.body, "\\begin_layout Social", p)
                if p != -1:
                    # Social: argument 1, optional form (last flag True).
                    revert_Argument_to_TeX_brace(document, p, 0, 1, 1, False, True)
                    p = p + 1
            if j == -1 and k == -1 and m == -1 and o == -1 and p == -1:
                return
|
|
|
|
|
|
|
|
|
|
|
|
def revert_ModernCV_2(document):
    " Reverts the Flex:Column inset of modernCV to TeX-code "
    # Returns the body position of the end of the last reverted Column
    # inset, or -1 if none was found (used by revert_ModernCV_3).
    if document.textclass == "moderncv":
        flex = 0
        flexEnd = -1
        while True:
            flex = find_token(document.body, "\\begin_inset Flex Column", flex)
            if flex == -1:
                return flexEnd
            flexEnd = find_end_of_inset(document.body, flex)
            # First revert the optional argument (returns whether one was
            # present), then the required argument 2.
            wasOpt = revert_Argument_to_TeX_brace(document, flex, flexEnd, 1, 1, False, True)
            revert_Argument_to_TeX_brace(document, flex, 0, 2, 2, False, False)
            # Re-find the end: the reverts above changed the body length.
            flexEnd = find_end_of_inset(document.body, flex)
            if wasOpt == True:
                document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\cvcolumn")
            else:
                # NOTE(review): the fixed offsets (flex + 4, flexEnd + 4/7)
                # assume the standard flex-inset line layout — confirm
                # against the .lyx format before changing.
                document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\cvcolumn{")
                document.body[flexEnd + 4 : flexEnd + 7] = put_cmd_in_ert("}")
            flex += 1
|
2013-02-16 00:02:32 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_ModernCV_3(document):
    " Reverts the Column style of modernCV to TeX-code "
    if document.textclass == "moderncv":
        # revert the layouts
        revert_ModernCV(document)
        p = 0
        # get the position of the end of the last column inset
        LastFlexEnd = revert_ModernCV_2(document)
        while True:
            p = find_token(document.body, "\\begin_layout Columns", p)
            if p == -1:
                return
            pEnd = find_end_of_layout(document.body, p)
            document.body[p] = document.body[p].replace("\\begin_layout Columns", "\\begin_layout Standard")
            if LastFlexEnd != -1:
                # Wrap everything from this paragraph to the last reverted
                # column in a cvcolumns environment (as ERT).
                # NOTE(review): the fixed offset LastFlexEnd + 24 assumes
                # the exact amount of ERT inserted by revert_ModernCV_2 —
                # confirm before changing either function.
                document.body[p + 1 : p + 1] = put_cmd_in_ert("\\begin{cvcolumns}")
                document.body[LastFlexEnd + 24 : LastFlexEnd + 24] = put_cmd_in_ert("\\end{cvcolumns}")
            p += 1
|
2013-01-27 04:25:00 +00:00
|
|
|
|
|
|
|
|
2013-05-28 23:11:14 +00:00
|
|
|
def revert_ModernCV_4(document):
    " Reverts the style Social to TeX-code "
    if document.textclass == "moderncv":
        # revert the layouts
        revert_ModernCV(document)
        p = 0
        while True:
            p = find_token(document.body, "\\begin_layout Social", p)
            if p == -1:
                return
            pEnd = find_end_of_layout(document.body, p)
            document.body[p] = document.body[p].replace("\\begin_layout Social", "\\begin_layout Standard")
            document.body[p + 1 : p + 1] = put_cmd_in_ert("\\social")
            # Check whether an optional "[" argument follows shortly after the
            # inserted ERT.
            hasOpt = find_token(document.body, "[", p + 9)
            if hasOpt < p + 18:
                # Optional argument present: the mandatory brace goes further
                # down.
                # NOTE(review): the fixed offsets (+30/+41 vs. +11/+21) encode
                # the expected ERT line counts — verify against sample files.
                document.body[p + 30 : p + 30] = put_cmd_in_ert("{")
                document.body[p + 41 : p + 41] = put_cmd_in_ert("}")
            else:
                document.body[p + 11 : p + 11] = put_cmd_in_ert("{")
                document.body[p + 21 : p + 21] = put_cmd_in_ert("}")
            p += 1
def convert_ModernCV(document):
    """ Converts ERT of modernCV to InsetArgument.

    Scans the body for the DoubleItem, Entry, Item and Language layouts in
    lockstep (one find per layout per loop turn); each hit has its brace
    argument(s) converted via convert_TeX_brace_to_Argument.  DoubleItem is
    additionally renamed to its new layout name DoubleListItem.
    """
    if document.textclass != "moderncv":
        return
    i = 0
    j = 0
    k = 0
    m = 0
    while True:
        if i != -1:
            i = find_token(document.body, "\\begin_layout DoubleItem", i)
        if i != -1:
            convert_TeX_brace_to_Argument(document, i, 1, 1, False, False, False)
            # FIX: rename the layout at the line that was actually found.
            # The previous code indexed a separate counter that was
            # initialized to 0 and never advanced, so the replace always
            # targeted document.body[0] and the DoubleItem ->
            # DoubleListItem rename never took effect.
            document.body[i] = document.body[i].replace("\\begin_layout DoubleItem", "\\begin_layout DoubleListItem")
            i += 1
        if j != -1:
            j = find_token(document.body, "\\begin_layout Entry", j)
        if j != -1:
            convert_TeX_brace_to_Argument(document, j, 1, 5, False, False, False)
            j += 1
        if k != -1:
            # NOTE(review): find_token matches line prefixes, so the token
            # "\\begin_layout Item" would also match longer layout names
            # starting with "Item" — confirm moderncv defines none.
            k = find_token(document.body, "\\begin_layout Item", k)
        if k != -1:
            convert_TeX_brace_to_Argument(document, k, 1, 1, False, False, False)
            k += 1
        if m != -1:
            m = find_token(document.body, "\\begin_layout Language", m)
        if m != -1:
            convert_TeX_brace_to_Argument(document, m, 1, 2, False, False, False)
            m += 1
        # All four scans exhausted: done.
        if i == -1 and j == -1 and k == -1 and m == -1:
            return
def revert_Initials(document):
    " Reverts InsetArgument of Initial to TeX-code "
    pos = find_token(document.body, "\\begin_layout Initial", 0)
    while pos != -1:
        # first arg (optional) and second arg (first mandatory) are supported
        # in LyX 2.0.x, so only the third argument needs reverting
        revert_Argument_to_TeX_brace(document, pos, 0, 3, 3, False, False)
        pos = find_token(document.body, "\\begin_layout Initial", pos + 1)
def convert_Initials(document):
    " Converts ERT of Initial to InsetArgument "
    pos = find_token(document.body, "\\begin_layout Initial", 0)
    while pos != -1:
        # turn the third brace argument into a native InsetArgument
        convert_TeX_brace_to_Argument(document, pos, 3, 3, False, False, False)
        pos = find_token(document.body, "\\begin_layout Initial", pos + 1)
def revert_literate(document):
    " Revert Literate document to old format "
    # Only documents that carried the noweb module need reverting.
    if not del_token(document.header, "noweb", 0):
        return
    document.textclass = "literate-" + document.textclass
    pos = find_token(document.body, "\\begin_layout Chunk", 0)
    while pos != -1:
        # Chunk was called Scrap in the old literate classes.
        document.body[pos] = "\\begin_layout Scrap"
        pos = find_token(document.body, "\\begin_layout Chunk", pos + 1)
def convert_literate(document):
    " Convert Literate document to new format"
    tc = find_token(document.header, "\\textclass", 0)
    if tc == -1 or "literate-" not in document.header[tc]:
        return
    # Drop the literate- prefix and load the noweb module instead.
    document.textclass = document.header[tc].replace("\\textclass literate-", "")
    mods = find_token(document.header, "\\begin_modules", 0)
    if mods != -1:
        document.header.insert(mods + 1, "noweb")
    else:
        # No module block yet: create one right after \textclass.
        document.header[tc + 1 : tc + 1] = ["\\begin_modules", "noweb", "\\end_modules"]
    pos = find_token(document.body, "\\begin_layout Scrap", 0)
    while pos != -1:
        # Scrap is called Chunk in the noweb module.
        document.body[pos] = "\\begin_layout Chunk"
        pos = find_token(document.body, "\\begin_layout Scrap", pos + 1)
def revert_itemargs(document):
    " Reverts \\item arguments to TeX-code "
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Argument item:", pos)
        if pos == -1:
            return
        inset_end = find_end_of_inset(document.body, pos)
        # The replacement ERT goes to the start of the surrounding paragraph.
        layout = get_containing_layout(document.body, pos)
        if layout == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            pos += 1
            continue
        par_start = layout[3]
        plain_beg = find_token(document.body, "\\begin_layout Plain Layout", pos)
        plain_end = find_end_of_layout(document.body, plain_beg)
        payload = document.body[plain_beg + 1 : plain_end]
        # Drop the argument inset, then re-insert its content as [payload]
        # at the paragraph start (which precedes the deleted range).
        del document.body[pos : inset_end + 1]
        document.body[par_start : par_start] = \
            put_cmd_in_ert("[") + payload + put_cmd_in_ert("]")
        pos += 1
def revert_garamondx_newtxmath(document):
    " Revert native garamond newtxmath definition to LaTeX "
    pos = find_token(document.header, "\\font_math", 0)
    if pos == -1:
        return
    # Only relevant when the document uses TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    if get_value(document.header, "\\font_math", pos) == "garamondx-ntxm":
        add_to_preamble(document, "\\usepackage[garamondx]{newtxmath}")
        document.header[pos] = "\\font_math auto"
def revert_garamondx(document):
    " Revert native garamond font definition to LaTeX "
    # Only relevant when the document uses TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    pos = find_token(document.header, "\\font_roman garamondx", 0)
    if pos == -1:
        return
    # Old-style figures are requested via the osfI package option.
    use_osf = find_token(document.header, "\\font_osf true", 0) != -1
    options = "[osfI]" if use_osf else ""
    add_to_preamble(document, ["\\usepackage%s{garamondx}" % options])
    document.header[pos] = "\\font_roman default"
def convert_beamerargs(document):
    " Converts beamer arguments to new layout "
    # Renumbers existing Argument insets per layout and strips <overlay>
    # delimiters that were kept as literal text/ERT in the old format.

    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    shifted_layouts = ["Part", "Section", "Subsection", "Subsubsection"]
    list_layouts = ["Itemize", "Enumerate", "Description"]
    rx = re.compile(r'^\\begin_inset Argument (\d+)$')

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Argument", i)
        if i == -1:
            return
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            i += 1
            continue
        parbeg = parent[1]
        parend = parent[2]
        layoutname = parent[0]
        for p in range(parbeg, parend):
            if layoutname in shifted_layouts:
                # All argument numbers move up by one in these layouts.
                m = rx.match(document.body[p])
                if m:
                    argnr = int(m.group(1))
                    argnr += 1
                    document.body[p] = "\\begin_inset Argument %d" % argnr
            if layoutname == "AgainFrame":
                m = rx.match(document.body[p])
                if m:
                    document.body[p] = "\\begin_inset Argument 3"
                    # NOTE(review): the fixed offsets p + 4 / p + 9 assume the
                    # standard inset/ERT header line counts — verify.
                    if document.body[p + 4] == "\\begin_inset ERT":
                        if document.body[p + 9].startswith("<"):
                            # This is an overlay specification
                            # strip off the <
                            document.body[p + 9] = document.body[p + 9][1:]
                            if document.body[p + 9].endswith(">"):
                                # strip off the >
                                document.body[p + 9] = document.body[p + 9][:-1]
                                # Shift this one
                                document.body[p] = "\\begin_inset Argument 2"
            if layoutname in list_layouts:
                m = rx.match(document.body[p])
                if m:
                    if m.group(1) == "1":
                        if document.body[p + 4] == "\\begin_inset ERT":
                            if document.body[p + 9].startswith("<"):
                                # This is an overlay specification
                                # strip off the <
                                document.body[p + 9] = document.body[p + 9][1:]
                                if document.body[p + 9].endswith(">"):
                                    # strip off the >
                                    document.body[p + 9] = document.body[p + 9][:-1]
                        elif document.body[p + 4].startswith("<"):
                            # This is an overlay specification (without ERT)
                            # strip off the <
                            document.body[p + 4] = document.body[p + 4][1:]
                            if document.body[p + 4].endswith(">"):
                                # strip off the >
                                document.body[p + 4] = document.body[p + 4][:-1]
                    elif layoutname != "Itemize":
                        # Shift this one
                        document.body[p] = "\\begin_inset Argument 2"
        i += 1
#
# Helper function for the frame conversion routines
#
# FIXME: This method currently requires the arguments to be either
#   * In one (whole) ERT each: <ERT>[<arg1>]</ERT><ERT><arg2></ERT><ERT>[arg3]</ERT>
#   * Altogether in one whole ERT: <ERT>[<arg1>]<arg2>[arg3]</ERT>
# If individual arguments mix ERT and non-ERT or are split
# over several ERTs, the parsing fails.
def convert_beamerframeargs(document, i, parbeg):
    # Converts leading ERT frame arguments (default overlay [<...>],
    # overlay <...>, option [...]) at parbeg into Argument insets.
    # Returns the end of the last processed ERT (or -1 on a malformed inset).
    ertend = i
    while True:
        # Stop as soon as the paragraph no longer starts with an ERT.
        if document.body[parbeg] != "\\begin_inset ERT":
            return ertend
        ertend = find_end_of_inset(document.body, parbeg)
        if ertend == -1:
            document.warning("Malformed LyX document: missing ERT \\end_inset")
            return ertend
        # First content line of the ERT (after the inset/layout header lines).
        ertcont = parbeg + 5
        if document.body[ertcont].startswith("[<"):
            # This is a default overlay specification
            # strip off the [<
            document.body[ertcont] = document.body[ertcont][2:]
            if document.body[ertcont].endswith(">]"):
                # strip off the >]
                document.body[ertcont] = document.body[ertcont][:-2]
            elif document.body[ertcont].endswith("]"):
                # divide the args
                tok = document.body[ertcont].find('>][')
                if tok != -1:
                    # Split into Argument 2 content + a new Argument 3 inset
                    # (11 lines of inset scaffolding are inserted).
                    subst = [document.body[ertcont][:tok],
                             '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 3',
                             'status collapsed', '', '\\begin_layout Plain Layout',
                             document.body[ertcont][tok + 3:-1]]
                    document.body[ertcont : ertcont + 1] = subst
                    ertend += 11
            # Convert to ArgInset
            document.body[parbeg] = "\\begin_inset Argument 2"
        elif document.body[ertcont].startswith("<"):
            # This is an overlay specification
            # strip off the <
            document.body[ertcont] = document.body[ertcont][1:]
            if document.body[ertcont].endswith(">"):
                # strip off the >
                document.body[ertcont] = document.body[ertcont][:-1]
                # Convert to ArgInset
                document.body[parbeg] = "\\begin_inset Argument 1"
            elif document.body[ertcont].endswith(">]"):
                # divide the args
                tok = document.body[ertcont].find('>[<')
                if tok != -1:
                    document.body[ertcont : ertcont + 1] = [document.body[ertcont][:tok],
                        '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 2',
                        'status collapsed', '', '\\begin_layout Plain Layout',
                        document.body[ertcont][tok + 3:-2]]
                    # Convert to ArgInset
                    document.body[parbeg] = "\\begin_inset Argument 1"
                    ertend += 11
            elif document.body[ertcont].endswith("]"):
                # divide the args
                tok = document.body[ertcont].find('>[<')
                if tok != -1:
                    # divide the args
                    tokk = document.body[ertcont].find('>][')
                    if tokk != -1:
                        # Three args in one ERT: split into Arguments 1, 2
                        # and 3 (22 inserted scaffolding lines).
                        document.body[ertcont : ertcont + 1] = [document.body[ertcont][:tok],
                            '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 2',
                            'status collapsed', '', '\\begin_layout Plain Layout',
                            document.body[ertcont][tok + 3:tokk],
                            '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 3',
                            'status collapsed', '', '\\begin_layout Plain Layout',
                            document.body[ertcont][tokk + 3:-1]]
                        ertend += 22
                else:
                    tokk = document.body[ertcont].find('>[')
                    if tokk != -1:
                        document.body[ertcont : ertcont + 1] = [document.body[ertcont][:tokk],
                            '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 3',
                            'status collapsed', '', '\\begin_layout Plain Layout',
                            document.body[ertcont][tokk + 2:-1]]
                        ertend += 11
                # Convert to ArgInset
                document.body[parbeg] = "\\begin_inset Argument 1"
        elif document.body[ertcont].startswith("["):
            # This is an ERT option
            # strip off the [
            document.body[ertcont] = document.body[ertcont][1:]
            if document.body[ertcont].endswith("]"):
                # strip off the ]
                document.body[ertcont] = document.body[ertcont][:-1]
                # Convert to ArgInset
                document.body[parbeg] = "\\begin_inset Argument 3"
        # Continue with the next inset after the one just handled.
        parbeg = ertend + 3
        continue
    return ertend
|
|
|
def convert_againframe_args(document):
    " Converts beamer AgainFrame to new layout "
    if document.textclass not in ["beamer", "article-beamer", "scrarticle-beamer"]:
        return

    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_layout AgainFrame", pos)
        if pos == -1:
            break
        layout = get_containing_layout(document.body, pos)
        if layout[1] != pos:
            document.warning("Wrong parent layout!")
        layout_end = layout[2]
        # Convert ERT arguments
        # FIXME: See restrictions in convert_beamerframeargs method
        if convert_beamerframeargs(document, pos, layout[3]) == -1:
            break
        # Resume the search after this paragraph.
        pos = layout_end
def convert_corollary_args(document):
    " Converts beamer corrolary-style ERT arguments native InsetArgs "

    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    corollary_layouts = ["Corollary", "Definition", "Definitions", "Example", "Examples", "Fact", "Proof", "Theorem"]
    for lay in corollary_layouts:
        i = 0
        while True:
            i = find_token_exact(document.body, "\\begin_layout " + lay, i)
            if i == -1:
                break
            parent = get_containing_layout(document.body, i)
            if parent[1] != i:
                document.warning("Wrong parent layout!")
            j = parent[2]
            parbeg = parent[3]
            if i != -1:
                if document.body[parbeg] == "\\begin_inset ERT":
                    # First content line of the leading ERT.
                    ertcontfirstline = parbeg + 5
                    # Find the last ERT in this paragraph (which might also be the first)
                    lastertbeg = find_token_backwards(document.body, "\\begin_inset ERT", j)
                    if lastertbeg == -1:
                        document.warning("Last ERT not found!")
                        break
                    lastertend = find_end_of_inset(document.body, lastertbeg)
                    if lastertend == -1:
                        document.warning("End of last ERT not found!")
                        break
                    ertcontlastline = lastertend - 3
                    if document.body[ertcontfirstline].startswith("<"):
                        # This is an overlay specification
                        # strip off the <
                        document.body[ertcontfirstline] = document.body[ertcontfirstline][1:]
                        if document.body[ertcontlastline].endswith(">"):
                            # strip off the >
                            document.body[ertcontlastline] = document.body[ertcontlastline][:-1]
                            if ertcontfirstline < ertcontlastline:
                                # Multiline ERT. Might contain TeX code. Embrace in ERT.
                                # NOTE(review): 'status open' '' below is
                                # adjacent-literal concatenation (one element,
                                # no blank line) — possibly an intended comma;
                                # kept as is, verify against the LyX format.
                                document.body[ertcontlastline : ertcontlastline + 1] = [
                                    document.body[ertcontlastline], '\\end_layout', '', '\\end_inset']
                                document.body[ertcontfirstline : ertcontfirstline + 1] = [
                                    '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 1',
                                    'status collapsed', '', '\\begin_layout Plain Layout',
                                    '\\begin_inset ERT', '', 'status open' '', '\\begin_layout Plain Layout',
                                    document.body[ertcontfirstline]]
                            else:
                                # Convert to ArgInset
                                document.body[parbeg] = "\\begin_inset Argument 1"
                        elif document.body[ertcontlastline].endswith("]"):
                            # divide the args
                            tok = document.body[ertcontfirstline].find('>[')
                            if tok != -1:
                                if ertcontfirstline < ertcontlastline:
                                    # Multiline ERT. Might contain TeX code. Embrace in ERT.
                                    document.body[ertcontlastline : ertcontlastline + 1] = [
                                        document.body[ertcontlastline], '\\end_layout', '', '\\end_inset']
                                    document.body[ertcontfirstline : ertcontfirstline + 1] = [document.body[ertcontfirstline][:tok],
                                        '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 2',
                                        'status collapsed', '', '\\begin_layout Plain Layout',
                                        '\\begin_inset ERT', '', 'status open' '', '\\begin_layout Plain Layout',
                                        document.body[ertcontfirstline][tok + 2:-1]]
                                else:
                                    document.body[ertcontfirstline : ertcontfirstline + 1] = [document.body[ertcontfirstline][:tok],
                                        '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 2',
                                        'status collapsed', '', '\\begin_layout Plain Layout',
                                        document.body[ertcontfirstline][tok + 2:-1]]
                            # Convert to ArgInset
                            document.body[parbeg] = "\\begin_inset Argument 1"
                            i = j
                            continue
                    elif document.body[ertcontlastline].startswith("["):
                        if document.body[ertcontlastline].endswith("]"):
                            # This is an ERT option
                            # strip off the [
                            document.body[ertcontlastline] = document.body[ertcontlastline][1:]
                            # strip off the ]
                            document.body[ertcontlastline] = document.body[ertcontlastline][:-1]
                            # Convert to ArgInset
                            document.body[parbeg] = "\\begin_inset Argument 2"
                else:
                    # No leading ERT: the argument is in plain TeX braces.
                    convert_TeX_brace_to_Argument(document, i, 2, 2, False, True, True)
                    i += 1
                    continue
            i = j
def convert_quote_args(document):
    " Converts beamer quote style ERT args to native InsetArgs "

    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    quote_layouts = ["Uncover", "Only", "Quotation", "Quote", "Verse"]
    for lay in quote_layouts:
        i = 0
        while True:
            i = find_token(document.body, "\\begin_layout " + lay, i)
            if i == -1:
                break
            parent = get_containing_layout(document.body, i)
            if parent[1] != i:
                document.warning("Wrong parent layout!")
            j = parent[2]
            parbeg = parent[3]
            if i != -1:
                if document.body[parbeg] == "\\begin_inset ERT":
                    # NOTE(review): the fixed offsets i + 6 / i + 1 assume the
                    # ERT immediately follows the layout line with the standard
                    # header line counts — verify against sample documents.
                    if document.body[i + 6].startswith("<"):
                        # This is an overlay specification
                        # strip off the <
                        document.body[i + 6] = document.body[i + 6][1:]
                        if document.body[i + 6].endswith(">"):
                            # strip off the >
                            document.body[i + 6] = document.body[i + 6][:-1]
                            # Convert to ArgInset
                            document.body[i + 1] = "\\begin_inset Argument 1"
            # Resume after this paragraph.
            i = j
def cleanup_beamerargs(document):
    " Clean up empty ERTs (conversion artefacts) "

    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Argument", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Argument inset")
            i += 1
            continue
        # Scan the Argument inset for ERT insets and drop the empty ones.
        while True:
            ertbeg = find_token(document.body, "\\begin_inset ERT", i, j)
            if ertbeg == -1:
                break
            ertend = find_end_of_inset(document.body, ertbeg)
            if ertend == -1:
                document.warning("Malformed LyX document: Can't find end of ERT inset")
                break
            stripped = [line for line in document.body[ertbeg : ertend + 1] if line.strip()]
            if len(stripped) == 5:
                # This is an empty ERT: only the five scaffolding lines remain.
                offset = len(document.body[ertbeg : ertend + 1])
                del document.body[ertbeg : ertend + 1]
                # Keep the Argument end index in sync with the deletion.
                j = j - offset
            else:
                # Non-empty: skip past it (also advances the outer search).
                i = ertend
        i += 1
def revert_beamerargs(document):
    " Reverts beamer arguments to old layout "
    # Walks every Argument inset, and depending on the containing layout
    # replaces it with the equivalent ERT (<overlay>, [option] or {arg}).

    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    i = 0
    list_layouts = ["Itemize", "Enumerate", "Description"]
    headings = ["Part", "Section", "Section*", "Subsection", "Subsection*",
                "Subsubsection", "Subsubsection*", "FrameSubtitle", "NoteItem"]
    quote_layouts = ["Uncover", "Only", "Quotation", "Quote", "Verse"]
    corollary_layouts = ["Corollary", "Definition", "Definitions", "Example", "Examples", "Fact", "Proof", "Theorem"]
    rx = re.compile(r'^\\begin_inset Argument (\S+)$')

    while True:
        i = find_token(document.body, "\\begin_inset Argument", i)
        if i == -1:
            return
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            i += 1
            continue
        parbeg = parent[1]
        parend = parent[2]
        realparbeg = parent[3]
        layoutname = parent[0]
        # realparend tracks the paragraph end while lines are added/removed.
        realparend = parend
        for p in range(parbeg, parend):
            if p >= realparend:
                i = realparend
                break
            if layoutname in headings:
                m = rx.match(document.body[p])
                if m:
                    argnr = m.group(1)
                    if argnr == "1":
                        # Find containing paragraph layout
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        endInset = find_end_of_inset(document.body, p)
                        argcontent = document.body[beginPlain + 1 : endPlain]
                        # Adjust range end
                        realparend = realparend - len(document.body[p : endInset + 1])
                        # Remove arg inset
                        del document.body[p : endInset + 1]
                        if layoutname == "FrameSubtitle":
                            pre = put_cmd_in_ert("\\" + layoutname.lower() + "<") + argcontent + put_cmd_in_ert(">")
                        elif layoutname == "NoteItem":
                            pre = put_cmd_in_ert("\\note<") + argcontent + put_cmd_in_ert(">[item]")
                        elif layoutname.endswith('*'):
                            pre = put_cmd_in_ert("\\lyxframeend\\" + layoutname.lower()[:-1] + "<") + argcontent + put_cmd_in_ert(">*")
                        else:
                            pre = put_cmd_in_ert("\\lyxframeend\\" + layoutname.lower() + "<") + argcontent + put_cmd_in_ert(">")
                        # Optional short title (Argument 2) becomes [...].
                        secarg = find_token(document.body, "\\begin_inset Argument 2", parbeg, parend)
                        if secarg != -1:
                            # Find containing paragraph layout
                            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", secarg)
                            endPlain = find_end_of_layout(document.body, beginPlain)
                            endInset = find_end_of_inset(document.body, secarg)
                            argcontent = document.body[beginPlain + 1 : endPlain]
                            # Adjust range end
                            realparend = realparend - len(document.body[secarg : endInset + 1])
                            del document.body[secarg : endInset + 1]
                            pre += put_cmd_in_ert("[") + argcontent + put_cmd_in_ert("]")
                        pre += put_cmd_in_ert("{")
                        document.body[parbeg] = "\\begin_layout Standard"
                        document.body[realparbeg : realparbeg] = pre
                        pe = find_end_of_layout(document.body, parbeg)
                        post = put_cmd_in_ert("}")
                        document.body[pe : pe] = post
                        realparend += len(pre) + len(post)
            if layoutname == "AgainFrame":
                m = rx.match(document.body[p])
                if m:
                    argnr = m.group(1)
                    if argnr == "3":
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        endInset = find_end_of_inset(document.body, p)
                        content = document.body[beginPlain + 1 : endPlain]
                        # Adjust range end
                        realparend = realparend - len(document.body[p : endInset + 1])
                        # Remove arg inset
                        del document.body[p : endInset + 1]
                        subst = put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
                        document.body[realparbeg : realparbeg] = subst
            if layoutname == "Overprint":
                m = rx.match(document.body[p])
                if m:
                    argnr = m.group(1)
                    if argnr == "1":
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        endInset = find_end_of_inset(document.body, p)
                        content = document.body[beginPlain + 1 : endPlain]
                        # Adjust range end
                        realparend = realparend - len(document.body[p : endInset + 1])
                        # Remove arg inset
                        del document.body[p : endInset + 1]
                        subst = put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
                        document.body[realparbeg : realparbeg] = subst
            if layoutname == "OverlayArea":
                m = rx.match(document.body[p])
                if m:
                    argnr = m.group(1)
                    if argnr == "2":
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        endInset = find_end_of_inset(document.body, p)
                        content = document.body[beginPlain + 1 : endPlain]
                        # Adjust range end
                        realparend = realparend - len(document.body[p : endInset + 1])
                        # Remove arg inset
                        del document.body[p : endInset + 1]
                        subst = put_cmd_in_ert("{") + content + put_cmd_in_ert("}")
                        document.body[realparbeg : realparbeg] = subst
            if layoutname in list_layouts:
                m = rx.match(document.body[p])
                if m:
                    argnr = m.group(1)
                    if argnr == "1":
                        # Overlay argument: rewrite the inset content in place.
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        endInset = find_end_of_inset(document.body, p)
                        content = document.body[beginPlain + 1 : endPlain]
                        subst = put_cmd_in_ert("<") + content + put_cmd_in_ert(">")
                        realparend = realparend + len(subst) - len(content)
                        document.body[beginPlain + 1 : endPlain] = subst
                    elif argnr == "item:1":
                        j = find_end_of_inset(document.body, i)
                        # Find containing paragraph layout
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        content = document.body[beginPlain + 1 : endPlain]
                        del document.body[i:j+1]
                        if layoutname == "Description":
                            # Description only has one (overlay) item arg
                            subst = put_cmd_in_ert("<") + content + put_cmd_in_ert(">")
                            # This must be put after the first space (begin of description body
                            # in LyX's awkward description list syntax)
                            # Try to find that place ...
                            rxx = re.compile(r'^([^\\ ]+ )(.*)$')
                            for q in range(parbeg, parend):
                                m = rxx.match(document.body[q])
                                if m:
                                    # We found it. Now insert the ERT argument just there:
                                    document.body[q : q] = [m.group(1), ''] + subst + ['', m.group(2)]
                                    break
                        else:
                            subst = put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
                            document.body[realparbeg : realparbeg] = subst
                    elif argnr == "item:2":
                        j = find_end_of_inset(document.body, i)
                        # Find containing paragraph layout
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        content = document.body[beginPlain + 1 : endPlain]
                        del document.body[i:j+1]
                        subst = put_cmd_in_ert("<") + content + put_cmd_in_ert(">")
                        document.body[realparbeg : realparbeg] = subst
            if layoutname in quote_layouts:
                m = rx.match(document.body[p])
                if m:
                    argnr = m.group(1)
                    if argnr == "1":
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        endInset = find_end_of_inset(document.body, p)
                        content = document.body[beginPlain + 1 : endPlain]
                        # Adjust range end
                        realparend = realparend - len(document.body[p : endInset + 1])
                        # Remove arg inset
                        del document.body[p : endInset + 1]
                        subst = put_cmd_in_ert("<") + content + put_cmd_in_ert(">")
                        document.body[realparbeg : realparbeg] = subst
            if layoutname in corollary_layouts:
                m = rx.match(document.body[p])
                if m:
                    argnr = m.group(1)
                    if argnr == "2":
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        endInset = find_end_of_inset(document.body, p)
                        content = document.body[beginPlain + 1 : endPlain]
                        # Adjust range end
                        realparend = realparend - len(document.body[p : endInset + 1])
                        # Remove arg inset
                        del document.body[p : endInset + 1]
                        subst = put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
                        document.body[realparbeg : realparbeg] = subst

        i = realparend
def revert_beamerargs2(document):
    """Revert beamer InsetArguments to the pre-2.1 layout, step 2.

    Handles three groups of layouts:
    - shifted_layouts: renumber "Argument 2" back to "Argument 1";
    - corollary_layouts: turn "Argument 1" into an ERT overlay spec <...>;
    - OverlayArea: turn "Argument 1" into an ERT spec {...};
    - AgainFrame: turn "Argument 2" into an ERT spec [<...>].
    """
    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    i = 0
    shifted_layouts = ["Part", "Section", "Subsection", "Subsubsection"]
    corollary_layouts = ["Corollary", "Definition", "Definitions", "Example", "Examples", "Fact", "Proof", "Theorem"]
    rx = re.compile(r'^\\begin_inset Argument (\S+)$')

    while True:
        i = find_token(document.body, "\\begin_inset Argument", i)
        if i == -1:
            return
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            i += 1
            continue
        parbeg = parent[1]        # start line of the containing layout
        parend = parent[2]        # end line of the containing layout
        realparbeg = parent[3]    # first line of the paragraph's own content
        layoutname = parent[0]
        # realparend tracks the (shrinking) paragraph end while argument
        # insets are deleted below.
        realparend = parend
        for p in range(parbeg, parend):
            # Deletions may have moved the paragraph end before p.
            if p >= realparend:
                i = realparend
                break
            if layoutname in shifted_layouts:
                m = rx.match(document.body[p])
                if m:
                    argnr = m.group(1)
                    if argnr == "2":
                        # Renumber only; content stays in place.
                        document.body[p] = "\\begin_inset Argument 1"
            if layoutname in corollary_layouts:
                m = rx.match(document.body[p])
                if m:
                    argnr = m.group(1)
                    if argnr == "1":
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        endInset = find_end_of_inset(document.body, p)
                        content = document.body[beginPlain + 1 : endPlain]
                        # Adjust range end
                        realparend = realparend - len(document.body[p : endInset + 1])
                        # Remove arg inset
                        del document.body[p : endInset + 1]
                        # Re-insert the content as an ERT overlay spec at the
                        # start of the paragraph content.
                        subst = put_cmd_in_ert("<") + content + put_cmd_in_ert(">")
                        document.body[realparbeg : realparbeg] = subst
            if layoutname == "OverlayArea":
                m = rx.match(document.body[p])
                if m:
                    argnr = m.group(1)
                    if argnr == "1":
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        endInset = find_end_of_inset(document.body, p)
                        content = document.body[beginPlain + 1 : endPlain]
                        # Adjust range end
                        realparend = realparend - len(document.body[p : endInset + 1])
                        # Remove arg inset
                        del document.body[p : endInset + 1]
                        subst = put_cmd_in_ert("{") + content + put_cmd_in_ert("}")
                        document.body[realparbeg : realparbeg] = subst
            if layoutname == "AgainFrame":
                m = rx.match(document.body[p])
                if m:
                    argnr = m.group(1)
                    if argnr == "2":
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        endInset = find_end_of_inset(document.body, p)
                        content = document.body[beginPlain + 1 : endPlain]
                        # Adjust range end
                        realparend = realparend - len(document.body[p : endInset + 1])
                        # Remove arg inset
                        del document.body[p : endInset + 1]
                        subst = put_cmd_in_ert("[<") + content + put_cmd_in_ert(">]")
                        document.body[realparbeg : realparbeg] = subst
        i = realparend
|
|
|
|
|
def revert_beamerargs3(document):
    """Revert beamer InsetArguments to the pre-2.1 layout, step 3.

    For AgainFrame paragraphs, converts "Argument 1" insets back to an
    ERT overlay specification <...> at the start of the paragraph.
    """
    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    arg_rx = re.compile(r'^\\begin_inset Argument (\S+)$')
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Argument", pos)
        if pos == -1:
            return
        # Locate the paragraph layout containing this argument inset.
        layout = get_containing_layout(document.body, pos)
        if layout == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            pos += 1
            continue
        layoutname = layout[0]
        parbeg = layout[1]
        parend = layout[2]
        realparbeg = layout[3]
        # realparend shrinks as argument insets are deleted below.
        realparend = parend
        for ln in range(parbeg, parend):
            if ln >= realparend:
                pos = realparend
                break
            if layoutname != "AgainFrame":
                continue
            match = arg_rx.match(document.body[ln])
            if not match:
                continue
            if match.group(1) != "1":
                continue
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", ln)
            endPlain = find_end_of_layout(document.body, beginPlain)
            endInset = find_end_of_inset(document.body, ln)
            content = document.body[beginPlain + 1 : endPlain]
            # Shrink the scan range by the size of the inset we remove.
            realparend -= len(document.body[ln : endInset + 1])
            # Drop the argument inset itself.
            del document.body[ln : endInset + 1]
            # Re-insert its content as ERT <...> at the paragraph start.
            document.body[realparbeg : realparbeg] = \
                put_cmd_in_ert("<") + content + put_cmd_in_ert(">")
        pos = realparend
|
|
|
def revert_beamerflex(document):
    """Revert beamer Flex insets to ERT LaTeX commands.

    Each Flex inset is rewritten as ERT of the form
    \\command<arg1>[arg2]{content} (or {arg2} for \\alt), where the
    optional arguments come from "Argument 1"/"Argument 2" insets
    nested inside the Flex inset.
    """
    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    # Maps flex inset name -> LaTeX command it reverts to.
    new_flexes = {"Bold" : "\\textbf", "Emphasize" : "\\emph", "Only" : "\\only",
                  "Uncover" : "\\uncover", "Visible" : "\\visible",
                  "Invisible" : "\\invisible", "Alternative" : "\\alt",
                  "Beamer_Note" : "\\note"}
    old_flexes = {"Alert" : "\\alert", "Structure" : "\\structure"}
    rx = re.compile(r'^\\begin_inset Flex (.+)$')

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Flex", i)
        if i == -1:
            return
        m = rx.match(document.body[i])
        if m:
            flextype = m.group(1)
            # z is the end line of the Flex inset; it is adjusted below as
            # lines are deleted/inserted before it.
            z = find_end_of_inset(document.body, i)
            if z == -1:
                document.warning("Can't find end of Flex " + flextype + " inset.")
                i += 1
                continue
            if flextype in new_flexes:
                pre = put_cmd_in_ert(new_flexes[flextype])
                # Optional overlay argument: \command<...>
                arg = find_token(document.body, "\\begin_inset Argument 1", i, z)
                if arg != -1:
                    argend = find_end_of_inset(document.body, arg)
                    if argend == -1:
                        document.warning("Can't find end of Argument!")
                        i += 1
                        continue
                    # Find containing paragraph layout
                    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg)
                    endPlain = find_end_of_layout(document.body, beginPlain)
                    argcontent = document.body[beginPlain + 1 : endPlain]
                    # Adjust range end
                    z = z - len(document.body[arg : argend + 1])
                    # Remove arg inset
                    del document.body[arg : argend + 1]
                    pre += put_cmd_in_ert("<") + argcontent + put_cmd_in_ert(">")
                # Optional second argument: {...} for \alt, [...] otherwise.
                arg = find_token(document.body, "\\begin_inset Argument 2", i, z)
                if arg != -1:
                    argend = find_end_of_inset(document.body, arg)
                    if argend == -1:
                        document.warning("Can't find end of Argument!")
                        i += 1
                        continue
                    # Find containing paragraph layout
                    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg)
                    endPlain = find_end_of_layout(document.body, beginPlain)
                    argcontent = document.body[beginPlain + 1 : endPlain]
                    # Adjust range end
                    z = z - len(document.body[arg : argend + 1])
                    # Remove arg inset
                    del document.body[arg : argend + 1]
                    if flextype == "Alternative":
                        pre += put_cmd_in_ert("{") + argcontent + put_cmd_in_ert("}")
                    else:
                        pre += put_cmd_in_ert("[") + argcontent + put_cmd_in_ert("]")
                pre += put_cmd_in_ert("{")
                beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
                endPlain = find_end_of_layout(document.body, beginPlain)
                # Adjust range end
                z = z - len(document.body[i : beginPlain + 1])
                z += len(pre)
                # Replace the inset header (up to and including the first
                # Plain Layout line) with the ERT command opener.
                document.body[i : beginPlain + 1] = pre
                post = put_cmd_in_ert("}")
                # Replace the inset's closing lines (presumably
                # '\end_layout', '', '\end_inset' at z-2..z -- standard
                # inset shape) with the closing brace ERT.
                document.body[z - 2 : z + 1] = post
            elif flextype in old_flexes:
                pre = put_cmd_in_ert(old_flexes[flextype])
                # These commands are only reverted when an overlay
                # argument is present.
                arg = find_token(document.body, "\\begin_inset Argument 1", i, z)
                if arg == -1:
                    i += 1
                    continue
                argend = find_end_of_inset(document.body, arg)
                if argend == -1:
                    document.warning("Can't find end of Argument!")
                    i += 1
                    continue
                # Find containing paragraph layout
                beginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg)
                endPlain = find_end_of_layout(document.body, beginPlain)
                argcontent = document.body[beginPlain + 1 : endPlain]
                # Adjust range end
                z = z - len(document.body[arg : argend + 1])
                # Remove arg inset
                del document.body[arg : argend + 1]
                pre += put_cmd_in_ert("<") + argcontent + put_cmd_in_ert(">")
                pre += put_cmd_in_ert("{")
                beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
                endPlain = find_end_of_layout(document.body, beginPlain)
                # Adjust range end
                z = z - len(document.body[i : beginPlain + 1])
                z += len(pre)
                document.body[i : beginPlain + 1] = pre
                post = put_cmd_in_ert("}")
                document.body[z - 2 : z + 1] = post
        i += 1
|
2012-12-09 16:19:21 +00:00
|
|
|
def revert_beamerblocks(document):
    """Revert beamer block arguments to ERT.

    For Block/ExampleBlock/AlertBlock paragraphs, converts
    "Argument 1" to an ERT overlay spec <...> and "Argument 2"
    (the block title) to ERT {...}, both inserted at the start of
    the paragraph content.
    """
    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    blocks = ["Block", "ExampleBlock", "AlertBlock"]

    rx = re.compile(r'^\\begin_inset Argument (\S+)$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Argument", i)
        if i == -1:
            return
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            i += 1
            continue
        parbeg = parent[1]        # start line of the containing layout
        parend = parent[2]        # end line of the containing layout
        realparbeg = parent[3]    # first line of the paragraph's own content
        layoutname = parent[0]
        # realparend tracks the paragraph end while insets are deleted below.
        realparend = parend
        for p in range(parbeg, parend):
            # Deletions may have moved the paragraph end before p.
            if p >= realparend:
                i = realparend
                break
            if layoutname in blocks:
                m = rx.match(document.body[p])
                if m:
                    argnr = m.group(1)
                    if argnr == "1":
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        endInset = find_end_of_inset(document.body, p)
                        content = document.body[beginPlain + 1 : endPlain]
                        # Adjust range end
                        realparend = realparend - len(document.body[p : endInset + 1])
                        # Remove arg inset
                        del document.body[p : endInset + 1]
                        # Overlay specification -> ERT <...>
                        subst = put_cmd_in_ert("<") + content + put_cmd_in_ert(">")
                        document.body[realparbeg : realparbeg] = subst
                    elif argnr == "2":
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        endInset = find_end_of_inset(document.body, p)
                        content = document.body[beginPlain + 1 : endPlain]
                        # Adjust range end
                        realparend = realparend - len(document.body[p : endInset + 1])
                        # Remove arg inset
                        del document.body[p : endInset + 1]
                        # Block title -> ERT {...}
                        subst = put_cmd_in_ert("{") + content + put_cmd_in_ert("}")
                        document.body[realparbeg : realparbeg] = subst
        i = realparend
|
|
|
|
def convert_beamerblocks(document):
    """Convert beamer block ERT args to native InsetArgs.

    Scans Block/ExampleBlock/AlertBlock paragraphs whose content starts
    with an ERT inset holding <overlay> and/or {title} arguments, and
    rewrites those ERT brackets as native "Argument 1" (overlay) and
    "Argument 2" (title) insets. Multiline ERTs that may contain TeX code
    are kept wrapped in ERT inside the new argument inset.

    FIX: all generated '\\begin_inset ERT' bodies used to contain
    "'status open' ''" -- implicit string concatenation caused by a
    missing comma, which dropped the blank separator line after the
    status line (every other inset written here uses "'status collapsed',
    ''"). The comma has been restored in all six occurrences.
    """
    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    blocks = ["Block", "ExampleBlock", "AlertBlock"]
    for lay in blocks:
        i = 0
        while True:
            i = find_token_exact(document.body, "\\begin_layout " + lay, i)
            if i == -1:
                break
            parent = get_containing_layout(document.body, i)
            if parent == False or parent[1] != i:
                document.warning("Wrong parent layout!")
                i += 1
                continue
            parbeg = parent[3]    # first line of the paragraph's own content
            parend = parent[2]    # end line of the layout
            j = parend
            if i != -1:
                # If the paragraph starts with a language switch, adjust parbeg
                if len(document.body[parbeg]) == 0 and parbeg < parend \
                and document.body[parbeg + 1].startswith("\\lang"):
                    parbeg += 2
                if document.body[parbeg] == "\\begin_inset ERT":
                    # First content line inside the ERT (header is 5 lines).
                    ertcontfirstline = parbeg + 5
                    lastertbeg = -1
                    lastertend = -1
                    while True:
                        # Find the last ERT in this paragraph used for arguments
                        # (which might also be the first)
                        lastertbeg = find_token_backwards(document.body, "\\begin_inset ERT", j)
                        if lastertbeg == -1:
                            document.warning("Last ERT not found!")
                            break
                        lastertend = find_end_of_inset(document.body, lastertbeg)
                        if lastertend == -1:
                            document.warning("End of last ERT not found!")
                            break
                        # Is this ERT really used for an argument?
                        # Note: This will fail when non-argument ERTs actually use brackets
                        # (e.g. \pause{})
                        regexp = re.compile(r'.*[>\]\}]', re.IGNORECASE)
                        cbracket = find_re(document.body, regexp, lastertbeg, lastertend)
                        if cbracket != -1:
                            break
                        if lastertbeg == parbeg:
                            break
                        j = lastertbeg - 1
                    if lastertbeg == -1 or lastertend == -1:
                        break
                    # Last content line inside the closing ERT.
                    ertcontlastline = lastertend - 3
                    while True:
                        if document.body[ertcontfirstline].lstrip().startswith("<"):
                            # This is an overlay specification
                            # strip off the <
                            document.body[ertcontfirstline] = document.body[ertcontfirstline].lstrip()[1:]
                            if document.body[ertcontlastline].rstrip().endswith(">"):
                                # strip off the >
                                document.body[ertcontlastline] = document.body[ertcontlastline].rstrip()[:-1]
                                # Convert to ArgInset
                                document.body[parbeg] = "\\begin_inset Argument 1"
                            elif document.body[ertcontlastline].rstrip().endswith("}"):
                                # Overlay spec is followed by a {title}: strip off the }
                                document.body[ertcontlastline] = document.body[ertcontlastline].rstrip()[:-1]
                                # divide the args
                                ertcontdivline = ertcontfirstline
                                tok = document.body[ertcontdivline].find('>{')
                                if tok == -1:
                                    regexp = re.compile(r'.*>\{', re.IGNORECASE)
                                    ertcontdivline = find_re(document.body, regexp, ertcontfirstline, lastertend)
                                    tok = document.body[ertcontdivline].find('>{')
                                if tok != -1:
                                    if ertcontfirstline < ertcontlastline:
                                        # Multiline ERT. Might contain TeX code. Embrace in ERT.
                                        document.body[ertcontlastline : ertcontlastline + 1] = [
                                            document.body[ertcontlastline], '\\end_layout', '', '\\end_inset']
                                        if ertcontdivline == ertcontfirstline:
                                            document.body[ertcontdivline : ertcontdivline + 1] = [document.body[ertcontdivline][:tok],
                                                '\\end_layout', '', '\\end_inset', '',
                                                '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 2',
                                                'status collapsed', '', '\\begin_layout Plain Layout',
                                                '\\begin_inset ERT', '', 'status open', '', '\\begin_layout Plain Layout',
                                                document.body[ertcontdivline][tok + 2:]]
                                        else:
                                            document.body[ertcontdivline : ertcontdivline + 1] = [document.body[ertcontdivline][:tok],
                                                '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 2',
                                                'status collapsed', '', '\\begin_layout Plain Layout',
                                                '\\begin_inset ERT', '', 'status open', '', '\\begin_layout Plain Layout',
                                                document.body[ertcontdivline][tok + 2:]]
                                    else:
                                        document.body[ertcontdivline : ertcontdivline + 1] = [document.body[ertcontdivline][:tok],
                                            '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 2',
                                            'status collapsed', '', '\\begin_layout Plain Layout',
                                            document.body[ertcontdivline][tok + 2:]]
                                else:
                                    # check if have delimiters in two different ERTs
                                    tok = document.body[ertcontdivline].find('>')
                                    if tok == -1:
                                        regexp = re.compile(r'.*>', re.IGNORECASE)
                                        ertcontdivline = find_re(document.body, regexp, ertcontfirstline, lastertend)
                                        tok = document.body[ertcontdivline].find('>')
                                    if tok != -1:
                                        tokk = document.body[ertcontdivline].find('{')
                                        if tokk == -1:
                                            regexp = re.compile(r'.*\{', re.IGNORECASE)
                                            ertcontdivlinetwo = find_re(document.body, regexp, ertcontfirstline, lastertend)
                                            tokk = document.body[ertcontdivlinetwo].find('{')
                                        if tokk != -1:
                                            if ertcontfirstline < ertcontlastline:
                                                # Multiline ERT. Might contain TeX code. Embrace in ERT.
                                                document.body[ertcontlastline : ertcontlastline + 1] = [
                                                    document.body[ertcontlastline], '\\end_layout', '', '\\end_inset']
                                                document.body[ertcontdivline : ertcontdivlinetwo + 1] = [document.body[ertcontdivline][:tok],
                                                    '\\end_layout', '', '\\end_inset', '', '\\end_layout', '',
                                                    '\\end_inset', '', '', '\\begin_inset Argument 2',
                                                    'status collapsed', '', '\\begin_layout Plain Layout',
                                                    '\\begin_inset ERT', '', 'status open', '', '\\begin_layout Plain Layout',
                                                    document.body[ertcontdivlinetwo][tokk + 1:]]
                                            else:
                                                document.body[ertcontdivline : ertcontdivlinetwo + 1] = [document.body[ertcontdivline][:tok],
                                                    '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 2',
                                                    'status collapsed', '', '\\begin_layout Plain Layout',
                                                    document.body[ertcontdivlinetwo][tokk + 1:]]
                                # Convert to ArgInset
                                if ertcontfirstline < ertcontlastline:
                                    # Multiline ERT. Might contain TeX code. Embrace in ERT.
                                    document.body[parbeg : parbeg + 1] = ['\\begin_inset Argument 1',
                                        'status collapsed', '', '\\begin_layout Plain Layout',
                                        '\\begin_inset ERT', '']
                                else:
                                    document.body[parbeg] = "\\begin_inset Argument 1"
                        elif document.body[ertcontfirstline].lstrip().startswith("{"):
                            # This is the block title
                            if document.body[ertcontlastline].rstrip().endswith("}"):
                                # strip off the braces
                                document.body[ertcontfirstline] = document.body[ertcontfirstline].lstrip()[1:]
                                document.body[ertcontlastline] = document.body[ertcontlastline].rstrip()[:-1]
                                if ertcontfirstline < ertcontlastline:
                                    # Multiline ERT. Might contain TeX code. Embrace in ERT.
                                    document.body[parend : parend + 1] = [
                                        document.body[parend], '\\end_inset', '', '\\end_layout']
                                    document.body[parbeg : parbeg + 1] = ['\\begin_inset Argument 2',
                                        'status collapsed', '', '\\begin_layout Plain Layout',
                                        '\\begin_inset ERT', '']
                                else:
                                    # Convert to ArgInset
                                    document.body[parbeg] = "\\begin_inset Argument 2"
                            # the overlay argument can also follow the title, so ...
                            elif document.body[ertcontlastline].rstrip().endswith(">"):
                                # strip off the {
                                document.body[ertcontfirstline] = document.body[ertcontfirstline].lstrip()[1:]
                                # strip off the >
                                document.body[ertcontlastline] = document.body[ertcontlastline].rstrip()[:-1]
                                # divide the args
                                ertcontdivline = ertcontfirstline
                                tok = document.body[ertcontdivline].find('}<')
                                if tok == -1:
                                    regexp = re.compile(r'.*\}<', re.IGNORECASE)
                                    ertcontdivline = find_re(document.body, regexp, ertcontfirstline, lastertend)
                                    tok = document.body[ertcontdivline].find('}<')
                                if tok != -1:
                                    if ertcontfirstline < ertcontlastline:
                                        # Multiline ERT. Might contain TeX code. Embrace in ERT.
                                        document.body[ertcontlastline : ertcontlastline + 1] = [
                                            document.body[ertcontlastline], '\\end_layout', '', '\\end_inset']
                                        if ertcontdivline == ertcontfirstline:
                                            document.body[ertcontdivline : ertcontdivline + 1] = [document.body[ertcontdivline][:tok],
                                                '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 1',
                                                'status collapsed', '', '\\begin_layout Plain Layout',
                                                '\\begin_inset ERT', '', 'status open', '', '\\begin_layout Plain Layout',
                                                document.body[ertcontdivline][tok + 2:]]
                                        else:
                                            document.body[ertcontdivline : ertcontdivline + 1] = [document.body[ertcontdivline][:tok],
                                                '\\end_layout', '', '\\end_inset', '',
                                                '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 1',
                                                'status collapsed', '', '\\begin_layout Plain Layout',
                                                '\\begin_inset ERT', '', 'status open', '', '\\begin_layout Plain Layout',
                                                document.body[ertcontdivline][tok + 2:]]
                                    else:
                                        document.body[ertcontdivline : ertcontdivline + 1] = [document.body[ertcontdivline][:tok],
                                            '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 1',
                                            'status collapsed', '', '\\begin_layout Plain Layout',
                                            document.body[ertcontdivline][tok + 2:]]
                                else:
                                    # check if have delimiters in two different ERTs
                                    tok = document.body[ertcontdivline].find('}')
                                    if tok == -1:
                                        regexp = re.compile(r'.*\}', re.IGNORECASE)
                                        ertcontdivline = find_re(document.body, regexp, ertcontfirstline, lastertend)
                                        tok = document.body[ertcontdivline].find('}')
                                    if tok != -1:
                                        tokk = document.body[ertcontdivline].find('<')
                                        if tokk == -1:
                                            regexp = re.compile(r'.*<', re.IGNORECASE)
                                            ertcontdivlinetwo = find_re(document.body, regexp, ertcontfirstline, lastertend)
                                            tokk = document.body[ertcontdivlinetwo].find('<')
                                        if tokk != -1:
                                            if ertcontfirstline < ertcontlastline:
                                                # Multiline ERT. Might contain TeX code. Embrace in ERT.
                                                document.body[ertcontlastline : ertcontlastline + 1] = [
                                                    document.body[ertcontlastline], '\\end_layout', '', '\\end_inset']
                                                document.body[ertcontdivline : ertcontdivlinetwo + 1] = [document.body[ertcontdivline][:tok],
                                                    '\\end_layout', '', '\\end_inset', '', '\\end_layout', '',
                                                    '\\end_inset', '', '', '\\begin_inset Argument 1',
                                                    'status collapsed', '', '\\begin_layout Plain Layout',
                                                    '\\begin_inset ERT', '', 'status open', '', '\\begin_layout Plain Layout',
                                                    document.body[ertcontdivlinetwo][tokk + 1:]]
                                            else:
                                                document.body[ertcontdivline : ertcontdivlinetwo + 1] = [document.body[ertcontdivline][:tok],
                                                    '\\end_layout', '', '\\end_inset', '', '', '\\begin_inset Argument 1',
                                                    'status collapsed', '', '\\begin_layout Plain Layout',
                                                    document.body[ertcontdivlinetwo][tokk + 1:]]
                                # Convert to ArgInset
                                if ertcontfirstline < ertcontlastline:
                                    # Multiline ERT. Might contain TeX code. Embrace in ERT.
                                    document.body[parbeg : parbeg + 1] = ['\\begin_inset Argument 2',
                                        'status collapsed', '', '\\begin_layout Plain Layout',
                                        '\\begin_inset ERT', '']
                                else:
                                    document.body[parbeg] = "\\begin_inset Argument 2"
                            elif count_pars_in_inset(document.body, ertcontfirstline) > 1:
                                # Multipar ERT. Skip this.
                                break
                            else:
                                # ERT has contents after the closing bracket. We cannot convert this.
                                # convert_TeX_brace_to_Argument cannot either.
                                #convert_TeX_brace_to_Argument(document, i, 2, 2, False, True, False)
                                break
                        else:
                            break
                        # Look for a further argument ERT after the inset we
                        # just converted.
                        j = find_end_of_layout(document.body, i)
                        if j == -1:
                            document.warning("end of layout not found!")
                        k = find_token(document.body, "\\begin_inset Argument", i, j)
                        if k == -1:
                            document.warning("InsetArgument not found!")
                            break
                        l = find_end_of_inset(document.body, k)
                        m = find_token(document.body, "\\begin_inset ERT", l, j)
                        if m == -1:
                            break
                        ertcontfirstline = m + 5
                        parbeg = m
            i = j
|
|
|
|
2012-12-19 18:33:39 +00:00
|
|
|
def convert_overprint(document):
    """Convert old beamer overprint layouts to ERT.

    Wraps a sequence of Overprint paragraphs in literal
    \\begin{overprint} ... \\end{overprint} ERT, reverts an optional
    "Argument 1" inset to a bracketed ERT option, and demotes the
    Overprint paragraphs themselves to Standard.
    """
    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout Overprint", i)
        if i == -1:
            return
        # Find end of sequence
        j = find_end_of_sequence(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document. Cannot find end of Overprint sequence!")
            i += 1
            continue
        # endseq tracks the sequence end while lines are inserted/removed.
        endseq = j
        subst = ["\\begin_layout Standard"] + put_cmd_in_ert("\\begin{overprint}")
        esubst = list()
        # The closing ERT differs depending on whether the sequence ends at
        # an \end_deeper (then the final \end_layout comes separately).
        if document.body[j] == "\\end_deeper":
            esubst = ["", "\\begin_layout Standard"] + put_cmd_in_ert("\\end{overprint}") + ["\\end_layout"]
        else:
            esubst = ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\end{overprint}") + ["\\end_layout"]
        # len(document.body[j : j]) is the empty slice, i.e. always 0.
        endseq = endseq + len(esubst) - len(document.body[j : j])
        document.body[j : j] = esubst
        argbeg = find_token(document.body, "\\begin_inset Argument 1", i, j)
        if argbeg != -1:
            # NOTE(review): argbeg is an inset start, yet its end is searched
            # with find_end_of_layout rather than find_end_of_inset (as the
            # sibling revert_* functions do) -- confirm this is intentional.
            argend = find_end_of_layout(document.body, argbeg)
            if argend == -1:
                document.warning("Malformed LyX document. Cannot find end of Overprint argument!")
                i += 1
                continue
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", argbeg)
            endPlain = find_end_of_layout(document.body, beginPlain)
            content = document.body[beginPlain + 1 : endPlain]
            # Adjust range end
            endseq = endseq - len(document.body[argbeg : argend + 1])
            # Remove arg inset
            del document.body[argbeg : argend + 1]
            subst += put_cmd_in_ert("[") + content + put_cmd_in_ert("]")

        # len(document.body[i : i]) is again the empty slice (0).
        endseq = endseq - len(document.body[i : i])
        document.body[i : i] = subst + ["\\end_layout"]
        endseq += len(subst)

        # Demote every Overprint paragraph in the sequence to Standard.
        for p in range(i, endseq):
            if document.body[p] == "\\begin_layout Overprint":
                document.body[p] = "\\begin_layout Standard"

        i = endseq
|
|
|
|
|
def revert_overprint(document):
    """Revert beamer overprint layouts to ERT.

    Replaces an Overprint sequence by literal \\begin{overprint} ...
    \\end{overprint} ERT, flattens \\begin_deeper/\\end_deeper nesting
    inside it, reverts the sequence's optional "Argument 1" inset to a
    bracketed ERT option, and turns each Overprint paragraph into a
    Standard paragraph starting with an \\onslide<...> ERT command built
    from its "Argument item:1" inset.

    FIXES:
    - The "Is this really our argument?" guard used to read
      ``if nested != -1``, which inverted the intent (an intervening
      \\begin_deeper means the argument belongs to a nested paragraph,
      not to the Overprint itself) and made the revert branch
      unreachable. Now ``== -1``.
    - The endseq adjustment used ``document.body[argbeg : argend]`` while
      the deletion on the next line removes ``argbeg : argend + 1`` --
      off by one versus every sibling function. Now ``argend + 1``.
    """
    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout Overprint", i)
        if i == -1:
            return
        # Find end of sequence
        j = find_end_of_sequence(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document. Cannot find end of Overprint sequence!")
            i += 1
            continue
        # endseq tracks the sequence end while lines are inserted/removed.
        endseq = j
        subst = ["\\begin_layout Standard"] + put_cmd_in_ert("\\begin{overprint}")
        esubst = ["\\begin_layout Standard"] + put_cmd_in_ert("\\end{overprint}")
        # len(document.body[j : j]) is the empty slice, i.e. always 0.
        endseq = endseq + len(esubst) - len(document.body[j : j])
        if document.body[j] == "\\end_deeper":
            document.body[j : j] = [""] + esubst + ["", "\\end_layout"]
        else:
            document.body[j : j] = ["\\end_layout", ""] + esubst
        # Blank out balanced \begin_deeper/\end_deeper pairs in the sequence.
        r = i
        while r < j:
            if document.body[r] == "\\begin_deeper":
                s = find_end_of(document.body, r, "\\begin_deeper", "\\end_deeper")
                if s != -1:
                    document.body[r] = ""
                    document.body[s] = ""
                    r = s
                    continue
            r = r + 1
        argbeg = find_token(document.body, "\\begin_inset Argument 1", i, j)
        if argbeg != -1:
            # Is this really our argument? It is only ours if no (unbalanced,
            # still unblanked) \begin_deeper intervenes before it.
            nested = find_token(document.body, "\\begin_deeper", i, argbeg)
            if nested == -1:
                argend = find_end_of_inset(document.body, argbeg)
                if argend == -1:
                    document.warning("Malformed LyX document. Cannot find end of Overprint argument!")
                    i += 1
                    continue
                beginPlain = find_token(document.body, "\\begin_layout Plain Layout", argbeg)
                endPlain = find_end_of_layout(document.body, beginPlain)
                content = document.body[beginPlain + 1 : endPlain]
                # Adjust range end (include argend itself, matching the
                # deletion below)
                endseq = endseq - len(document.body[argbeg : argend + 1])
                # Remove arg inset
                del document.body[argbeg : argend + 1]
                subst += put_cmd_in_ert("[") + content + put_cmd_in_ert("]")

        endseq = endseq - len(document.body[i : i])
        document.body[i : i] = subst + ["\\end_layout"]
        endseq += len(subst)

        # Turn each remaining Overprint paragraph into a Standard paragraph
        # starting with an \onslide<...> ERT command.
        p = i
        while True:
            if p >= endseq:
                break
            if document.body[p] == "\\begin_layout Overprint":
                q = find_end_of_layout(document.body, p)
                if q == -1:
                    document.warning("Malformed LyX document. Cannot find end of Overprint layout!")
                    p += 1
                    continue
                subst = ["\\begin_layout Standard"] + put_cmd_in_ert("\\onslide")
                argbeg = find_token(document.body, "\\begin_inset Argument item:1", p, q)
                if argbeg != -1:
                    argend = find_end_of_inset(document.body, argbeg)
                    if argend == -1:
                        document.warning("Malformed LyX document. Cannot find end of Overprint item argument!")
                        p += 1
                        continue
                    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", argbeg)
                    endPlain = find_end_of_layout(document.body, beginPlain)
                    content = document.body[beginPlain + 1 : endPlain]
                    # Adjust range end
                    endseq = endseq - len(document.body[argbeg : argend + 1])
                    # Remove arg inset
                    del document.body[argbeg : argend + 1]
                    subst += put_cmd_in_ert("<") + content + put_cmd_in_ert(">")
                endseq = endseq - len(document.body[p : p + 1]) + len(subst)
                document.body[p : p + 1] = subst
            p = p + 1

        i = endseq
2012-12-09 16:19:21 +00:00
|
|
|
|
2012-12-22 15:28:43 +00:00
|
|
|
def revert_frametitle(document):
    " Reverts beamer frametitle layout to ERT "

    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    rx = re.compile(r'^\\begin_inset Argument (\S+)$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout FrameTitle", i)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FrameTitle layout")
            i += 1
            continue
        endlay = j
        # Close the \frametitle{...} group at the end of the layout.
        document.body[j : j] = put_cmd_in_ert("}") + document.body[j : j]
        endlay += len(put_cmd_in_ert("}"))
        subst = ["\\begin_layout Standard"] + put_cmd_in_ert("\\frametitle")
        # Fold the argument insets back into the command:
        # Argument 1 -> overlay spec <...>, Argument 2 -> short title [...].
        for p in range(i, j):
            if p >= endlay:
                break
            m = rx.match(document.body[p])
            if m:
                argnr = m.group(1)
                if argnr == "1":
                    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                    endPlain = find_end_of_layout(document.body, beginPlain)
                    endInset = find_end_of_inset(document.body, p)
                    content = document.body[beginPlain + 1 : endPlain]
                    # Adjust range end
                    endlay = endlay - len(document.body[p : endInset + 1])
                    # Remove arg inset
                    del document.body[p : endInset + 1]
                    subst += put_cmd_in_ert("<") + content + put_cmd_in_ert(">")
                elif argnr == "2":
                    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                    endPlain = find_end_of_layout(document.body, beginPlain)
                    endInset = find_end_of_inset(document.body, p)
                    content = document.body[beginPlain + 1 : endPlain]
                    # Adjust range end
                    endlay = endlay - len(document.body[p : endInset + 1])
                    # Remove arg inset
                    del document.body[p : endInset + 1]
                    subst += put_cmd_in_ert("[") + content + put_cmd_in_ert("]")

        # Open the title group and replace the layout line itself.
        subst += put_cmd_in_ert("{")
        document.body[i : i + 1] = subst
        i = endlay
|
|
|
|
|
|
|
|
|
2012-12-28 11:32:59 +00:00
|
|
|
def convert_epigraph(document):
    " Converts memoir epigraph to new syntax "

    if document.textclass != "memoir":
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout Epigraph", i)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Epigraph layout")
            i += 1
            continue
        endlay = j
        subst = list()
        # An ERT whose content starts with "}{" carries the epigraph
        # source; it becomes an "Argument post:1" inset.
        ert = find_token(document.body, "\\begin_inset ERT", i, j)
        if ert != -1:
            endInset = find_end_of_inset(document.body, ert)
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", ert)
            endPlain = find_end_of_layout(document.body, beginPlain)
            ertcont = beginPlain + 2
            if document.body[ertcont] == "}{":
                # strip off the <
                # Convert to ArgInset
                # NOTE(review): this subtracts twice the *character length*
                # of body line j, not a line count — looks suspicious; confirm.
                endlay = endlay - 2 * len(document.body[j])
                begsubst = ['\\begin_inset Argument post:1', 'status collapsed', '',
                            '\\begin_layout Plain Layout']
                endsubst = ['\\end_layout', '', '\\end_inset', '', document.body[j]]
                # Close the new inset at the layout end first, then open it
                # right after the old ERT inset.
                document.body[j : j + 1] = endsubst
                document.body[endInset + 1 : endInset + 1] = begsubst
                # Adjust range end
                endlay += len(begsubst) + len(endsubst)
                endlay = endlay - len(document.body[ert : endInset + 1])
                del document.body[ert : endInset + 1]

        i = endlay
|
|
|
|
|
|
|
|
|
|
|
|
def revert_epigraph(document):
    """Revert the memoir epigraph source argument back to ERT.

    convert_epigraph() wraps the epigraph source in an
    "\\begin_inset Argument post:1" inset; this emits the raw "}{<source>"
    TeX code at the end of the Epigraph layout instead.
    """

    if document.textclass != "memoir":
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout Epigraph", i)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Epigraph layout")
            i += 1
            continue
        endlay = j
        subst = list()
        # FIX: the argument is an *inset* ("\begin_inset Argument post:1",
        # exactly as written by convert_epigraph), not a layout. The old
        # token "\begin_layout Argument post:1" could never match, so the
        # argument inset was left behind and its content lost on revert
        # (find_end_of_inset below also requires an inset start line).
        p = find_token(document.body, "\\begin_inset Argument post:1", i, j)
        if p != -1:
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
            endPlain = find_end_of_layout(document.body, beginPlain)
            endInset = find_end_of_inset(document.body, p)
            content = document.body[beginPlain + 1 : endPlain]
            # Adjust range end
            endlay = endlay - len(document.body[p : endInset + 1])
            # Remove arg inset
            del document.body[p : endInset + 1]
            subst += put_cmd_in_ert("}{") + content
        else:
            # No source argument: still close/reopen the epigraph groups.
            subst += put_cmd_in_ert("}{")

        document.body[j : j] = subst + document.body[j : j]
        i = endlay
|
|
|
|
|
2012-12-22 15:28:43 +00:00
|
|
|
|
2012-12-30 17:29:02 +00:00
|
|
|
def convert_captioninsets(document):
    " Converts caption insets to new syntax "

    # Every plain "\begin_inset Caption" becomes the explicit
    # "Standard" caption variant introduced with LyX 2.1.
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Caption", pos)
        if pos == -1:
            return
        document.body[pos] = "\\begin_inset Caption Standard"
        pos += 1
|
2012-12-30 17:29:02 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_captioninsets(document):
    " Reverts caption insets to old syntax "

    # Drop the explicit "Standard" variant again, restoring the
    # pre-2.1 plain "\begin_inset Caption" form.
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Caption Standard", pos)
        if pos == -1:
            return
        document.body[pos] = "\\begin_inset Caption"
        pos += 1
|
2012-12-30 17:29:02 +00:00
|
|
|
|
|
|
|
|
|
|
|
def convert_captionlayouts(document):
    """Convert caption paragraph layouts to caption insets.

    An old-style caption layout (e.g. "Captionabove") becomes a default
    paragraph containing a "\\begin_inset Caption <type>" inset that in
    turn holds the caption text in its own paragraph.
    """

    # Map old layout name -> new caption inset type.
    caption_dict = {
        "Captionabove": "Above",
        "Captionbelow": "Below",
        "FigCaption": "FigCaption",
        "Table_Caption": "Table",
        "CenteredCaption": "Centered",
        "Bicaption": "Bicaption",
        }

    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout", i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_layout", i)
        # Direct mapping membership (was: `val in list(caption_dict.keys())`,
        # which builds a throwaway list and scans it linearly).
        if val in caption_dict:
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Missing `\\end_layout'.")
                return

            # Close the inner paragraph and the caption inset ...
            document.body[j:j] = ["\\end_layout", "", "\\end_inset", "", ""]
            # ... and open a default paragraph wrapping the new inset.
            document.body[i:i+1] = ["\\begin_layout %s" % document.default_layout,
                                    "\\begin_inset Caption %s" % caption_dict[val], "",
                                    "\\begin_layout %s" % document.default_layout]
        i += 1
|
2012-12-30 17:29:02 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_captionlayouts(document):
    " Revert caption insets to caption layouts. "

    # Map caption inset type -> old layout name.
    caption_dict = {
        "Above" : "Captionabove",
        "Below" : "Captionbelow",
        "FigCaption" : "FigCaption",
        "Table" : "Table_Caption",
        "Centered" : "CenteredCaption",
        "Bicaption" : "Bicaption",
        }

    i = 0
    rx = re.compile(r'^\\begin_inset Caption (\S+)$')
    while True:
        i = find_token(document.body, "\\begin_inset Caption", i)
        if i == -1:
            return

        m = rx.match(document.body[i])
        val = ""
        if m:
            val = m.group(1)
        if val not in list(caption_dict.keys()):
            i += 1
            continue

        # We either need to delete the previous \begin_layout line, or we
        # need to end the previous layout if this inset is not in the first
        # position of the paragraph.
        layout_before = find_token_backwards(document.body, "\\begin_layout", i)
        if layout_before == -1:
            document.warning("Malformed LyX document: Missing `\\begin_layout'.")
            return
        layout_line = document.body[layout_before]
        del_layout_before = True
        l = layout_before + 1
        # Only blank lines between the layout start and the inset means the
        # inset *is* the paragraph start.
        while l < i:
            if document.body[l] != "":
                del_layout_before = False
                break
            l = l + 1
        if del_layout_before:
            del document.body[layout_before:i]
            i = layout_before
        else:
            document.body[i:i] = ["\\end_layout", ""]
            i = i + 2

        # Find start of layout in the inset and end of inset
        j = find_token(document.body, "\\begin_layout", i)
        if j == -1:
            document.warning("Malformed LyX document: Missing `\\begin_layout'.")
            return
        k = find_end_of_inset(document.body, i)
        if k == -1:
            document.warning("Malformed LyX document: Missing `\\end_inset'.")
            return

        # We either need to delete the following \end_layout line, or we need
        # to restart the old layout if this inset is not at the paragraph end.
        layout_after = find_token(document.body, "\\end_layout", k)
        if layout_after == -1:
            document.warning("Malformed LyX document: Missing `\\end_layout'.")
            return
        del_layout_after = True
        l = k + 1
        while l < layout_after:
            if document.body[l] != "":
                del_layout_after = False
                break
            l = l + 1
        if del_layout_after:
            del document.body[k+1:layout_after+1]
        else:
            document.body[k+1:k+1] = [layout_line, ""]

        # delete \begin_layout and \end_inset and replace \begin_inset with
        # "\begin_layout XXX". This works because we can only have one
        # paragraph in the caption inset: The old \end_layout will be recycled.
        # NOTE: the deletions below are order-dependent (k > j > i must hold
        # at this point); each delete may be followed by removing a now
        # adjacent blank line.
        del document.body[k]
        if document.body[k] == "":
            del document.body[k]
        del document.body[j]
        if document.body[j] == "":
            del document.body[j]
        document.body[i] = "\\begin_layout %s" % caption_dict[val]
        if document.body[i+1] == "":
            del document.body[i+1]
        i += 1
|
2012-12-30 17:29:02 +00:00
|
|
|
|
|
|
|
|
2012-12-31 12:00:46 +00:00
|
|
|
def revert_fragileframe(document):
    """Revert the beamer FragileFrame layout to raw ERT.

    The whole frame sequence becomes default paragraphs bracketed by ERT
    "\\begin{frame}[fragile,...]" ... "\\end{frame}"; the frame's argument
    insets (1: overlay spec, 2: default overlay spec, 3: options,
    4: title) are folded back into the opening command.
    """

    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout FragileFrame", i)
        if i == -1:
            return
        # Find end of sequence
        j = find_end_of_sequence(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document. Cannot find end of FragileFrame sequence!")
            i += 1
            continue
        subst = ["\\begin_layout Standard"] + put_cmd_in_ert("\\begin{frame}")
        esubst = ["\\begin_layout Standard"] + put_cmd_in_ert("\\end{frame}")
        # Close the frame at the sequence end (inside the nesting if the
        # sequence ends with \end_deeper).
        if document.body[j] == "\\end_deeper":
            document.body[j : j] = [""] + esubst + ["", "\\end_layout"]
        else:
            document.body[j : j] = esubst
        # Demote the frame paragraphs to the default layout.
        for q in range(i, j):
            if document.body[q] == "\\begin_layout FragileFrame":
                document.body[q] = "\\begin_layout %s" % document.default_layout
        # Blank out matched \begin_deeper/\end_deeper pairs inside the frame.
        r = i
        while r < j:
            if document.body[r] == "\\begin_deeper":
                s = find_end_of(document.body, r, "\\begin_deeper", "\\end_deeper")
                if s != -1:
                    document.body[r] = ""
                    document.body[s] = ""
                    r = s
                    continue
            r = r + 1
        # Opening/closing ERT delimiters per argument number (dedup of four
        # previously copy-pasted extraction stanzas).
        delims = {
            1: ("<", ">"),
            2: ("[<", ">]"),
            3: ("[fragile,", "]"),
            4: ("{", "}"),
        }
        for p in range(1, 5):
            arg = find_token(document.body, "\\begin_inset Argument %d" % p, i, j)
            if arg != -1:
                beginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg)
                endPlain = find_end_of_layout(document.body, beginPlain)
                endInset = find_end_of_inset(document.body, arg)
                content = document.body[beginPlain + 1 : endPlain]
                # Adjust range end
                j = j - len(document.body[arg : endInset + 1])
                # Remove arg inset and splice its content into the command
                del document.body[arg : endInset + 1]
                pre, post = delims[p]
                subst += put_cmd_in_ert(pre) + content + put_cmd_in_ert(post)
            elif p == 3:
                # No options argument given: the frame is still fragile.
                subst += put_cmd_in_ert("[fragile]")

        document.body[i : i + 1] = subst
        i = j
|
|
|
|
|
|
|
|
|
|
|
|
def revert_newframes(document):
    " Reverts beamer Frame and PlainFrame layouts to old forms "

    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    # Map new layout name -> old layout name.
    frame_dict = {
        "Frame" : "BeginFrame",
        "PlainFrame" : "BeginPlainFrame",
        }

    rx = re.compile(r'^\\begin_layout (\S+)$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout", i)
        if i == -1:
            return

        m = rx.match(document.body[i])
        val = ""
        if m:
            val = m.group(1)
        if val not in list(frame_dict.keys()):
            i += 1
            continue
        # Find end of sequence
        j = find_end_of_sequence(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document. Cannot find end of Frame sequence!")
            i += 1
            continue
        endseq = j
        subst = ["\\begin_layout %s" % frame_dict[val]]
        esubst = ["", "\\begin_layout EndFrame", "", "\\end_layout"]
        endseq = endseq + len(esubst) - len(document.body[j : j])
        # Close the frame with an explicit EndFrame layout.
        if document.body[j] == "\\end_deeper":
            document.body[j : j] = esubst
        else:
            document.body[j+1 : j+1] = esubst
        # Demote the frame paragraphs to the default layout.
        for q in range(i, j):
            if document.body[q] == "\\begin_layout %s" % val:
                document.body[q] = "\\begin_layout %s" % document.default_layout
        # Blank out matched \begin_deeper/\end_deeper pairs inside the frame.
        r = i
        while r < j:
            if document.body[r] == "\\begin_deeper":
                s = find_end_of(document.body, r, "\\begin_deeper", "\\end_deeper")
                if s != -1:
                    document.body[r] = ""
                    document.body[s] = ""
                    r = s
                    continue
            r = r + 1
        # Fold the argument insets back into the frame layout line:
        # 1: overlay spec <...>, 2: default overlay spec [<...>],
        # 3: options [...], 4: title (plain content).
        l = find_end_of_layout(document.body, i)
        for p in range(1, 5):
            arg = find_token(document.body, "\\begin_inset Argument %d" % p, i, l)
            if arg != -1:
                if p == 1:
                    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg)
                    endPlain = find_end_of_layout(document.body, beginPlain)
                    endInset = find_end_of_inset(document.body, arg)
                    content = document.body[beginPlain + 1 : endPlain]
                    # Adjust range end
                    l = l - len(document.body[arg : endInset + 1])
                    # Remove arg inset
                    del document.body[arg : endInset + 1]
                    subst += put_cmd_in_ert("<") + content + put_cmd_in_ert(">")
                elif p == 2:
                    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg)
                    endPlain = find_end_of_layout(document.body, beginPlain)
                    endInset = find_end_of_inset(document.body, arg)
                    content = document.body[beginPlain + 1 : endPlain]
                    # Adjust range end
                    l = l - len(document.body[arg : endInset + 1])
                    # Remove arg inset
                    del document.body[arg : endInset + 1]
                    subst += put_cmd_in_ert("[<") + content + put_cmd_in_ert(">]")
                elif p == 3:
                    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg)
                    endPlain = find_end_of_layout(document.body, beginPlain)
                    endInset = find_end_of_inset(document.body, arg)
                    content = document.body[beginPlain + 1 : endPlain]
                    # Adjust range end
                    l = l - len(document.body[arg : endInset + 1])
                    # Remove arg inset
                    del document.body[arg : endInset + 1]
                    subst += put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
                elif p == 4:
                    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg)
                    endPlain = find_end_of_layout(document.body, beginPlain)
                    endInset = find_end_of_inset(document.body, arg)
                    content = document.body[beginPlain + 1 : endPlain]
                    # Adjust range end
                    l = l - len(document.body[arg : endInset + 1])
                    # Remove arg inset
                    del document.body[arg : endInset + 1]
                    subst += content

        document.body[i : i + 1] = subst
        i = j
|
|
|
|
|
2013-01-20 00:52:30 +00:00
|
|
|
# known encodings that do not change their names (same LyX and LaTeX names)
# convert_encodings()/revert_encodings() leave these untouched and only warn
# about encodings that are neither mapped nor listed here.
known_enc_tuple = ("auto", "default", "ansinew", "applemac", "armscii8", "ascii",
    "cp437", "cp437de", "cp850", "cp852", "cp855", "cp858", "cp862", "cp865", "cp866",
    "cp1250", "cp1251", "cp1252", "cp1255", "cp1256", "cp1257", "koi8-r", "koi8-u",
    "pt154", "pt254", "tis620-0", "utf8", "utf8x", "utf8-plain")
|
|
|
|
|
|
|
|
def convert_encodings(document):
    """Use the LyX names of the encodings instead of the LaTeX names.

    Rewrites the header's \\inputencoding value via the mapping below;
    names listed in known_enc_tuple are identical in both worlds and are
    left alone, anything else triggers a warning.
    """
    # Map LaTeX encoding name -> LyX encoding name.
    LaTeX2LyX_enc_dict = {
        "8859-6":       "iso8859-6",
        "8859-8":       "iso8859-8",
        "Bg5":          "big5",
        "euc":          "euc-jp-platex",
        "EUC-JP":       "euc-jp",
        "EUC-TW":       "euc-tw",
        "GB":           "euc-cn",
        "GBK":          "gbk",
        "iso88595":     "iso8859-5",
        "iso-8859-7":   "iso8859-7",
        "JIS":          "jis",
        "jis":          "jis-platex",
        "KS":           "euc-kr",
        "l7xenc":       "iso8859-13",
        "latin1":       "iso8859-1",
        "latin2":       "iso8859-2",
        "latin3":       "iso8859-3",
        "latin4":       "iso8859-4",
        "latin5":       "iso8859-9",
        "latin9":       "iso8859-15",
        "latin10":      "iso8859-16",
        "SJIS":         "shift-jis",
        "sjis":         "shift-jis-platex",
        "UTF8":         "utf8-cjk"
    }
    i = find_token(document.header, "\\inputencoding" , 0)
    if i == -1:
        return
    val = get_value(document.header, "\\inputencoding", i)
    # Direct dict membership (was: `val in list(dict.keys())`, which builds
    # a throwaway list and scans it linearly).
    if val in LaTeX2LyX_enc_dict:
        document.header[i] = "\\inputencoding %s" % LaTeX2LyX_enc_dict[val]
    elif val not in known_enc_tuple:
        document.warning("Ignoring unknown input encoding: `%s'" % val)
|
|
|
|
|
|
|
|
|
|
|
|
def revert_encodings(document):
    """Revert to using the LaTeX names of the encodings instead of the LyX names.

    Also revert utf8-platex to sjis, the language default when using Japanese.
    """
    # Map LyX encoding name -> LaTeX encoding name.
    LyX2LaTeX_enc_dict = {
        "big5":             "Bg5",
        "euc-cn":           "GB",
        "euc-kr":           "KS",
        "euc-jp":           "EUC-JP",
        "euc-jp-platex":    "euc",
        "euc-tw":           "EUC-TW",
        "gbk":              "GBK",
        "iso8859-1":        "latin1",
        "iso8859-2":        "latin2",
        "iso8859-3":        "latin3",
        "iso8859-4":        "latin4",
        "iso8859-5":        "iso88595",
        "iso8859-6":        "8859-6",
        "iso8859-7":        "iso-8859-7",
        "iso8859-8":        "8859-8",
        "iso8859-9":        "latin5",
        "iso8859-13":       "l7xenc",
        "iso8859-15":       "latin9",
        "iso8859-16":       "latin10",
        "jis":              "JIS",
        "jis-platex":       "jis",
        "shift-jis":        "SJIS",
        "shift-jis-platex": "sjis",
        "utf8-cjk":         "UTF8",
        "utf8-platex":      "sjis"
    }
    pos = find_token(document.header, "\\inputencoding", 0)
    if pos == -1:
        return
    enc = get_value(document.header, "\\inputencoding", pos)
    latex_name = LyX2LaTeX_enc_dict.get(enc)
    if latex_name is not None:
        document.header[pos] = "\\inputencoding %s" % latex_name
    elif enc not in known_enc_tuple:
        document.warning("Ignoring unknown input encoding: `%s'" % enc)
|
|
|
|
|
2012-12-31 12:00:46 +00:00
|
|
|
|
2013-02-09 02:23:34 +00:00
|
|
|
def revert_IEEEtran_3(document):
    '''
    Reverts Flex Insets to TeX-code
    '''
    # Three independent cursors (h, i, j) scan the body in parallel, one
    # per flex inset type; a cursor is -1 once its token is exhausted and
    # the loop ends when all three are -1.
    if document.textclass == "IEEEtran":
        h = 0
        i = 0
        j = 0
        while True:
            if h != -1:
                h = find_token(document.body, "\\begin_inset Flex Author Mark", h)
            if h != -1:
                endh = find_end_of_inset(document.body, h)
                # Replace the inset's closing lines with "}" in ERT ...
                document.body[endh - 2 : endh + 1] = put_cmd_in_ert("}")
                # ... and its opening lines with the raw LaTeX command.
                document.body[h : h + 4] = put_cmd_in_ert("\\IEEEauthorrefmark{")
                h = h + 5
            if i != -1:
                i = find_token(document.body, "\\begin_inset Flex Author Name", i)
            if i != -1:
                endi = find_end_of_inset(document.body, i)
                document.body[endi - 2 : endi + 1] = put_cmd_in_ert("}")
                document.body[i : i + 4] = put_cmd_in_ert("\\IEEEauthorblockN{")
                i = i + 5
            if j != -1:
                j = find_token(document.body, "\\begin_inset Flex Author Affiliation", j)
            if j != -1:
                endj = find_end_of_inset(document.body, j)
                document.body[endj - 2 : endj + 1] = put_cmd_in_ert("}")
                document.body[j : j + 4] = put_cmd_in_ert("\\IEEEauthorblockA{")
                j = j + 5
            if i == -1 and j == -1 and h == -1:
                return
|
|
|
|
|
|
|
|
|
2013-02-10 23:47:09 +00:00
|
|
|
def revert_kurier_fonts(document):
    " Revert kurier font definition to LaTeX "

    # Kurier settings only translate to preamble code for TeX fonts.
    use_tex_fonts = find_token(document.header, "\\use_non_tex_fonts false", 0) != -1

    math_pos = find_token(document.header, "\\font_math", 0)
    if math_pos != -1 and use_tex_fonts:
        if get_value(document.header, "\\font_math", math_pos) == "kurier-math":
            add_to_preamble(document, "\\let\\Myrmdefault\\rmdefault\n" \
                "\\usepackage[math]{kurier}\n" \
                "\\renewcommand{\\rmdefault}{\\Myrmdefault}")
            document.header[math_pos] = "\\font_math auto"

    if use_tex_fonts:
        kurier_variants = ("kurier", "kurierc", "kurierl", "kurierlc")
        sans_pos = find_token(document.header, "\\font_sans kurier", 0)
        if sans_pos != -1:
            sans = get_value(document.header, "\\font_sans", sans_pos)
            if sans in kurier_variants:
                add_to_preamble(document, "\\renewcommand{\\sfdefault}{%s}" % sans)
                document.header[sans_pos] = "\\font_sans default"
|
2013-02-10 23:47:09 +00:00
|
|
|
|
2013-03-18 23:57:37 +00:00
|
|
|
def revert_iwona_fonts(document):
    " Revert iwona font definition to LaTeX "

    # Iwona settings only translate to preamble code for TeX fonts.
    use_tex_fonts = find_token(document.header, "\\use_non_tex_fonts false", 0) != -1

    math_pos = find_token(document.header, "\\font_math", 0)
    if math_pos != -1 and use_tex_fonts:
        if get_value(document.header, "\\font_math", math_pos) == "iwona-math":
            add_to_preamble(document, "\\let\\Myrmdefault\\rmdefault\n" \
                "\\usepackage[math]{iwona}\n" \
                "\\renewcommand{\\rmdefault}{\\Myrmdefault}")
            document.header[math_pos] = "\\font_math auto"

    if use_tex_fonts:
        iwona_variants = ("iwona", "iwonac", "iwonal", "iwonalc")
        sans_pos = find_token(document.header, "\\font_sans iwona", 0)
        if sans_pos != -1:
            sans = get_value(document.header, "\\font_sans", sans_pos)
            if sans in iwona_variants:
                add_to_preamble(document, "\\renewcommand{\\sfdefault}{%s}" % sans)
                document.header[sans_pos] = "\\font_sans default"
|
|
|
|
|
2013-02-10 23:47:09 +00:00
|
|
|
|
2013-02-15 09:45:11 +00:00
|
|
|
def revert_new_libertines(document):
    " Revert new libertine font definition to LaTeX "

    # Nothing to do when non-TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts true", 0) != -1:
        return

    # Typewriter face: libertine-mono -> libertineMono-type1 package.
    tt = find_token(document.header, "\\font_typewriter libertine-mono", 0)
    if tt != -1:
        preamble = "\\usepackage"
        scale_pos = find_token(document.header, "\\font_tt_scale", 0)
        if scale_pos != -1:
            scale = get_value(document.header, "\\font_tt_scale", scale_pos)
            if scale != "100":
                preamble += "[scale=%f]" % (float(scale) / 100)
                document.header[scale_pos] = "\\font_tt_scale 100"
        preamble += "{libertineMono-type1}"
        add_to_preamble(document, [preamble])
        document.header[tt] = "\\font_typewriter default"

    # Sans face: biolinum -> biolinum-type1 package with osf/lining option.
    sf = find_token(document.header, "\\font_sans biolinum", 0)
    if sf != -1:
        opts = []
        if find_token(document.header, "\\font_osf true", 0) != -1:
            opts.append("osf")
        else:
            opts.append("lining")
        scale_pos = find_token(document.header, "\\font_sf_scale", 0)
        if scale_pos != -1:
            scale = get_value(document.header, "\\font_sf_scale", scale_pos)
            if scale != "100":
                opts.append("scale=%f" % (float(scale) / 100))
                document.header[scale_pos] = "\\font_sf_scale 100"
        preamble = "\\usepackage[" + ",".join(opts) + "]{biolinum-type1}"
        add_to_preamble(document, [preamble])
        document.header[sf] = "\\font_sans default"
|
|
|
|
|
|
|
|
|
2013-03-16 11:52:00 +00:00
|
|
|
def convert_lyxframes(document):
    " Converts old beamer frames to new style "

    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        return

    framebeg = ["BeginFrame", "BeginPlainFrame"]
    # Layouts that terminate a frame's content.
    frameend = ["Frame", "PlainFrame", "EndFrame", "BeginFrame", "BeginPlainFrame", "AgainFrame",
                "Section", "Section*", "Subsection", "Subsection*", "Subsubsection", "Subsubsection*"]
    for lay in framebeg:
        i = 0
        while True:
            i = find_token_exact(document.body, "\\begin_layout " + lay, i)
            if i == -1:
                break
            parent = get_containing_layout(document.body, i)
            if parent == False or parent[1] != i:
                document.warning("Wrong parent layout!")
                i += 1
                continue
            frametype = parent[0]
            j = parent[2]
            parbeg = parent[3]
            if i != -1:
                # Step I: Convert ERT arguments
                # FIXME: See restrictions in convert_beamerframeargs method
                ertend = convert_beamerframeargs(document, i, parbeg)
                if ertend == -1:
                    break
                # Step II: Now rename the layout and convert the title to an argument
                j = find_end_of_layout(document.body, i)
                document.body[j : j + 1] = ['\\end_layout', '', '\\end_inset', '', '\\end_layout']
                if lay == "BeginFrame":
                    document.body[i] = "\\begin_layout Frame"
                else:
                    document.body[i] = "\\begin_layout PlainFrame"
                # The title goes into Argument 4, opened right after the
                # converted ERT arguments.
                document.body[ertend + 1 : ertend + 1] = ['\\begin_inset Argument 4',
                                                          'status open', '', '\\begin_layout Plain Layout']
                # Step III: find real frame end
                j = j + 8
                jj = j
                inInset = get_containing_inset(document.body, i)
                while True:
                    fend = find_token(document.body, "\\begin_layout", jj)
                    if fend == -1:
                        document.warning("Malformed LyX document: No real frame end!")
                        return
                    val = get_value(document.body, "\\begin_layout", fend)
                    if val not in frameend:
                        jj = fend + 1
                        continue
                    # is this frame nested in an inset (e.g., Note)?
                    if inInset != False:
                        # if so, end the frame inside the inset
                        if inInset[2] < fend:
                            fend = inInset[2]
                    if val == frametype:
                        document.body[fend : fend] = ['\\end_deeper', '', '\\begin_layout Separator', '', '\\end_layout']
                    # consider explicit EndFrames between two identical frame types
                    elif val == "EndFrame":
                        nextlayout = find_token(document.body, "\\begin_layout", fend + 1)
                        if nextlayout != -1 and get_value(document.body, "\\begin_layout", nextlayout) == frametype:
                            document.body[fend : fend] = ['\\end_deeper', '', '\\begin_layout Separator', '', '\\end_layout']
                        else:
                            document.body[fend : fend] = ['\\end_deeper']
                    else:
                        document.body[fend : fend] = ['\\end_deeper']
                    # Nest the frame content below the frame paragraph.
                    document.body[j + 1 : j + 1] = ['', '\\begin_deeper']
                    break
            i = j
|
|
|
|
|
|
|
|
|
|
|
|
def remove_endframes(document):
    " Remove deprecated beamer endframes "

    if document.textclass not in ["beamer", "article-beamer", "scrarticle-beamer"]:
        return

    # Delete every EndFrame paragraph, including its \end_layout line.
    pos = 0
    while True:
        pos = find_token_exact(document.body, "\\begin_layout EndFrame", pos)
        if pos == -1:
            break
        end = find_end_of_layout(document.body, pos)
        if end == -1:
            document.warning("Malformed LyX document: Missing \\end_layout to EndFrame")
            pos += 1
            continue
        del document.body[pos : end + 1]
|
|
|
|
|
|
|
|
|
2013-03-18 12:36:56 +00:00
|
|
|
def revert_powerdot_flexes(document):
    """Revert powerdot Onslide flex insets to raw \\onslide ERT.

    Only applies to the "powerdot" text class.  Each Flex Onslide[*+] inset
    is rewritten as ERT: the command, an optional Argument 1 in braces, then
    the inset content wrapped in { ... }.
    """

    if document.textclass != "powerdot":
        return

    # Map of flex inset names to the LaTeX command they stand for.
    flexes = {"Onslide" : "\\onslide",
              "Onslide*" : "\\onslide*",
              "Onslide+" : "\\onslide+"}
    rx = re.compile(r'^\\begin_inset Flex (.+)$')

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Flex", i)
        if i == -1:
            return
        m = rx.match(document.body[i])
        if m:
            flextype = m.group(1)
            # z tracks the inset's \end_inset line; every in-place edit
            # below must keep it in sync with the shifting body list.
            z = find_end_of_inset(document.body, i)
            if z == -1:
                document.warning("Can't find end of Flex " + flextype + " inset.")
                i += 1
                continue
            if flextype in flexes:
                pre = put_cmd_in_ert(flexes[flextype])
                arg = find_token(document.body, "\\begin_inset Argument 1", i, z)
                if arg != -1:
                    argend = find_end_of_inset(document.body, arg)
                    if argend == -1:
                        document.warning("Can't find end of Argument!")
                        i += 1
                        continue
                    # Find containing paragraph layout
                    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg)
                    endPlain = find_end_of_layout(document.body, beginPlain)
                    argcontent = document.body[beginPlain + 1 : endPlain]
                    # Adjust range end: the argument inset is about to go away.
                    z = z - len(document.body[arg : argend + 1])
                    # Remove arg inset
                    del document.body[arg : argend + 1]
                    pre += put_cmd_in_ert("{") + argcontent + put_cmd_in_ert("}")
                pre += put_cmd_in_ert("{")
                beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
                endPlain = find_end_of_layout(document.body, beginPlain)
                # Adjust range end: the inset header is replaced by `pre`.
                z = z - len(document.body[i : beginPlain + 1])
                z += len(pre)
                document.body[i : beginPlain + 1] = pre
                post = put_cmd_in_ert("}")
                # Replace the trailing \end_layout/\end_inset pair with "}".
                document.body[z - 2 : z + 1] = post
        i += 1
|
|
|
|
|
|
|
|
|
|
|
|
def revert_powerdot_pause(document):
    """Revert the powerdot Pause layout to a \\pause ERT paragraph.

    Only applies to the "powerdot" text class.  An optional Argument 1
    inset inside the paragraph becomes the bracketed [...] option.
    """

    if document.textclass != "powerdot":
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout Pause", i)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Pause layout")
            i += 1
            continue
        # endlay tracks the layout end and shrinks as insets are deleted.
        endlay = j
        subst = ["\\begin_layout Standard"] + put_cmd_in_ert("\\pause")
        for p in range(i, j):
            if p >= endlay:
                break
            # NOTE(review): the search restarts from i (not p) each pass,
            # and the Plain Layout lookup starts from p — presumably relies
            # on at most one Argument 1 per Pause paragraph; confirm.
            arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
            if arg != -1:
                beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                endPlain = find_end_of_layout(document.body, beginPlain)
                endInset = find_end_of_inset(document.body, p)
                content = document.body[beginPlain + 1 : endPlain]
                # Adjust range end
                endlay = endlay - len(document.body[p : endInset + 1])
                # Remove arg inset
                del document.body[p : endInset + 1]
                subst += put_cmd_in_ert("[") + content + put_cmd_in_ert("]")

        document.body[i : i + 1] = subst
        i = endlay
|
|
|
|
|
|
|
|
|
|
|
|
def revert_powerdot_itemargs(document):
    """Revert powerdot item arguments (item:1 / item:2) to ERT.

    Only applies to the "powerdot" text class.  In list layouts,
    Argument "item:1" becomes [...] and Argument "item:2" becomes <...>,
    both re-inserted at the start of the containing paragraph.
    """

    if document.textclass != "powerdot":
        return

    i = 0
    list_layouts = ["Itemize", "ItemizeType1", "Enumerate", "EnumerateType1"]
    rx = re.compile(r'^\\begin_inset Argument (\S+)$')

    while True:
        i = find_token(document.body, "\\begin_inset Argument", i)
        if i == -1:
            return
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            i += 1
            continue
        parbeg = parent[1]
        parend = parent[2]
        realparbeg = parent[3]
        layoutname = parent[0]
        # realparend shrinks conceptually as content is deleted; the loop
        # below guards on it before touching stale indices.
        realparend = parend
        for p in range(parbeg, parend):
            if p >= realparend:
                i = realparend
                break
            if layoutname in list_layouts:
                m = rx.match(document.body[p])
                if m:
                    argnr = m.group(1)
                    if argnr == "item:1":
                        j = find_end_of_inset(document.body, i)
                        # Find containing paragraph layout
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        content = document.body[beginPlain + 1 : endPlain]
                        del document.body[i:j+1]
                        subst = put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
                        document.body[realparbeg : realparbeg] = subst
                    elif argnr == "item:2":
                        j = find_end_of_inset(document.body, i)
                        # Find containing paragraph layout
                        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
                        endPlain = find_end_of_layout(document.body, beginPlain)
                        content = document.body[beginPlain + 1 : endPlain]
                        del document.body[i:j+1]
                        subst = put_cmd_in_ert("<") + content + put_cmd_in_ert(">")
                        document.body[realparbeg : realparbeg] = subst

        i = realparend
|
|
|
|
|
|
|
|
|
|
|
|
def revert_powerdot_columns(document):
    """Revert the powerdot Twocolumn layout to \\twocolumn TeX code.

    Only applies to the "powerdot" text class.  Argument "1" becomes the
    [...] option, Argument "2" the first {...} group; the paragraph body
    is wrapped in the final {...} group.
    """
    if document.textclass != "powerdot":
        return

    rx = re.compile(r'^\\begin_inset Argument (\S+)$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout Twocolumn", i)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Twocolumn layout")
            i += 1
            continue
        # endlay tracks the layout end across the in-place edits below.
        endlay = j
        # Close the trailing brace group just before the layout end.
        document.body[j : j] = put_cmd_in_ert("}") + document.body[j : j]
        endlay += len(put_cmd_in_ert("}"))
        subst = ["\\begin_layout Standard"] + put_cmd_in_ert("\\twocolumn")
        for p in range(i, j):
            if p >= endlay:
                break
            m = rx.match(document.body[p])
            if m:
                argnr = m.group(1)
                if argnr == "1":
                    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                    endPlain = find_end_of_layout(document.body, beginPlain)
                    endInset = find_end_of_inset(document.body, p)
                    content = document.body[beginPlain + 1 : endPlain]
                    # Adjust range end
                    endlay = endlay - len(document.body[p : endInset + 1])
                    # Remove arg inset
                    del document.body[p : endInset + 1]
                    subst += put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
                elif argnr == "2":
                    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", p)
                    endPlain = find_end_of_layout(document.body, beginPlain)
                    endInset = find_end_of_inset(document.body, p)
                    content = document.body[beginPlain + 1 : endPlain]
                    # Adjust range end
                    endlay = endlay - len(document.body[p : endInset + 1])
                    # Remove arg inset
                    del document.body[p : endInset + 1]
                    subst += put_cmd_in_ert("{") + content + put_cmd_in_ert("}")

        # Open the brace group that the earlier "}" insertion closes.
        subst += put_cmd_in_ert("{")
        document.body[i : i + 1] = subst
        i = endlay
|
|
|
|
|
|
|
|
|
2013-03-22 00:33:58 +00:00
|
|
|
def revert_mbox_fbox(document):
    """Revert widthless mbox/fbox boxes to \\mbox{...} / \\fbox{...} ERT.

    Boxes whose width field is the empty string ('""') cannot be expressed
    in older formats, so Frameless boxes become \\mbox{...} and Boxed boxes
    become \\fbox{...}.  Boxes with a real width are left untouched.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Box", i)
        if i == -1:
            return
        # The width line is expected at a fixed offset inside the inset
        # header; anything else means the document is malformed.
        j = find_token(document.body, "width", i)
        if j != i + 7:
            # NOTE(review): this aborts processing of all remaining boxes,
            # unlike the `continue` used below — presumably intentional.
            document.warning("Malformed LyX document: Can't find box width")
            return
        width = get_value(document.body, "width", j)
        k = find_end_of_inset(document.body, j)
        if k == -1:
            document.warning("Malformed LyX document: Can't find end of box inset")
            i += 1
            continue
        BeginLayout = find_token(document.body, "\\begin_layout Plain Layout", j)
        EndLayout = find_end_of_layout(document.body, BeginLayout)
        # replace if width is ""
        if (width == '""'):
            # Replace the tail first so the earlier indices stay valid.
            document.body[EndLayout:k + 1] = put_cmd_in_ert("}")
            if document.body[i] == "\\begin_inset Box Frameless":
                document.body[i:BeginLayout + 1] = put_cmd_in_ert("\\mbox{")
            if document.body[i] == "\\begin_inset Box Boxed":
                document.body[i:BeginLayout + 1] = put_cmd_in_ert("\\fbox{")
        i += 1
|
2013-03-22 00:33:58 +00:00
|
|
|
|
|
|
|
|
2013-03-22 21:23:38 +00:00
|
|
|
def revert_starred_caption(document):
    """Downgrade unnumbered longtable caption insets to standard captions.

    This is not strictly equivalent, but since the caption inset is a full
    blown text inset, a true conversion to ERT would be too difficult.
    """
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Caption LongTableNoNumber", pos)
        if pos == -1:
            return
        document.body[pos] = "\\begin_inset Caption Standard"
        pos += 1
|
2013-03-22 21:23:38 +00:00
|
|
|
|
|
|
|
|
2013-05-15 05:19:49 +00:00
|
|
|
def revert_forced_local_layout(document):
    """Fold a \\begin_forced_local_layout block into the plain local layout.

    Older formats have no forced local layout.  "ForceLocal" lines are
    dropped; then the block is either renamed to a plain local layout (if
    none exists) or its contents are merged into the existing one.
    """
    i = 0
    while True:
        i = find_token(document.header, "\\begin_forced_local_layout", i)
        if i == -1:
            return
        j = find_end_of(document.header, i, "\\begin_forced_local_layout", "\\end_forced_local_layout")
        if j == -1:
            # this should not happen
            break
        # Strip "ForceLocal" lines, which older versions don't understand.
        regexp = re.compile(r'\s*forcelocal', re.IGNORECASE)
        k = find_re(document.header, regexp, i, j)
        while k != -1:
            del document.header[k]
            j = j - 1
            k = find_re(document.header, regexp, i, j)
        k = find_token(document.header, "\\begin_local_layout", 0)
        if k == -1:
            # No plain local layout yet: just rename the delimiters.
            document.header[i] = "\\begin_local_layout"
            document.header[j] = "\\end_local_layout"
        else:
            l = find_end_of(document.header, k, "\\begin_local_layout", "\\end_local_layout")
            # Bug fix: the end-of-block check must test l (the result just
            # computed), not j, which was already validated above.
            if l == -1:
                # this should not happen
                break
            # Move the forced block's contents into the existing local
            # layout; the order of the two edits depends on which block
            # comes first, so the earlier indices stay valid.
            lines = document.header[i+1 : j]
            if k > i:
                document.header[k+1 : k+1] = lines
                document.header[i : j ] = []
            else:
                document.header[i : j ] = []
                document.header[k+1 : k+1] = lines
|
|
|
|
|
|
|
|
|
2013-05-26 03:29:24 +00:00
|
|
|
def revert_aa1(document):
    """Revert the aa class's structured-abstract InsetArguments to TeX braces."""
    if document.textclass != "aa":
        return
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_layout Abstract (structured)", pos)
        if pos == -1:
            return
        revert_Argument_to_TeX_brace(document, pos, 0, 1, 4, False, False)
        pos += 1
|
|
|
|
|
|
|
|
|
|
|
|
def revert_aa2(document):
    """Rename the aa class's structured abstract layout back to plain Abstract."""
    if document.textclass != "aa":
        return
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_layout Abstract (structured)", pos)
        if pos == -1:
            return
        document.body[pos] = "\\begin_layout Abstract"
        pos += 1
|
|
|
|
|
|
|
|
|
2013-05-28 21:40:17 +00:00
|
|
|
def revert_tibetan(document):
    """Fall back from Tibetan to English, as older formats lack Tibetan support."""
    if document.language != "tibetan":
        return
    document.language = "english"
    lang_line = find_token(document.header, "\\language", 0)
    if lang_line != -1:
        document.header[lang_line] = "\\language english"
    pos = 0
    while pos < len(document.body):
        pos = find_token(document.body, "\\lang tibetan", pos)
        if pos == -1:
            break
        document.body[pos] = document.body[pos].replace("\\lang tibetan", "\\lang english")
        pos += 1
|
|
|
|
|
|
|
|
|
2013-05-30 13:12:48 +00:00
|
|
|
#############
|
|
|
|
#
|
|
|
|
# Chunk stuff
|
|
|
|
#
|
|
|
|
#############
|
|
|
|
|
2014-02-06 09:11:02 +00:00
|
|
|
# The idea here is that we will have a sequence of chunk paragraphs.
|
|
|
|
# We want to convert them to paragraphs in one or several chunk insets.
|
|
|
|
# Individual chunks are terminated by the character @ on the last line.
|
|
|
|
# This line will be discarded, and following lines are treated as new
|
|
|
|
# chunks, which go into their own insets.
|
|
|
|
# The first line of a chunk should look like: <<CONTENT>>=
|
|
|
|
# We will discard the delimiters, and put the CONTENT into the
|
|
|
|
# optional argument of the inset, if the CONTENT is non-empty.
|
2013-05-30 13:12:48 +00:00
|
|
|
def convert_chunks(document):
    """Convert sequences of Chunk paragraphs into Flex Chunk insets.

    A run of Chunk paragraphs forms one chunk, terminated by a line that is
    just "@".  The first line, <<CONTENT>>=, supplies the optional argument.
    See the block comment above this function for the full picture.
    """
    first_re = re.compile(r'<<(.*)>>=(.*)')
    # file_pos is the running scan position in document.body; it is
    # corrected after each replacement for the change in list length.
    file_pos = 0
    while True:
        # find start of a block of chunks
        i = find_token(document.body, "\\begin_layout Chunk", file_pos)
        if i == -1:
            return
        start = i
        end = -1
        contents = []
        chunk_started = False

        while True:
            # process the one we just found
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning("Malformed LyX documents. Can't find end of Chunk layout!")
                # there is no point continuing, as we will run into the same error again.
                return
            this_chunk = "".join(document.body[i + 1:j])

            # there may be empty lines between chunks
            # we just skip them.
            if not chunk_started:
                if this_chunk != "":
                    # new chunk starts
                    chunk_started = True

            if chunk_started:
                contents.append(document.body[i + 1:j])

            # look for potential chunk terminator
            # on the last line of the chunk paragraph
            if document.body[j - 1] == "@":
                break

            # look for subsequent chunk paragraph
            i = find_token(document.body, "\\begin_layout", j)
            if i == -1:
                break

            if get_value(document.body, "\\begin_layout", i) != "Chunk":
                break

            file_pos = end = j + 1

        # The last chunk should simply have an "@" in it
        # or at least end with "@" (can happen if @ is
        # preceded by a newline)
        lastpar = ''
        if len(contents) > 0:
            lastpar = ''.join(contents[-1])
        if not lastpar.endswith("@"):
            document.warning("Unexpected chunk content: chunk not terminated by '@'!")

        if len(contents) == 0:
            # convert empty chunk layouts to Standard
            document.body[start] = "\\begin_layout Standard"
            continue

        if lastpar == "@":
            # chunk par only contains "@". Just drop it.
            contents.pop()
        else:
            # chunk par contains more. Only drop the "@".
            contents[-1].pop()

        # The first line should look like: <<CONTENT>>=
        # We want the CONTENT
        # NOTE(review): optarg.strip() discards its result (str.strip is
        # not in-place) — presumably harmless here; confirm.
        optarg = ' '.join(contents[0])
        optarg.strip()
        # We can already have real chunk content in
        # the first par (separated from the options by a newline).
        # We collect such stuff to re-insert it later.
        postoptstuff = []

        match = first_re.search(optarg)
        if match:
            optarg = match.groups()[0]
            if match.groups()[1] != "":
                postopt = False
                for c in contents[0]:
                    if c.endswith(">>="):
                        postopt = True
                        continue
                    if postopt:
                        postoptstuff.append(c)
            # We have stripped everything. This can be deleted.
            contents.pop(0)

        newstuff = ['\\begin_layout Standard']

        # Maintain paragraph parameters
        par_params = ["\\noindent", "\\indent", "\\indent-toggle", "\\leftindent",
                      "\\start_of_appendix", "\\paragraph_spacing", "\\align",
                      "\\labelwidthstring"]
        parms = start + 1
        while True:
            if document.body[parms].split(' ', 1)[0] not in par_params:
                break
            newstuff.extend([document.body[parms]])
            parms += 1

        newstuff.extend(
            ['\\begin_inset Flex Chunk',
             'status open', '',
             '\\begin_layout Plain Layout', ''])

        # If we have a non-empty optional argument, insert it.
        if match and optarg != "":
            newstuff.extend(
                ['\\begin_inset Argument 1',
                 'status open', '',
                 '\\begin_layout Plain Layout',
                 optarg,
                 '\\end_layout', '',
                 '\\end_inset', ''])

        # Since we already opened a Plain layout, the first paragraph
        # does not need to do that.
        did_one_par = False
        if postoptstuff:
            # we need to replace newlines with new layouts
            start_newline = -1
            started_text = False
            for lno in range(0,len(postoptstuff)):
                if postoptstuff[lno].startswith("\\begin_inset Newline newline"):
                    start_newline = lno
                elif start_newline != -1:
                    if postoptstuff[lno].startswith("\\end_inset"):
                        # replace that bit, but only if we already have some text
                        # and we're not at the end except for a blank line
                        if started_text and \
                          (lno != len(postoptstuff) - 2 or postoptstuff[-1] != ""):
                            newstuff.extend(['\\end_layout', '\n', '\\begin_layout Plain Layout', '\n'])
                        start_newline = -1
                        started_text = True
                else:
                    newstuff.extend([postoptstuff[lno]])
            newstuff.append('\\end_layout')
            did_one_par = True
        for c in contents:
            if did_one_par:
                newstuff.extend(['', '\\begin_layout Plain Layout', ''])
            else:
                did_one_par = True
            newstuff.extend(c)
            newstuff.append('\\end_layout')

        newstuff.extend(['', '\\end_inset', '', '\\end_layout', ''])

        document.body[start:end] = newstuff

        # Keep the scan position consistent with the size change.
        file_pos += len(newstuff) - (end - start)
|
2013-05-30 13:12:48 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_chunks(document):
    """Revert Flex Chunk insets back to sequences of Chunk paragraphs.

    Inverse of convert_chunks: emits a <<optarg>>= header paragraph, one
    Chunk paragraph per inner Plain Layout, and a terminating "@" paragraph.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Flex Chunk", i)
        if i == -1:
            return

        iend = find_end_of_inset(document.body, i)
        if iend == -1:
            document.warning("Can't find end of Chunk!")
            i += 1
            continue

        # Look for optional argument
        optarg = ""
        ostart = find_token(document.body, "\\begin_inset Argument 1", i, iend)
        if ostart != -1:
            oend = find_end_of_inset(document.body, ostart)
            k = find_token(document.body, "\\begin_layout Plain Layout", ostart, oend)
            if k == -1:
                document.warning("Malformed LyX document: Can't find argument contents!")
            else:
                m = find_end_of_layout(document.body, k)
                optarg = "".join(document.body[k+1:m])

            # We now remove the optional argument, so we have something
            # uniform on which to work
            document.body[ostart : oend + 1] = []
            # iend is now invalid
            iend = find_end_of_inset(document.body, i)

        retval = get_containing_layout(document.body, i)
        if not retval:
            document.warning("Can't find containing layout for Chunk!")
            i = iend
            continue
        (lname, lstart, lend, pstart) = retval
        # we now want to work through the various paragraphs, and collect their contents
        parlist = []
        k = i
        while True:
            k = find_token(document.body, "\\begin_layout Plain Layout", k, lend)
            if k == -1:
                break
            j = find_end_of_layout(document.body, k)
            if j == -1:
                document.warning("Can't find end of layout inside chunk!")
                break
            parlist.append(document.body[k+1:j])
            k = j
        # we now need to wrap all of these paragraphs in chunks
        newlines = []
        newlines.extend(["\\begin_layout Chunk", "", "<<" + optarg + ">>=", "\\end_layout", ""])
        for stuff in parlist:
            newlines.extend(["\\begin_layout Chunk"] + stuff + ["\\end_layout", ""])
        newlines.extend(["\\begin_layout Chunk", "", "@", "\\end_layout", ""])
        # replace old content with new content
        document.body[lstart : lend + 1] = newlines
        # Resume scanning just past the material we inserted.
        i = lstart + len(newlines)
|
|
|
|
|
|
|
|
|
2011-05-03 13:12:55 +00:00
|
|
|
##
# Conversion hub
#

# Format versions this module can read and write.
supported_versions = ["2.1.0","2.1"]

# Each entry pairs a target file-format number with the list of functions
# that bring a document up to that format (applied in order).
convert = [
           [414, []],
           [415, [convert_undertilde]],
           [416, []],
           [417, [convert_japanese_encodings]],
           [418, [convert_justification]],
           [419, []],
           [420, [convert_biblio_style]],
           [421, [convert_longtable_captions]],
           [422, [convert_use_packages]],
           [423, [convert_use_mathtools]],
           [424, [convert_cite_engine_type]],
           # No convert_cancel, since cancel will be loaded automatically
           # in format 425 without any possibility to switch it off.
           # This has been fixed in format 464.
           [425, []],
           [426, []],
           [427, []],
           [428, [convert_cell_rotation]],
           [429, [convert_table_rotation]],
           [430, [convert_listoflistings]],
           [431, [convert_use_amssymb]],
           [432, []],
           [433, [convert_armenian]],
           [434, []],
           [435, []],
           [436, []],
           [437, []],
           [438, []],
           [439, []],
           [440, []],
           [441, [convert_mdnomath]],
           [442, []],
           [443, []],
           [444, []],
           [445, []],
           [446, [convert_latexargs]],
           [447, [convert_IEEEtran, convert_AASTeX, convert_AGUTeX, convert_IJMP, convert_SIGPLAN, convert_SIGGRAPH, convert_EuropeCV, convert_Initials, convert_ModernCV]],
           [448, [convert_literate]],
           [449, []],
           [450, []],
           [451, [convert_beamerargs, convert_againframe_args, convert_corollary_args, convert_quote_args]],
           [452, [convert_beamerblocks]],
           [453, [convert_use_stmaryrd]],
           [454, [convert_overprint]],
           [455, []],
           [456, [convert_epigraph]],
           [457, [convert_use_stackrel]],
           [458, [convert_captioninsets, convert_captionlayouts]],
           [459, []],
           [460, []],
           [461, []],
           [462, []],
           [463, [convert_encodings]],
           [464, [convert_use_cancel]],
           [465, [convert_lyxframes, remove_endframes]],
           [466, []],
           [467, []],
           [468, []],
           [469, []],
           [470, []],
           [471, [convert_cite_engine_type_default]],
           [472, []],
           [473, []],
           [474, [convert_chunks, cleanup_beamerargs]],
          ]

# Each entry pairs a target file-format number with the list of functions
# that take a document back down to that format (applied in order).
revert =  [
           [473, [revert_chunks]],
           [472, [revert_tibetan]],
           [471, [revert_aa1,revert_aa2]],
           [470, [revert_cite_engine_type_default]],
           [469, [revert_forced_local_layout]],
           [468, [revert_starred_caption]],
           [467, [revert_mbox_fbox]],
           [466, [revert_iwona_fonts]],
           [465, [revert_powerdot_flexes, revert_powerdot_pause, revert_powerdot_itemargs, revert_powerdot_columns]],
           [464, []],
           [463, [revert_use_cancel]],
           [462, [revert_encodings]],
           [461, [revert_new_libertines]],
           [460, [revert_kurier_fonts]],
           [459, [revert_IEEEtran_3]],
           [458, [revert_fragileframe, revert_newframes]],
           [457, [revert_captioninsets, revert_captionlayouts]],
           [456, [revert_use_stackrel]],
           [455, [revert_epigraph]],
           [454, [revert_frametitle]],
           [453, [revert_overprint]],
           [452, [revert_use_stmaryrd]],
           [451, [revert_beamerblocks]],
           [450, [revert_beamerargs, revert_beamerargs2, revert_beamerargs3, revert_beamerflex]],
           [449, [revert_garamondx, revert_garamondx_newtxmath]],
           [448, [revert_itemargs]],
           [447, [revert_literate]],
           [446, [revert_IEEEtran, revert_IEEEtran_2, revert_AASTeX, revert_AGUTeX, revert_IJMP, revert_SIGPLAN, revert_SIGGRAPH, revert_EuropeCV, revert_Initials, revert_ModernCV_3, revert_ModernCV_4]],
           [445, [revert_latexargs]],
           [444, [revert_uop]],
           [443, [revert_biolinum]],
           [442, []],
           [441, [revert_newtxmath]],
           [440, [revert_mdnomath]],
           [439, [revert_mathfonts]],
           [438, [revert_minionpro]],
           [437, [revert_ipadeco, revert_ipachar]],
           [436, [revert_texgyre]],
           [435, [revert_mathdesign]],
           [434, [revert_txtt]],
           [433, [revert_libertine]],
           [432, [revert_armenian]],
           [431, [revert_languages, revert_ancientgreek]],
           [430, [revert_use_amssymb]],
           [429, [revert_listoflistings]],
           [428, [revert_table_rotation]],
           [427, [revert_cell_rotation]],
           [426, [revert_tipa]],
           [425, [revert_verbatim]],
           [424, [revert_cancel]],
           [423, [revert_cite_engine_type]],
           [422, [revert_use_mathtools]],
           [421, [revert_use_packages]],
           [420, [revert_longtable_captions]],
           [419, [revert_biblio_style]],
           [418, [revert_australian]],
           [417, [revert_justification]],
           [416, [revert_japanese_encodings]],
           [415, [revert_negative_space, revert_math_spaces]],
           [414, [revert_undertilde]],
           [413, [revert_visible_space]]
          ]


# This module is driven by the lyx2lyx framework, not run directly.
if __name__ == "__main__":
    pass
|