mirror of
https://git.lyx.org/repos/lyx.git
synced 2024-11-30 05:12:40 +00:00
a8d19aeff5
git-svn-id: svn://svn.lyx.org/lyx/lyx-devel/trunk@24568 a592a061-630c-0410-9148-cb99ea01b6c8
2203 lines
80 KiB
Python
2203 lines
80 KiB
Python
# This file is part of lyx2lyx
|
|
# -*- coding: utf-8 -*-
|
|
# Copyright (C) 2007-2008 The LyX Team <lyx-devel@lists.lyx.org>
|
|
#
|
|
# This program is free software; you can redistribute it and/or
|
|
# modify it under the terms of the GNU General Public License
|
|
# as published by the Free Software Foundation; either version 2
|
|
# of the License, or (at your option) any later version.
|
|
#
|
|
# This program is distributed in the hope that it will be useful,
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
# GNU General Public License for more details.
|
|
#
|
|
# You should have received a copy of the GNU General Public License
|
|
# along with this program; if not, write to the Free Software
|
|
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
|
|
|
""" Convert files to the file format generated by lyx 1.6"""
|
|
|
|
import re
|
|
import unicodedata
|
|
import sys, os
|
|
|
|
from parser_tools import find_token, find_end_of, find_tokens, get_value, get_value_string
|
|
|
|
####################################################################
|
|
# Private helper functions
|
|
|
|
def find_end_of_inset(lines, i):
    """ Find end of inset, where lines[i] is included.

    Returns the index of the matching \\end_inset line, or -1 if not
    found (delegates to parser_tools.find_end_of)."""
    return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
|
|
|
|
def wrap_into_ert(string, src, dst):
    """Replace every occurrence of src in string by dst wrapped in an
    ERT inset, so raw LaTeX survives inside LyX text."""
    ert = ('\n\\begin_inset ERT\nstatus collapsed\n\\begin_layout Standard\n'
           + dst + '\n\\end_layout\n\\end_inset\n')
    return string.replace(src, ert)
|
|
|
|
def add_to_preamble(document, text):
    """ Add text (a list of lines) to the preamble if it is not already there.

    Only the first line is checked!"""
    if find_token(document.preamble, text[0], 0) == -1:
        document.preamble.extend(text)
|
|
|
|
# Convert a LyX length into a LaTeX length
def convert_len(len):
    "Translate a LyX percentage length unit into the matching LaTeX macro."
    units = {
        "text%":    "\\backslash\ntextwidth",
        "col%":     "\\backslash\ncolumnwidth",
        "page%":    "\\backslash\npagewidth",
        "line%":    "\\backslash\nlinewidth",
        "theight%": "\\backslash\ntextheight",
        "pheight%": "\\backslash\npageheight",
    }

    # Convert LyX units to LaTeX units: scale the number from percent
    # to a fraction and append the macro for the reference length.
    for unit, macro in units.items():
        if len.find(unit) != -1:
            len = '%f' % (len2value(len) / 100)
            len = len.strip('0') + macro
            break

    return len
|
|
|
|
# Return the value of len without the unit in numerical form.
def len2value(len):
    "Extract the numerical part of a length string; 1.0 when absent."
    match = re.search('([+-]?[0-9.]+)', len)
    if match is None:
        # No number means 1.0
        return 1.0
    return float(match.group(1))
|
|
|
|
# Unfortunately, this doesn't really work, since Standard isn't always default.
# But it's as good as we can do right now.
def find_default_layout(document, start, end):
    """Return the index of the first default-layout line in [start, end),
    trying the historical spellings in order; -1 if none is found."""
    for token in ("\\begin_layout Standard",
                  "\\begin_layout PlainLayout",
                  "\\begin_layout Plain Layout"):
        l = find_token(document.body, token, start, end)
        if l != -1:
            break
    return l
|
|
|
|
####################################################################
|
|
|
|
def get_option(document, m, option, default):
    """Return the quoted value of option on body line m, or default
    when the option is not present."""
    line = document.body[m]
    pos = line.find(option)
    if pos == -1:
        return default
    # take the text between the first pair of double quotes after option
    return line[pos:].split('"')[1]
|
|
|
|
def remove_option(document, m, option):
    """Delete option="value" (and the separator before it) from body
    line m; return the position the option was found at, or -1."""
    line = document.body[m]
    pos = line.find(option)
    if pos != -1:
        val = line[pos:].split('"')[1]
        # cut from the character before the option name through the
        # closing quote of its value
        cut = len(option + '="' + val + '"')
        document.body[m] = line[:pos - 1] + line[pos + cut:]
    return pos
|
|
|
|
def set_option(document, m, option, value):
    """Set option="value" on body line m, appending the option before
    the closing '>' when it is not yet present.  Returns the position
    just past option=" when it existed, -1 otherwise."""
    line = document.body[m]
    pos = line.find(option)
    if pos == -1:
        # not present: append before the trailing '>'
        document.body[m] = line[:-1] + ' ' + option + '="' + value + '">'
    else:
        oldval = line[pos:].split('"')[1]
        pos += len(option + '="')
        document.body[m] = line[:pos] + value + line[pos + len(oldval):]
    return pos
|
|
|
|
def convert_tablines(document):
    """Move table border info from <column>/<row> tags onto the <cell> tags.

    The old format stored leftline/rightline on columns and topline/
    bottomline on rows; the new format stores all four settings on each
    cell.  Cells inside a multicolumn span only get vertical lines at the
    edges of the span.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i)
        if i == -1:
            # LyX 1.3 inserted an extra space between \begin_inset
            # and Tabular so let us try if this is the case and fix it.
            i = find_token(document.body, "\\begin_inset  Tabular", i)
            if i == -1:
                return
            else:
                document.body[i] = "\\begin_inset Tabular"
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            # Advance past the broken inset; a bare `continue` would find
            # the same line again and loop forever.
            i += 1
            continue

        m = i + 1
        # row/column counts are the 2nd and 3rd quoted attributes of the
        # <lyxtabular ...> line right after \begin_inset Tabular
        nrows = int(document.body[i+1].split('"')[3])
        ncols = int(document.body[i+1].split('"')[5])

        # Collect per-column vertical line settings and strip them.
        col_info = []
        for k in range(ncols):
            m = find_token(document.body, "<column", m)
            left = get_option(document, m, 'leftline', 'false')
            right = get_option(document, m, 'rightline', 'false')
            col_info.append([left, right])
            remove_option(document, m, 'leftline')
            remove_option(document, m, 'rightline')
            m = m + 1

        # Collect per-row horizontal line settings and strip them.
        row_info = []
        for k in range(nrows):
            m = find_token(document.body, "<row", m)
            top = get_option(document, m, 'topline', 'false')
            bottom = get_option(document, m, 'bottomline', 'false')
            row_info.append([top, bottom])
            remove_option(document, m, 'topline')
            remove_option(document, m, 'bottomline')
            m = m + 1

        # Record each cell's multicolumn status:
        # '0' = plain, '1' = span start, '2' = span continuation.
        m = i + 1
        mc_info = []
        for k in range(nrows*ncols):
            m = find_token(document.body, "<cell", m)
            mc_info.append(get_option(document, m, 'multicolumn', '0'))
            m = m + 1

        # Write the collected line info onto every cell.
        m = i + 1
        for l in range(nrows):
            for k in range(ncols):
                m = find_token(document.body, '<cell', m)
                if mc_info[l*ncols + k] == '0':
                    r = set_option(document, m, 'topline', row_info[l][0])
                    r = set_option(document, m, 'bottomline', row_info[l][1])
                    r = set_option(document, m, 'leftline', col_info[k][0])
                    r = set_option(document, m, 'rightline', col_info[k][1])
                elif mc_info[l*ncols + k] == '1':
                    # first cell of a multicolumn: right line comes from
                    # the last column of the span, left line from this one
                    s = k + 1
                    while s < ncols and mc_info[l*ncols + s] == '2':
                        s = s + 1
                    if s < ncols and mc_info[l*ncols + s] != '1':
                        r = set_option(document, m, 'rightline', col_info[k][1])
                    if k > 0 and mc_info[l*ncols + k - 1] == '0':
                        r = set_option(document, m, 'leftline', col_info[k][0])
                m = m + 1
        i = j + 1
|
|
|
|
|
|
def revert_tablines(document):
    """Move border info from the <cell> tags back onto <column>/<row> tags.

    A column gets a left/right line only if every cell in that column has
    it; likewise a row gets a top/bottom line only if every cell agrees.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            # Advance past the broken inset; a bare `continue` would find
            # the same line again and loop forever.
            i += 1
            continue

        m = i + 1
        # row/column counts from the <lyxtabular ...> attribute line
        nrows = int(document.body[i+1].split('"')[3])
        ncols = int(document.body[i+1].split('"')[5])

        # Gather [top, bottom, left, right] of every cell in row-major order.
        lines = []
        for k in range(nrows*ncols):
            m = find_token(document.body, "<cell", m)
            top = get_option(document, m, 'topline', 'false')
            bottom = get_option(document, m, 'bottomline', 'false')
            left = get_option(document, m, 'leftline', 'false')
            right = get_option(document, m, 'rightline', 'false')
            lines.append([top, bottom, left, right])
            m = m + 1

        # Column lines: 'true' only if no cell in the column says 'false'.
        m = i + 1
        for k in range(ncols):
            m = find_token(document.body, "<column", m)
            left = 'true'
            for l in range(nrows):
                left = lines[l*ncols + k][2]
                if left == 'false':
                    break
            set_option(document, m, 'leftline', left)
            right = 'true'
            for l in range(nrows):
                right = lines[l*ncols + k][3]
                if right == 'false':
                    break
            set_option(document, m, 'rightline', right)
            m = m + 1

        # Row lines: 'true' only if no cell in the row says 'false'.
        for k in range(nrows):
            m = find_token(document.body, "<row", m)
            top = 'true'
            for l in range(ncols):
                top = lines[k*ncols + l][0]
                if top == 'false':
                    break
            set_option(document, m, 'topline', top)
            bottom = 'true'
            for l in range(ncols):
                bottom = lines[k*ncols + l][1]
                if bottom == 'false':
                    break
            set_option(document, m, 'bottomline', bottom)
            m = m + 1

        i = j + 1
|
|
|
|
|
|
def fix_wrong_tables(document):
    """Strip invalid multicolumn continuation flags from table cells.

    A cell marked multicolumn="2" (continuation) is only legal when the
    previous cell in the row starts or continues a multicolumn span;
    otherwise the attribute is removed.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            # Advance past the broken inset; a bare `continue` would find
            # the same line again and loop forever.
            i += 1
            continue

        m = i + 1
        # row/column counts from the <lyxtabular ...> attribute line
        nrows = int(document.body[i+1].split('"')[3])
        ncols = int(document.body[i+1].split('"')[5])

        for l in range(nrows):
            prev_multicolumn = 0
            for k in range(ncols):
                m = find_token(document.body, '<cell', m)

                if document.body[m].find('multicolumn') != -1:
                    multicol_cont = int(document.body[m].split('"')[1])

                    if multicol_cont == 2 and (k == 0 or prev_multicolumn == 0):
                        # orphaned continuation: cut the
                        # ' multicolumn="2"' attribute (chars 5..20)
                        document.body[m] = document.body[m][:5] + document.body[m][21:]
                        prev_multicolumn = 0
                    else:
                        prev_multicolumn = multicol_cont
                else:
                    prev_multicolumn = 0
                # advance past this cell; without this the search keeps
                # returning the same first cell for every k and l
                m = m + 1

        i = j + 1
|
|
|
|
|
|
def close_begin_deeper(document):
    "Append \\end_deeper lines to balance any unclosed \\begin_deeper."
    depth = 0
    i = 0
    while True:
        i = find_tokens(document.body, ["\\begin_deeper", "\\end_deeper"], i)
        if i == -1:
            break
        if document.body[i].startswith("\\begin_deeper"):
            depth += 1
        else:
            depth -= 1
        i += 1

    # insert the missing closers just before the trailing end-of-body lines
    document.body[-2:-2] = ["\\end_deeper"] * depth
|
|
|
|
|
|
def long_charstyle_names(document):
    "Prefix each CharStyle inset name with 'CharStyle:'."
    i = find_token(document.body, "\\begin_inset CharStyle", 0)
    while i != -1:
        document.body[i] = document.body[i].replace("CharStyle ", "CharStyle CharStyle:")
        i = find_token(document.body, "\\begin_inset CharStyle", i + 1)
|
|
|
|
def revert_long_charstyle_names(document):
    "Drop the 'CharStyle:' prefix from CharStyle inset names."
    i = find_token(document.body, "\\begin_inset CharStyle", 0)
    while i != -1:
        document.body[i] = document.body[i].replace("CharStyle CharStyle:", "CharStyle")
        i = find_token(document.body, "\\begin_inset CharStyle", i + 1)
|
|
|
|
|
|
def axe_show_label(document):
    # Replace the show_label setting of CharStyle insets by a status line:
    # show_label true -> "status open", show_label false -> "status collapsed".
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CharStyle", i)
        if i == -1:
            return
        if document.body[i + 1].find("show_label") != -1:
            if document.body[i + 1].find("true") != -1:
                document.body[i + 1] = "status open"
                # drop the line following show_label — presumably the old
                # status line; TODO confirm against the 1.5 file format
                del document.body[ i + 2]
            else:
                if document.body[i + 1].find("false") != -1:
                    document.body[i + 1] = "status collapsed"
                    del document.body[ i + 2]
                else:
                    document.warning("Malformed LyX document: show_label neither false nor true.")
        else:
            document.warning("Malformed LyX document: show_label missing in CharStyle.")

        i += 1
|
|
|
|
|
|
def revert_show_label(document):
    "Reinsert a show_label line in front of each CharStyle status line."
    i = find_token(document.body, "\\begin_inset CharStyle", 0)
    while i != -1:
        status = document.body[i + 1]
        if status.find("status open") != -1:
            document.body.insert(i + 1, "show_label true")
        elif status.find("status collapsed") != -1:
            document.body.insert(i + 1, "show_label false")
        else:
            document.warning("Malformed LyX document: no legal status line in CharStyle.")
        i = find_token(document.body, "\\begin_inset CharStyle", i + 1)
|
|
|
|
def revert_begin_modules(document):
    "Remove any \\begin_modules ... \\end_modules section from the header."
    i = 0
    while True:
        i = find_token(document.header, "\\begin_modules", i)
        if i == -1:
            return
        j = find_end_of(document.header, i, "\\begin_modules", "\\end_modules")
        if j == -1:
            # this should not happen
            break
        # delete the whole section including both delimiter lines
        del document.header[i : j + 1]
|
|
|
|
def convert_flex(document):
    "Convert CharStyle to Flex"
    i = find_token(document.body, "\\begin_inset CharStyle", 0)
    while i != -1:
        document.body[i] = document.body[i].replace('\\begin_inset CharStyle', '\\begin_inset Flex')
        # the rewritten line no longer matches, so searching from i is safe
        i = find_token(document.body, "\\begin_inset CharStyle", i)
|
|
|
|
def revert_flex(document):
    "Convert Flex to CharStyle"
    i = find_token(document.body, "\\begin_inset Flex", 0)
    while i != -1:
        document.body[i] = document.body[i].replace('\\begin_inset Flex', '\\begin_inset CharStyle')
        # the rewritten line no longer matches, so searching from i is safe
        i = find_token(document.body, "\\begin_inset Flex", i)
|
|
|
|
|
|
# Discard PDF options for hyperref
def revert_pdf_options(document):
    "Revert PDF options for hyperref."
    # store the PDF options and delete the entries from the Lyx file
    i = 0
    hyperref = False
    # Each option is collected as the ready-made hyperref key=value text
    # (with leading ',\n ' separator where needed), empty when unset.
    title = ""
    author = ""
    subject = ""
    keywords = ""
    bookmarks = ""
    bookmarksnumbered = ""
    bookmarksopen = ""
    bookmarksopenlevel = ""
    breaklinks = ""
    pdfborder = ""
    colorlinks = ""
    backref = ""
    pagebackref = ""
    pagemode = ""
    otheroptions = ""
    i = find_token(document.header, "\\use_hyperref", i)
    if i != -1:
        hyperref = get_value(document.header, "\\use_hyperref", i) == 'true'
        del document.header[i]
    i = find_token(document.header, "\\pdf_store_options", i)
    if i != -1:
        del document.header[i]
    i = find_token(document.header, "\\pdf_title", 0)
    if i != -1:
        title = get_value_string(document.header, '\\pdf_title', 0, 0, True)
        title = ' pdftitle={' + title + '}'
        del document.header[i]
    i = find_token(document.header, "\\pdf_author", 0)
    if i != -1:
        author = get_value_string(document.header, '\\pdf_author', 0, 0, True)
        # only the first hypersetup entry omits the leading comma
        if title == "":
            author = ' pdfauthor={' + author + '}'
        else:
            author = ',\n pdfauthor={' + author + '}'
        del document.header[i]
    i = find_token(document.header, "\\pdf_subject", 0)
    if i != -1:
        subject = get_value_string(document.header, '\\pdf_subject', 0, 0, True)
        if title == "" and author == "":
            subject = ' pdfsubject={' + subject + '}'
        else:
            subject = ',\n pdfsubject={' + subject + '}'
        del document.header[i]
    i = find_token(document.header, "\\pdf_keywords", 0)
    if i != -1:
        keywords = get_value_string(document.header, '\\pdf_keywords', 0, 0, True)
        if title == "" and author == "" and subject == "":
            keywords = ' pdfkeywords={' + keywords + '}'
        else:
            keywords = ',\n pdfkeywords={' + keywords + '}'
        del document.header[i]
    i = find_token(document.header, "\\pdf_bookmarks", 0)
    if i != -1:
        bookmarks = get_value_string(document.header, '\\pdf_bookmarks', 0)
        bookmarks = ',\n bookmarks=' + bookmarks
        del document.header[i]
    i = find_token(document.header, "\\pdf_bookmarksnumbered", i)
    if i != -1:
        bookmarksnumbered = get_value_string(document.header, '\\pdf_bookmarksnumbered', 0)
        bookmarksnumbered = ',\n bookmarksnumbered=' + bookmarksnumbered
        del document.header[i]
    i = find_token(document.header, "\\pdf_bookmarksopen", i)
    if i != -1:
        bookmarksopen = get_value_string(document.header, '\\pdf_bookmarksopen', 0)
        bookmarksopen = ',\n bookmarksopen=' + bookmarksopen
        del document.header[i]
    i = find_token(document.header, "\\pdf_bookmarksopenlevel", i)
    if i != -1:
        bookmarksopenlevel = get_value_string(document.header, '\\pdf_bookmarksopenlevel', 0, 0, True)
        bookmarksopenlevel = ',\n bookmarksopenlevel=' + bookmarksopenlevel
        del document.header[i]
    i = find_token(document.header, "\\pdf_breaklinks", i)
    if i != -1:
        breaklinks = get_value_string(document.header, '\\pdf_breaklinks', 0)
        breaklinks = ',\n breaklinks=' + breaklinks
        del document.header[i]
    i = find_token(document.header, "\\pdf_pdfborder", i)
    if i != -1:
        pdfborder = get_value_string(document.header, '\\pdf_pdfborder', 0)
        # pdfborder true means "no border" in hyperref terms
        if pdfborder == 'true':
            pdfborder = ',\n pdfborder={0 0 0}'
        else:
            pdfborder = ',\n pdfborder={0 0 1}'
        del document.header[i]
    i = find_token(document.header, "\\pdf_colorlinks", i)
    if i != -1:
        colorlinks = get_value_string(document.header, '\\pdf_colorlinks', 0)
        colorlinks = ',\n colorlinks=' + colorlinks
        del document.header[i]
    i = find_token(document.header, "\\pdf_backref", i)
    if i != -1:
        backref = get_value_string(document.header, '\\pdf_backref', 0)
        backref = ',\n backref=' + backref
        del document.header[i]
    i = find_token(document.header, "\\pdf_pagebackref", i)
    if i != -1:
        pagebackref = get_value_string(document.header, '\\pdf_pagebackref', 0)
        pagebackref = ',\n pagebackref=' + pagebackref
        del document.header[i]
    i = find_token(document.header, "\\pdf_pagemode", 0)
    if i != -1:
        pagemode = get_value_string(document.header, '\\pdf_pagemode', 0)
        pagemode = ',\n pdfpagemode=' + pagemode
        del document.header[i]
    i = find_token(document.header, "\\pdf_quoted_options", 0)
    if i != -1:
        otheroptions = get_value_string(document.header, '\\pdf_quoted_options', 0, 0, True)
        if title == "" and author == "" and subject == "" and keywords == "":
            otheroptions = ' ' + otheroptions
        else:
            otheroptions = ',\n ' + otheroptions
        del document.header[i]

    # write to the preamble when hyperref was used
    if hyperref == True:
        # preamble write preparations
        # bookmark numbers are only output when they are turned on
        if bookmarksopen == ',\n bookmarksopen=true':
            bookmarksopen = bookmarksopen + bookmarksopenlevel
        if bookmarks == ',\n bookmarks=true':
            bookmarks = bookmarks + bookmarksnumbered + bookmarksopen
        else:
            bookmarks = bookmarks
        # hypersetup is only output when there are things to be set up
        setupstart = '\\hypersetup{%\n'
        setupend = ' }\n'
        if otheroptions == "" and title == "" and author == ""\
           and subject == "" and keywords == "":
            setupstart = ""
            setupend = ""
        # write the preamble
        add_to_preamble(document,
            ['% Commands inserted by lyx2lyx for PDF properties',
             '\\usepackage[unicode=true'
             + bookmarks
             + breaklinks
             + pdfborder
             + backref
             + pagebackref
             + colorlinks
             + pagemode
             + ']\n'
             ' {hyperref}\n'
             + setupstart
             + title
             + author
             + subject
             + keywords
             + otheroptions
             + setupend])
|
|
|
|
|
|
def remove_inzip_options(document):
    "Remove inzipName and embed options from the Graphics inset"
    i = 0
    while 1:
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            # should not happen
            document.warning("Malformed LyX document: Could not find end of graphics inset.")
            # skip the broken inset rather than searching with j == -1
            i = i + 1
            continue
        # If there's a inzip param, just remove that
        k = find_token(document.body, "\tinzipName", i + 1, j)
        if k != -1:
            # The embed option must follow the inzipName option; delete
            # both lines in one slice — deleting body[k] and then
            # body[k+1] would skip the (shifted) embed line and remove
            # the wrong one instead.
            del document.body[k : k + 2]
        i = i + 1
|
|
|
|
|
|
def convert_inset_command(document):
    r"""
    Convert:
        \begin_inset LatexCommand cmd
    to
        \begin_inset CommandInset InsetType
        LatexCommand cmd

    (The docstring is raw so that \b is not interpreted as a backspace
    escape.)
    """
    i = 0
    # compile once, outside the loop
    r = re.compile(r'\\begin_inset LatexCommand (.*)$')
    while 1:
        i = find_token(document.body, "\\begin_inset LatexCommand", i)
        if i == -1:
            return
        line = document.body[i]
        m = r.match(line)
        cmdName = m.group(1)
        insetName = ""
        #this is adapted from factory.cpp
        if cmdName[0:4].lower() == "cite":
            insetName = "citation"
        elif cmdName == "url" or cmdName == "htmlurl":
            insetName = "url"
        elif cmdName[-3:] == "ref":
            insetName = "ref"
        elif cmdName == "tableofcontents":
            insetName = "toc"
        elif cmdName == "printnomenclature":
            insetName = "nomencl_print"
        elif cmdName == "printindex":
            insetName = "index_print"
        else:
            insetName = cmdName
        insertion = ["\\begin_inset CommandInset " + insetName, "LatexCommand " + cmdName]
        document.body[i : i+1] = insertion
|
|
|
|
|
|
def revert_inset_command(document):
    r"""
    Convert:
        \begin_inset CommandInset InsetType
        LatexCommand cmd
    to
        \begin_inset LatexCommand cmd

    Some insets may end up being converted to insets earlier versions of LyX
    will not be able to recognize. Not sure what to do about that.
    (The docstring is raw so that \b is not interpreted as a backspace
    escape.)
    """
    i = 0
    # compile once, outside the loop
    r = re.compile(r'LatexCommand\s+(.*)$')
    while 1:
        i = find_token(document.body, "\\begin_inset CommandInset", i)
        if i == -1:
            return
        nextline = document.body[i+1]
        m = r.match(nextline)
        if not m:
            document.warning("Malformed LyX document: Missing LatexCommand in " + document.body[i] + ".")
            # advance past the broken inset; a bare `continue` would find
            # the same line again and loop forever
            i += 1
            continue
        cmdName = m.group(1)
        insertion = ["\\begin_inset LatexCommand " + cmdName]
        document.body[i : i+2] = insertion
|
|
|
|
|
|
def convert_wrapfig_options(document):
    "Convert optional options for wrap floats (wrapfig)."
    # adds the tokens "lines", "placement", and "overhang"
    i = find_token(document.body, "\\begin_inset Wrap figure", 0)
    while i != -1:
        document.body.insert(i + 1, "lines 0")
        j = find_token(document.body, "placement", i)
        # placement can be already set or not; if not, set it
        if j != i + 2:
            document.body.insert(i + 2, "placement o")
        document.body.insert(i + 3, "overhang 0col%")
        i = find_token(document.body, "\\begin_inset Wrap figure", i + 1)
|
|
|
|
|
|
def revert_wrapfig_options(document):
    "Revert optional options for wrap floats (wrapfig)."
    i = 0
    while True:
        i = find_token(document.body, "lines", i)
        if i == -1:
            return
        j = find_token(document.body, "overhang", i + 1)
        if j == -1:
            return
        if j != i + 2:
            document.warning("Malformed LyX document: Couldn't find overhang parameter of wrap float.")
        # remove the "lines" line, then the "overhang" line (which has
        # shifted up by one after the first deletion)
        del document.body[i]
        del document.body[j - 1]
        i = i + 1
|
|
|
|
|
|
def convert_latexcommand_index(document):
    "Convert from LatexCommand form to collapsable form."
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset index", i)
        if i == -1:
            return
        # NOTE(review): this returns (rather than continues) on the first
        # non-index CommandInset, ending the whole scan — confirm intended.
        if document.body[i + 1] != "LatexCommand index": # Might also be index_print
            return
        # the command argument with the 'name "' prefix and quotes stripped
        fullcontent = document.body[i + 2][6:].strip('"')
        document.body[i:i + 2] = ["\\begin_inset Index",
          "status collapsed",
          "\\begin_layout Standard"]
        # Put here the conversions needed from LaTeX string to LyXText.
        # Here we do a minimal conversion to prevent crashes and data loss.
        # Manual patch-up may be needed.
        # Umlauted characters (most common ones, can be extended):
        fullcontent = fullcontent.replace(r'\\\"a', u'ä').replace(r'\\\"o', u'ö').replace(r'\\\"u', u'ü')
        # Generic, \" -> ":
        fullcontent = wrap_into_ert(fullcontent, r'\"', '"')
        #fullcontent = fullcontent.replace(r'\"', '\n\\begin_inset ERT\nstatus collapsed\n\\begin_layout standard\n"\n\\end_layout\n\\end_inset\n')
        # Math: split the content at each $...$ pair and turn the math
        # part into a Formula inset, the text part into plain/ERT lines.
        r = re.compile('^(.*?)(\$.*?\$)(.*)')
        g = fullcontent
        while r.match(g):
            m = r.match(g)
            s = m.group(1)
            f = m.group(2).replace('\\\\', '\\')
            g = m.group(3)
            if s:
                # this is non-math!
                s = wrap_into_ert(s, r'\\', '\\backslash')
                s = wrap_into_ert(s, '{', '{')
                s = wrap_into_ert(s, '}', '}')
                document.body.insert(i + 3, s)
                i += 1
            document.body.insert(i + 3, "\\begin_inset Formula " + f)
            document.body.insert(i + 4, "\\end_inset")
            i += 2
        # Generic, \\ -> \backslash:
        g = wrap_into_ert(g, r'\\', '\\backslash{}')
        g = wrap_into_ert(g, '{', '{')
        g = wrap_into_ert(g, '}', '}')
        document.body.insert(i + 3, g)
        document.body[i + 4] = "\\end_layout"
        i = i + 5
|
|
|
|
|
|
def revert_latexcommand_index(document):
    "Revert from collapsable form to LatexCommand form."
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Index", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            return
        # drop the closing \end_inset and \end_layout of the Index inset
        del document.body[j - 1]
        del document.body[j - 2] # \end_layout
        document.body[i] = "\\begin_inset CommandInset index"
        document.body[i + 1] = "LatexCommand index"
        # clean up multiline stuff: flatten the inset content back into a
        # single LaTeX-ish string by stripping the known inset/layout markers
        content = ""
        for k in range(i + 3, j - 2):
            line = document.body[k]
            if line.startswith("\\begin_inset ERT"):
                line = line[16:]
            if line.startswith("\\begin_inset Formula"):
                line = line[20:]
            if line.startswith("\\begin_layout Standard"):
                line = line[22:]
            if line.startswith("\\begin_layout Plain"):
                line = line[22:]
            if line.startswith("\\end_layout"):
                line = line[11:]
            if line.startswith("\\end_inset"):
                line = line[10:]
            if line.startswith("status collapsed"):
                line = line[16:]
            # re-encode the umlauts that the convert direction decoded
            line = line.replace(u'ä', r'\\\"a').replace(u'ö', r'\\\"o').replace(u'ü', r'\\\"u')
            content = content + line;
        document.body[i + 3] = "name " + '"' + content + '"'
        # delete the now-flattened lines (each del shifts the rest up,
        # so the same index is deleted repeatedly)
        for k in range(i + 4, j - 2):
            del document.body[i + 4]
        document.body.insert(i + 4, "")
        del document.body[i + 2] # \begin_layout standard
        i = i + 5
|
|
|
|
|
|
def revert_wraptable(document):
    "Revert wrap table to wrap figure."
    i = find_token(document.body, "\\begin_inset Wrap table", 0)
    while i != -1:
        document.body[i] = document.body[i].replace('\\begin_inset Wrap table', '\\begin_inset Wrap figure')
        i = find_token(document.body, "\\begin_inset Wrap table", i + 1)
|
|
|
|
|
|
def revert_vietnamese(document):
    "Set language Vietnamese to English"
    # Set document language from Vietnamese to English
    if document.language == "vietnamese":
        document.language = "english"
        i = find_token(document.header, "\\language", 0)
        if i != -1:
            document.header[i] = "\\language english"
    # rewrite every language switch in the body as well
    j = find_token(document.body, "\\lang vietnamese", 0)
    while j != -1:
        document.body[j] = document.body[j].replace("\\lang vietnamese", "\\lang english")
        j = find_token(document.body, "\\lang vietnamese", j + 1)
|
|
|
|
|
|
def revert_japanese(document):
    "Set language japanese-plain to japanese"
    # Set document language from japanese-plain to japanese
    if document.language == "japanese-plain":
        document.language = "japanese"
        i = find_token(document.header, "\\language", 0)
        if i != -1:
            document.header[i] = "\\language japanese"
    # rewrite every language switch in the body as well
    j = find_token(document.body, "\\lang japanese-plain", 0)
    while j != -1:
        document.body[j] = document.body[j].replace("\\lang japanese-plain", "\\lang japanese")
        j = find_token(document.body, "\\lang japanese-plain", j + 1)
|
|
|
|
|
|
def revert_japanese_encoding(document):
    "Set input encoding form EUC-JP-plain to EUC-JP etc."
    # Each *-plain encoding is mapped to its plain counterpart.
    i = find_token(document.header, "\\inputencoding EUC-JP-plain", 0)
    if i != -1:
        document.header[i] = "\\inputencoding EUC-JP"
    j = find_token(document.header, "\\inputencoding JIS-plain", 0)
    if j != -1:
        document.header[j] = "\\inputencoding JIS"
    k = find_token(document.header, "\\inputencoding SJIS-plain", 0)
    if k != -1: # convert to UTF8 since there is currently no SJIS encoding
        document.header[k] = "\\inputencoding UTF8"
|
|
|
|
|
|
def revert_inset_info(document):
    'Replace info inset with its content'
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Info', pos)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            # should not happen
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        itype = 'unknown'
        iarg = ''
        # pick up the "type" and "arg" parameters of the inset
        for k in range(pos, end + 1):
            cur = document.body[k]
            if cur.startswith("arg"):
                iarg = cur[3:].strip().strip('"')
            if cur.startswith("type"):
                itype = cur[4:].strip().strip('"')
        # I think there is a newline after \\end_inset, which should be removed.
        if document.body[end + 1].strip() == "":
            document.body[pos : (end + 2)] = [itype + ':' + iarg]
        else:
            document.body[pos : (end + 1)] = [itype + ':' + iarg]
|
|
|
|
|
|
def convert_pdf_options(document):
    """Set the pdfusetitle tag, delete the pdf_store_options line, and
    strip the quotes from bookmarksopenlevel."""
    if get_value(document.header, "\\use_hyperref", 0, default = "0") == "1":
        k = find_token(document.header, "\\use_hyperref", 0)
        document.header.insert(k + 1, "\\pdf_pdfusetitle true")
    k = find_token(document.header, "\\pdf_store_options", 0)
    if k != -1:
        del document.header[k]
    i = find_token(document.header, "\\pdf_bookmarksopenlevel", k)
    if i != -1:
        document.header[i] = document.header[i].replace('"', '')
|
|
|
|
|
|
def revert_pdf_options_2(document):
    """Remove the pdfusetitle tag and put quotes back around the
    bookmarksopenlevel value."""
    k = find_token(document.header, "\\use_hyperref", 0)
    i = find_token(document.header, "\\pdf_pdfusetitle", k)
    if i != -1:
        del document.header[i]
    i = find_token(document.header, "\\pdf_bookmarksopenlevel", k)
    if i == -1:
        return
    parts = document.header[i].split()
    parts[1] = ' "' + parts[1] + '"'
    document.header[i] = ''.join(parts)
|
|
|
|
|
|
def convert_htmlurl(document):
    'Convert "htmlurl" to "href" insets for docbook'
    if document.backend != "docbook":
        return
    i = find_token(document.body, "\\begin_inset CommandInset url", 0)
    while i != -1:
        document.body[i] = "\\begin_inset CommandInset href"
        document.body[i + 1] = "LatexCommand href"
        i = find_token(document.body, "\\begin_inset CommandInset url", i + 1)
|
|
|
|
|
|
def convert_url(document):
    'Convert url insets to url charstyles'
    if document.backend == "docbook":
        return
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset url", i)
        if i == -1:
            break
        n = find_token(document.body, "name", i)
        if n == i + 2:
            # place the URL name in typewriter before the new URL insert
            # grab the name 'bla' from the e.g. the line 'name "bla"',
            # therefore start with the 6th character
            name = document.body[n][6:-1]
            newname = [name + " "]
            document.body[i:i] = newname
            i = i + 1
        j = find_token(document.body, "target", i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find target for url inset")
            # NOTE(review): j is -1 here, so the next search restarts near
            # the end of the list — confirm this is the intended recovery
            i = j
            continue
        # the URL itself, quotes stripped from 'target "..."'
        target = document.body[j][8:-1]
        k = find_token(document.body, "\\end_inset", j)
        if k == -1:
            document.warning("Malformed LyX document: Can't find end of url inset")
            i = k
            continue
        # replace the command inset by a Flex URL charstyle holding the target
        newstuff = ["\\begin_inset Flex URL",
            "status collapsed", "",
            "\\begin_layout Standard",
            "",
            target,
            "\\end_layout",
            ""]
        document.body[i:k] = newstuff
        i = k
|
|
|
|
def convert_ams_classes(document):
    """Convert the amsart-plain/amsart-seq textclasses to plain amsart
    plus the matching Theorems module, and add the AMS-Extended module
    when any of its environments is used in the body."""
    tc = document.textclass
    if (tc != "amsart" and tc != "amsart-plain" and
        tc != "amsart-seq" and tc != "amsbook"):
        return
    if tc == "amsart-plain":
        document.textclass = "amsart"
        document.set_textclass()
        document.add_module("Theorems (Starred)")
        return
    if tc == "amsart-seq":
        document.textclass = "amsart"
        document.set_textclass()
        document.add_module("Theorems (AMS)")

    #Now we want to see if any of the environments in the extended theorems
    #module were used in this document. If so, we'll add that module, too.
    layouts = ["Criterion", "Algorithm", "Axiom", "Condition", "Note", \
        "Notation", "Summary", "Acknowledgement", "Conclusion", "Fact", \
        "Assumption"]

    # matches the layout name, with any trailing '*' and whitespace dropped
    r = re.compile(r'^\\begin_layout (.*?)\*?\s*$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout", i)
        if i == -1:
            return
        m = r.match(document.body[i])
        if m == None:
            document.warning("Weirdly formed \\begin_layout at line %d of body!" % i)
            i += 1
            continue
        m = m.group(1)
        if layouts.count(m) != 0:
            # one hit is enough; the module only needs to be added once
            document.add_module("Theorems (AMS-Extended)")
            return
        i += 1
|
|
|
|
def revert_href(document):
    'Reverts hyperlink insets (href) to url insets (url)'
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset href", i)
        if i == -1:
            return
        document.body[i] = "\\begin_inset CommandInset url"
        document.body[i + 1] = "LatexCommand url"
        i = i + 2
|
|
|
|
|
|
def convert_include(document):
    "Rewrite old single-line Include insets as CommandInset include."
    inset_re = re.compile(r'\\begin_inset Include\s+\\([^{]+){([^}]*)}(?:\[(.*)\])?')
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Include", pos)
        if pos == -1:
            return
        preview = document.body[pos + 1]
        match = inset_re.match(document.body[pos])
        if match is None:
            document.warning("Unable to match line " + str(pos) + " of body!")
            pos += 1
            continue
        command = match.group(1)
        filename = match.group(2)
        listing_opts = match.group(3)
        replacement = ["\\begin_inset CommandInset include",
                       "LatexCommand " + command, preview,
                       "filename \"" + filename + "\""]
        # Listings insets carry an optional parameter block.
        if listing_opts:
            replacement.append("lstparams " + '"' + listing_opts + '"')
        document.body[pos : pos + 2] = replacement
        pos += len(replacement)
|
|
|
|
|
def revert_include(document):
    'Reverts include insets to old format.'
    i = 0
    r1 = re.compile('LatexCommand (.+)')
    r2 = re.compile('filename (.+)')
    # FIX: the new format (see convert_include) writes the listings options
    # as `lstparams "..."', not `options ...'; the old pattern never matched,
    # so the options line was left behind and the options were lost.
    r3 = re.compile(r'lstparams "(.*)"')
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset include", i)
        if i == -1:
            return
        previewline = document.body[i + 1]
        m = r1.match(document.body[i + 2])
        if m == None:
            document.warning("Malformed LyX document: No LatexCommand line for `" +
                document.body[i] + "' on line " + str(i) + ".")
            i += 1
            continue
        cmd = m.group(1)
        m = r2.match(document.body[i + 3])
        if m == None:
            document.warning("Malformed LyX document: No filename line for `" + \
                document.body[i] + "' on line " + str(i) + ".")
            i += 2
            continue
        # FIX: the new format quotes the filename; strip the quotes so the
        # round-trip restores the old `\command{file}' form exactly.
        fn = m.group(1).strip('"')
        options = ""
        numlines = 4
        if (cmd == "lstinputlisting"):
            m = r3.match(document.body[i + 4])
            if m != None:
                options = m.group(1)
                numlines = 5
        newline = "\\begin_inset Include \\" + cmd + "{" + fn + "}"
        if options:
            newline += ("[" + options + "]")
        insertion = [newline, previewline]
        document.body[i : i + numlines] = insertion
        i += 2
|
|
|
|
|
def revert_albanian(document):
    "Fall back from Albanian (unknown in 1.5) to English."
    if document.language != "albanian":
        return
    document.language = "english"
    # Fix the header's language declaration.
    pos = find_token(document.header, "\\language", 0)
    if pos != -1:
        document.header[pos] = "\\language english"
    # Rewrite every inline language switch in the body.
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang albanian", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\lang albanian", "\\lang english")
        pos += 1
|
|
|
|
|
def revert_lowersorbian(document):
    "Fall back from Lower Sorbian (unknown in 1.5) to English."
    if document.language != "lowersorbian":
        return
    document.language = "english"
    # Fix the header's language declaration.
    pos = find_token(document.header, "\\language", 0)
    if pos != -1:
        document.header[pos] = "\\language english"
    # Rewrite every inline language switch in the body.
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang lowersorbian", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\lang lowersorbian", "\\lang english")
        pos += 1
|
|
|
|
|
def revert_uppersorbian(document):
    "Rename uppersorbian back to usorbian, the name LyX 1.5 used."
    if document.language != "uppersorbian":
        return
    document.language = "usorbian"
    # Fix the header's language declaration.
    pos = find_token(document.header, "\\language", 0)
    if pos != -1:
        document.header[pos] = "\\language usorbian"
    # Rewrite every inline language switch in the body.
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang uppersorbian", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\lang uppersorbian", "\\lang usorbian")
        pos += 1
|
|
|
|
|
def convert_usorbian(document):
    "Rename the 1.5 language usorbian to its new name uppersorbian."
    if document.language != "usorbian":
        return
    document.language = "uppersorbian"
    # Fix the header's language declaration.
    pos = find_token(document.header, "\\language", 0)
    if pos != -1:
        document.header[pos] = "\\language uppersorbian"
    # Rewrite every inline language switch in the body.
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang usorbian", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\lang usorbian", "\\lang uppersorbian")
        pos += 1
|
|
|
|
|
def revert_macro_optional_params(document):
    "Convert macro definitions with optional parameters into ERTs"
    # Stub to convert macro definitions with one or more optional parameters
    # into uninterpreted ERT insets
    # NOTE(review): intentionally a no-op placeholder — the conversion was
    # never implemented, so such macro definitions pass through unchanged.
|
|
|
|
|
def revert_hyperlinktype(document):
    'Reverts hyperlink type'
    # Drop the `type' parameter that the 1.6 hyperlink inset added: whenever
    # a `type' line immediately follows a `target' line, delete it.
    i = 0
    j = 0
    while True:
        # NOTE(review): find_token matches by line prefix, so ordinary body
        # text beginning with "target"/"type" can match too; the adjacency
        # check below (j == i + 1) is what keeps this from misfiring.
        i = find_token(document.body, "target", i)
        if i == -1:
            return
        j = find_token(document.body, "type", i)
        if j == -1:
            return
        # Only delete when `type' directly follows `target', i.e. when the
        # pair really belongs to one hyperlink inset.
        if j == i + 1:
            del document.body[j]
        i = i + 1
|
|
|
|
|
def revert_pagebreak(document):
    "Replace a \\pagebreak line with an equivalent ERT inset."
    pos = 0
    while True:
        pos = find_token(document.body, "\\pagebreak", pos)
        if pos == -1:
            return
        # The whole body line becomes one ERT element producing \pagebreak{}.
        document.body[pos] = ('\\begin_inset ERT\nstatus collapsed\n\n'
                              '\\begin_layout Standard\n\n\n\\backslash\n'
                              'pagebreak{}\n\\end_layout\n\n\\end_inset\n\n')
        pos += 1
|
|
|
|
|
def revert_linebreak(document):
    "Replace a \\linebreak line with an equivalent ERT inset."
    pos = 0
    while True:
        pos = find_token(document.body, "\\linebreak", pos)
        if pos == -1:
            return
        # The whole body line becomes one ERT element producing \linebreak{}.
        document.body[pos] = ('\\begin_inset ERT\nstatus collapsed\n\n'
                              '\\begin_layout Standard\n\n\n\\backslash\n'
                              'linebreak{}\n\\end_layout\n\n\\end_inset\n\n')
        pos += 1
|
|
|
|
|
def revert_latin(document):
    "Fall back from Latin (unknown in 1.5) to English."
    if document.language != "latin":
        return
    document.language = "english"
    # Fix the header's language declaration.
    pos = find_token(document.header, "\\language", 0)
    if pos != -1:
        document.header[pos] = "\\language english"
    # Rewrite every inline language switch in the body.
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang latin", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\lang latin", "\\lang english")
        pos += 1
|
|
|
|
|
def revert_samin(document):
    "Fall back from North Sami (unknown in 1.5) to English."
    if document.language != "samin":
        return
    document.language = "english"
    # Fix the header's language declaration.
    pos = find_token(document.header, "\\language", 0)
    if pos != -1:
        document.header[pos] = "\\language english"
    # Rewrite every inline language switch in the body.
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang samin", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\lang samin", "\\lang english")
        pos += 1
|
|
|
|
|
def convert_serbocroatian(document):
    "Rename Serbocroatian to Croatian, which is what LyX 1.5 really meant."
    if document.language != "serbocroatian":
        return
    document.language = "croatian"
    # Fix the header's language declaration.
    pos = find_token(document.header, "\\language", 0)
    if pos != -1:
        document.header[pos] = "\\language croatian"
    # Rewrite every inline language switch in the body.
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang serbocroatian", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\lang serbocroatian", "\\lang croatian")
        pos += 1
|
|
|
|
|
def convert_framed_notes(document):
    "Turn Framed/Shaded notes into the equivalent Box insets."
    pos = 0
    while True:
        pos = find_tokens(document.body,
                          ["\\begin_inset Note Framed", "\\begin_inset Note Shaded"],
                          pos)
        if pos == -1:
            return
        # Switch the inset kind and supply the box parameter block that the
        # new format requires, as a single inserted element.
        document.body[pos] = document.body[pos].replace("\\begin_inset Note", "\\begin_inset Box")
        document.body.insert(pos + 1,
            'position "t"\nhor_pos "c"\nhas_inner_box 0\ninner_pos "t"\n'
            'use_parbox 0\nwidth "100col%"\nspecial "none"\nheight "1in"\n'
            'height_special "totalheight"')
        pos += 1
|
|
|
|
|
def convert_module_names(document):
    "Rename the old descriptive module names to their new file names."
    modulemap = { 'Braille' : 'braille', 'Endnote' : 'endnotes', 'Foot to End' : 'foottoend',\
        'Hanging' : 'hanging', 'Linguistics' : 'linguistics', 'Logical Markup' : 'logicalmkup', \
        'Theorems (AMS-Extended)' : 'theorems-ams-extended', 'Theorems (AMS)' : 'theorems-ams', \
        'Theorems (Order By Chapter)' : 'theorems-chap', 'Theorems (Order By Section)' : 'theorems-sec', \
        'Theorems (Starred)' : 'theorems-starred', 'Theorems' : 'theorems-std' }
    modlist = document.get_module_list()
    if len(modlist) == 0:
        return
    newmodlist = []
    for mod in modlist:
        # FIX: dict.has_key() was removed in Python 3; `in' works everywhere.
        if mod in modulemap:
            newmodlist.append(modulemap[mod])
        else:
            # Keep unknown modules unchanged so no information is lost.
            document.warning("Can't find module %s in the module map!" % mod)
            newmodlist.append(mod)
    document.set_module_list(newmodlist)
|
|
|
|
|
def revert_module_names(document):
    "Rename the new module file names back to the old descriptive names."
    modulemap = { 'braille' : 'Braille', 'endnotes' : 'Endnote', 'foottoend' : 'Foot to End',\
        'hanging' : 'Hanging', 'linguistics' : 'Linguistics', 'logicalmkup' : 'Logical Markup', \
        'theorems-ams-extended' : 'Theorems (AMS-Extended)', 'theorems-ams' : 'Theorems (AMS)', \
        'theorems-chap' : 'Theorems (Order By Chapter)', 'theorems-sec' : 'Theorems (Order By Section)', \
        'theorems-starred' : 'Theorems (Starred)', 'theorems-std' : 'Theorems'}
    modlist = document.get_module_list()
    if len(modlist) == 0:
        return
    newmodlist = []
    for mod in modlist:
        # FIX: dict.has_key() was removed in Python 3; `in' works everywhere.
        if mod in modulemap:
            newmodlist.append(modulemap[mod])
        else:
            # Keep unknown modules unchanged so no information is lost.
            document.warning("Can't find module %s in the module map!" % mod)
            newmodlist.append(mod)
    document.set_module_list(newmodlist)
|
|
|
|
|
def revert_colsep(document):
    "Revert the \\columnsep header setting to a geometry preamble line."
    pos = find_token(document.header, "\\columnsep", 0)
    if pos == -1:
        return
    match = re.match(r'\\columnsep (.*)', document.header[pos])
    if not match:
        document.warning("Malformed column separation line!")
        return
    colsep = match.group(1)
    del document.header[pos]
    #it seems to be safe to add the package even if it is already used
    add_to_preamble(document,
                    ["\\usepackage{geometry}",
                     "\\geometry{columnsep=" + colsep + "}"])
|
|
|
|
|
def revert_framed_notes(document):
    "Revert framed boxes to notes. "
    i = 0
    while 1:
        i = find_tokens(document.body, ["\\begin_inset Box Framed", "\\begin_inset Box Shaded"], i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            # should not happen
            document.warning("Malformed LyX document: Could not find end of Box inset.")
            # FIX: bail out instead of falling through with j == -1, which
            # made every bounded search below operate on a bogus range.
            return
        k = find_token(document.body, "status", i + 1, j)
        if k == -1:
            document.warning("Malformed LyX document: Missing `status' tag in Box inset.")
            return
        status = document.body[k]
        l = find_default_layout(document, i + 1, j)
        if l == -1:
            document.warning("Malformed LyX document: Missing `\\begin_layout' in Box inset.")
            return
        m = find_token(document.body, "\\end_layout", i + 1, j)
        if m == -1:
            document.warning("Malformed LyX document: Missing `\\end_layout' in Box inset.")
            return
        # A box without inner box or parbox maps directly onto a Note;
        # otherwise keep a frameless box and wrap its content in a shaded
        # note inside it.
        ibox = find_token(document.body, "has_inner_box 1", i + 1, k)
        pbox = find_token(document.body, "use_parbox 1", i + 1, k)
        if ibox == -1 and pbox == -1:
            document.body[i] = document.body[i].replace("\\begin_inset Box", "\\begin_inset Note")
            del document.body[i+1:k]
        else:
            document.body[i] = document.body[i].replace("\\begin_inset Box Shaded", "\\begin_inset Box Frameless")
            document.body.insert(l + 1, "\\begin_inset Note Shaded\n" + status + "\n\\begin_layout Standard\n")
            document.body.insert(m + 1, "\\end_layout\n\\end_inset")
        i = i + 1
|
|
|
|
|
def revert_slash(document):
    "Replace every \\SpecialChar \\slash{} with an equivalent ERT inset."
    ert = ('\\begin_inset ERT\nstatus collapsed\n\n'
           '\\begin_layout Standard\n\n\n\\backslash\n'
           'slash{}\n\\end_layout\n\n\\end_inset\n\n')
    # Rewrite the body in place (slice assignment keeps the list identity).
    document.body[:] = [line.replace('\\SpecialChar \\slash{}', ert)
                        for line in document.body]
|
|
|
|
|
def revert_nobreakdash(document):
    'Revert \\SpecialChar \\nobreakdash- to ERT'
    # FIX: the pattern is constant — compile it once instead of once per
    # body line.
    # NOTE(review): r.match only fires on lines *starting* with the token;
    # mid-line occurrences are never replaced — confirm that is intended.
    r = re.compile(r'\\SpecialChar \\nobreakdash-')
    found = 0
    for i in range(len(document.body)):
        line = document.body[i]
        m = r.match(line)
        if m:
            found = 1
            document.body[i] = document.body[i].replace('\\SpecialChar \\nobreakdash-', \
                '\\begin_inset ERT\nstatus collapsed\n\n' \
                '\\begin_layout Standard\n\n\n\\backslash\n' \
                'nobreakdash-\n\\end_layout\n\n\\end_inset\n\n')
    if not found:
        return
    # \nobreakdash needs amsmath, so force it on.
    j = find_token(document.header, "\\use_amsmath", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing '\\use_amsmath'.")
        return
    document.header[j] = "\\use_amsmath 2"
|
|
|
|
|
def revert_nocite_key(body, start, end):
    """Rewrite `key "..."' lines in [start, end) as ERT \\nocite{...}; blank the rest."""
    for idx in range(start, end):
        line = body[idx]
        if line.startswith('key "'):
            # The opening quote becomes the brace-open of \nocite, and every
            # remaining quote becomes a closing brace.
            body[idx] = line.replace('key "', "\\backslash\nnocite{").replace('"', "}")
        else:
            # All other inset parameter lines are dropped.
            body[idx] = ""
|
|
|
|
|
def revert_nocite(document):
    "Revert LatexCommand nocite to ERT"
    i = 0
    while 1:
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            return
        # i now points at the `LatexCommand ...' line of the inset.
        i = i + 1
        if (document.body[i] == "LatexCommand nocite"):
            j = find_end_of_inset(document.body, i + 1)
            if j == -1:
                #this should not happen
                document.warning("End of CommandInset citation not found in revert_nocite!")
                # Best effort: rewrite the key lines to the end of the body
                # and give up.
                revert_nocite_key(document.body, i + 1, len(document.body))
                return
            # Turn the key lines into \nocite{...} ERT content, then rewrap
            # the inset header as an ERT inset.
            revert_nocite_key(document.body, i + 1, j)
            document.body[i-1] = "\\begin_inset ERT"
            document.body[i] = "status collapsed\n\n" \
                "\\begin_layout Standard"
            document.body.insert(j, "\\end_layout\n");
            # Resume scanning after the rewritten inset.
            i = j
|
|
|
|
|
def revert_btprintall(document):
    "Revert (non-bibtopic) btPrintAll option to ERT \nocite{*}"
    i = find_token(document.header, '\\use_bibtopic', 0)
    if i == -1:
        document.warning("Malformed lyx document: Missing '\\use_bibtopic'.")
        return
    if get_value(document.header, '\\use_bibtopic', 0) == "false":
        i = 0
        while i < len(document.body):
            i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
            if i == -1:
                return
            j = find_end_of_inset(document.body, i + 1)
            if j == -1:
                #this should not happen
                document.warning("End of CommandInset bibtex not found in revert_btprintall!")
                j = len(document.body)
            for k in range(i, j):
                if (document.body[k] == 'btprint "btPrintAll"'):
                    del document.body[k]
                    document.body.insert(i, "\\begin_inset ERT\n" \
                        "status collapsed\n\n\\begin_layout Standard\n\n" \
                        "\\backslash\nnocite{*}\n" \
                        "\\end_layout\n\\end_inset\n")
                    # FIX: stop scanning this inset — the option occurs at
                    # most once, and the list was just mutated so the
                    # remaining indices of the fixed range no longer line up
                    # (and could run past the end of the body).
                    break
            i = j
|
|
|
|
|
def revert_bahasam(document):
    "Fall back from Bahasa Malaysia (unknown in 1.5) to Bahasa Indonesia."
    if document.language != "bahasam":
        return
    document.language = "bahasa"
    # Fix the header's language declaration.
    pos = find_token(document.header, "\\language", 0)
    if pos != -1:
        document.header[pos] = "\\language bahasa"
    # Rewrite every inline language switch in the body.
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang bahasam", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\lang bahasam", "\\lang bahasa")
        pos += 1
|
|
|
|
|
def revert_interlingua(document):
    "Fall back from Interlingua (unknown in 1.5) to English."
    if document.language != "interlingua":
        return
    document.language = "english"
    # Fix the header's language declaration.
    pos = find_token(document.header, "\\language", 0)
    if pos != -1:
        document.header[pos] = "\\language english"
    # Rewrite every inline language switch in the body.
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang interlingua", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\lang interlingua", "\\lang english")
        pos += 1
|
|
|
|
|
def revert_serbianlatin(document):
    "Fall back from Serbian-Latin (unknown in 1.5) to Croatian."
    if document.language != "serbian-latin":
        return
    document.language = "croatian"
    # Fix the header's language declaration.
    pos = find_token(document.header, "\\language", 0)
    if pos != -1:
        document.header[pos] = "\\language croatian"
    # Rewrite every inline language switch in the body.
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang serbian-latin", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\lang serbian-latin", "\\lang croatian")
        pos += 1
|
|
|
|
|
def revert_rotfloat(document):
    " Revert sideways custom floats. "
    # FIX: the pattern is constant — compile it once, not per iteration.
    r = re.compile(r'\\begin_inset Float (.*)$')
    i = 0
    while 1:
        i = find_token(document.body, "\\begin_inset Float", i)
        if i == -1:
            return
        line = document.body[i]
        m = r.match(line)
        floattype = m.group(1)
        # Standard floats are handled by revert_widesideways instead.
        if floattype == "figure" or floattype == "table":
            i = i + 1
            continue
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset'.")
            i = i + 1
            continue
        if get_value(document.body, 'sideways', i, j) != "false":
            l = find_default_layout(document, i + 1, j)
            if l == -1:
                document.warning("Malformed LyX document: Missing `\\begin_layout' in Float inset.")
                return
            # Wrap the float content in \begin{sideways<type>}...\end{...} ERT
            # and drop the now-redundant float parameter lines.
            document.body[j] = '\\begin_layout Standard\n\\begin_inset ERT\nstatus collapsed\n\n' \
                '\\begin_layout Standard\n\n\n\\backslash\n' \
                'end{sideways' + floattype + '}\n\\end_layout\n\n\\end_inset\n'
            del document.body[i+1:l-1]
            document.body[i] = '\\begin_inset ERT\nstatus collapsed\n\n' \
                '\\begin_layout Standard\n\n\n\\backslash\n' \
                'begin{sideways' + floattype + '}\n\\end_layout\n\n\\end_inset\n\n\\end_layout\n\n'
            if floattype == "algorithm":
                add_to_preamble(document,
                    ['% Commands inserted by lyx2lyx for sideways algorithm float',
                     '\\usepackage{rotfloat}\n'
                     '\\floatstyle{ruled}\n'
                     '\\newfloat{algorithm}{tbp}{loa}\n'
                     '\\floatname{algorithm}{Algorithm}\n'])
            else:
                # FIX: added the missing space before the float name.
                document.warning("Cannot create preamble definition for custom float " + floattype + ".")
            i = i + 1
            continue
        i = i + 1
|
|
|
|
|
def revert_widesideways(document):
    " Revert wide sideways floats. "
    # FIX: the pattern is constant — compile it once, not per iteration.
    r = re.compile(r'\\begin_inset Float (.*)$')
    i = 0
    while 1:
        i = find_token(document.body, '\\begin_inset Float', i)
        if i == -1:
            return
        line = document.body[i]
        m = r.match(line)
        floattype = m.group(1)
        # Only the standard float types; custom ones go via revert_rotfloat.
        if floattype != "figure" and floattype != "table":
            i = i + 1
            continue
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset'.")
            i = i + 1
            continue
        if get_value(document.body, 'sideways', i, j) != "false":
            if get_value(document.body, 'wide', i, j) != "false":
                l = find_default_layout(document, i + 1, j)
                if l == -1:
                    document.warning("Malformed LyX document: Missing `\\begin_layout' in Float inset.")
                    return
                # Wrap the content in the starred sideways environment from
                # rotfloat and drop the float parameter lines.
                document.body[j] = '\\begin_layout Standard\n\\begin_inset ERT\nstatus collapsed\n\n' \
                    '\\begin_layout Standard\n\n\n\\backslash\n' \
                    'end{sideways' + floattype + '*}\n\\end_layout\n\n\\end_inset\n'
                del document.body[i+1:l-1]
                document.body[i] = '\\begin_inset ERT\nstatus collapsed\n\n' \
                    '\\begin_layout Standard\n\n\n\\backslash\n' \
                    'begin{sideways' + floattype + '*}\n\\end_layout\n\n\\end_inset\n\n\\end_layout\n\n'
                add_to_preamble(document,
                    ['\\usepackage{rotfloat}\n'])
                i = i + 1
                continue
        i = i + 1
|
|
|
|
|
def revert_inset_embedding(document, type):
    "Strip the `embed' parameter from every inset of the given type."
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset %s" % type, pos)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset'.")
            pos += 1
            continue
        # The tag may appear tab-indented or flush; try both spellings.
        k = find_token(document.body, "\tembed", pos, end)
        if k == -1:
            k = find_token(document.body, "embed", pos, end)
        if k != -1:
            del document.body[k]
        pos += 1
|
|
|
|
|
def revert_external_embedding(document):
    "Strip the `embed' parameter from External insets."
    # Thin wrapper around the generic per-type handler.
    revert_inset_embedding(document, 'External')
|
|
|
|
|
def convert_subfig(document):
    " Convert subfigures to subfloats. "
    i = 0
    while 1:
        i = find_token(document.body, '\\begin_inset Graphics', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset'.")
            i = i + 1
            continue
        # Only graphics carrying the old `subcaption' flag are converted.
        k = find_token(document.body, '\tsubcaption', i, j)
        if k == -1:
            i = i + 1
            continue
        l = find_token(document.body, '\tsubcaptionText', i, j)
        # NOTE(review): if `subcaptionText' is missing, l is -1 and the next
        # line silently reads document.body[-1] — confirm that cannot occur.
        # Strip the surrounding quotes from the caption value.
        caption = document.body[l][16:].strip('"')
        savestr = document.body[i]
        # Remove the two subfig parameter lines.
        # NOTE(review): deletes l before k, which assumes l > k so the earlier
        # index stays valid — confirm the parameter order in the inset.
        del document.body[l]
        del document.body[k]
        # Replace the Graphics opener with a Float inset carrying the caption,
        # re-appending the original Graphics line (savestr) at its end.
        document.body[i] = '\\begin_inset Float figure\nwide false\nsideways false\n' \
            'status open\n\n\\begin_layout Plain Layout\n\\begin_inset Caption\n\n\\begin_layout Plain Layout\n' \
            + caption + '\n\\end_layout\n\n\\end_inset\n\n\\end_layout\n\n\\begin_layout Plain Layout\n' + savestr
        # Close the added layouts/inset just before the original \end_inset.
        savestr = document.body[j]
        document.body[j] = '\n\\end_layout\n\n\\end_inset\n' + savestr
        # No i increment needed: the rewritten element no longer starts with
        # '\begin_inset Graphics', so the next search moves past it.
|
|
|
|
|
def revert_subfig(document):
    " Revert subfloats. "
    i = 0
    while 1:
        i = find_token(document.body, '\\begin_inset Float', i)
        if i == -1:
            return
        # Process one outer float: keep rewriting embedded floats until none
        # is left, then advance past it.
        while 1:
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed lyx document: Missing '\\end_inset' (float).")
                i = i + 1
                continue
            # look for embedded float (= subfloat)
            k = find_token(document.body, '\\begin_inset Float', i + 1, j)
            if k == -1:
                break
            l = find_end_of_inset(document.body, k)
            if l == -1:
                document.warning("Malformed lyx document: Missing '\\end_inset' (embedded float).")
                i = i + 1
                continue
            m = find_default_layout(document, k + 1, l)
            # caption?
            cap = find_token(document.body, '\\begin_inset Caption', k + 1, l)
            caption = ''
            shortcap = ''
            if cap != -1:
                capend = find_end_of_inset(document.body, cap)
                if capend == -1:
                    document.warning("Malformed lyx document: Missing '\\end_inset' (caption).")
                    return
                # label?
                label = ''
                lbl = find_token(document.body, '\\begin_inset CommandInset label', cap, capend)
                if lbl != -1:
                    lblend = find_end_of_inset(document.body, lbl + 1)
                    if lblend == -1:
                        document.warning("Malformed lyx document: Missing '\\end_inset' (label).")
                        return
                    # Extract the label name from the inset's `name "..."' line.
                    for line in document.body[lbl:lblend + 1]:
                        if line.startswith('name '):
                            label = line.split()[1].strip('"')
                            break
                else:
                    lbl = capend
                    lblend = capend
                    label = ''
                # opt arg? (the short caption)
                opt = find_token(document.body, '\\begin_inset OptArg', cap, capend)
                if opt != -1:
                    optend = find_end_of_inset(document.body, opt)
                    if optend == -1:
                        document.warning("Malformed lyx document: Missing '\\end_inset' (OptArg).")
                        return
                    optc = find_default_layout(document, opt, optend)
                    if optc == -1:
                        document.warning("Malformed LyX document: Missing `\\begin_layout' in Float inset.")
                        return
                    optcend = find_end_of(document.body, optc, "\\begin_layout", "\\end_layout")
                    # Collect the short-caption text (non-command lines only).
                    for line in document.body[optc:optcend]:
                        if not line.startswith('\\'):
                            shortcap += line.strip()
                else:
                    opt = capend
                    optend = capend
                # Collect the caption text, skipping the label and optional
                # argument ranges handled above.
                for line in document.body[cap:capend]:
                    if line in document.body[lbl:lblend]:
                        continue
                    elif line in document.body[opt:optend]:
                        continue
                    elif not line.startswith('\\'):
                        caption += line.strip()
                if len(label) > 0:
                    caption += "\\backslash\nlabel{" + label + "}"
            # Close the \subfloat argument at the end of the embedded float.
            document.body[l] = '\\begin_layout Plain Layout\n\\begin_inset ERT\nstatus collapsed\n\n' \
                '\\begin_layout Plain Layout\n\n}\n\\end_layout\n\n\\end_inset\n\n\\end_layout\n\n\\begin_layout Plain Layout\n'
            # NOTE(review): when no caption inset was found (cap == -1),
            # `capend' is undefined at this point — confirm captionless
            # subfloats cannot reach here.
            del document.body[cap:capend+1]
            del document.body[k+1:m-1]
            # Open the \subfloat command in place of the embedded float,
            # with optional [shortcap][caption] arguments.
            insertion = '\\begin_inset ERT\nstatus collapsed\n\n' \
                '\\begin_layout Plain Layout\n\n\\backslash\n' \
                'subfloat'
            if len(shortcap) > 0:
                insertion = insertion + "[" + shortcap + "]"
            if len(caption) > 0:
                insertion = insertion + "[" + caption + "]"
            insertion = insertion + '{%\n\\end_layout\n\n\\end_inset\n\n\\end_layout\n'
            document.body[k] = insertion
            add_to_preamble(document,
                ['\\usepackage{subfig}\n'])
        i = i + 1
|
|
|
|
|
def revert_wrapplacement(document):
    " Revert placement options wrap floats (wrapfig). "
    i = 0
    while True:
        # NOTE(review): find_token matches by line prefix, so this can hit
        # any body line starting with "lines", not only the wrap-float
        # parameter — a stray match makes the check below abort the whole
        # conversion with a warning. Confirm this is acceptable.
        i = find_token(document.body, "lines", i)
        if i == -1:
            return
        j = find_token(document.body, "placement", i+1)
        if j != i + 1:
            document.warning("Malformed LyX document: Couldn't find placement parameter of wrap float.")
            return
        # The uppercase (exact-placement) codes did not exist in 1.5; map
        # them back to the lowercase floating variants.
        document.body[j] = document.body[j].replace("placement O", "placement o")
        document.body[j] = document.body[j].replace("placement I", "placement i")
        document.body[j] = document.body[j].replace("placement L", "placement l")
        document.body[j] = document.body[j].replace("placement R", "placement r")
        i = i + 1
|
|
|
|
|
def remove_extra_embedded_files(document):
    "Drop the \\extra_embedded_files line from the buffer parameters."
    pos = find_token(document.header, '\\extra_embedded_files', 0)
    if pos != -1:
        del document.header[pos]
|
|
|
|
|
def convert_spaceinset(document):
    " Convert '\\InsetSpace foo' to '\\begin_inset Space foo\n\\end_inset' "
    for i in range(len(document.body)):
        # FIX: this used re.search(r'\InsetSpace', ...). "\I" is an invalid
        # regex escape that raises re.error on Python >= 3.7, and on older
        # Pythons it matched the bare text "InsetSpace" (without backslash),
        # appending a spurious \end_inset to innocent lines. A plain
        # substring test is what was meant.
        if '\\InsetSpace' in document.body[i]:
            document.body[i] = document.body[i].replace('\\InsetSpace', '\n\\begin_inset Space')
            document.body[i] = document.body[i] + "\n\\end_inset"
|
|
|
|
|
def revert_spaceinset(document):
    " Revert '\\begin_inset Space foo\n\\end_inset' to '\\InsetSpace foo' "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Space", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of space inset.")
            # FIX: advance past the broken inset; a bare `continue' re-found
            # the same token forever (infinite loop).
            i += 1
            continue
        document.body[i] = document.body[i].replace('\\begin_inset Space', '\\InsetSpace')
        del document.body[j]
|
|
|
|
|
def convert_hfill(document):
    "Rewrite inline \\hfill commands as Space insets."
    pos = 0
    while True:
        pos = find_token(document.body, "\\hfill", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace(
            '\\hfill', '\n\\begin_inset Space \\hfill{}\n\\end_inset')
|
|
|
|
|
def revert_hfills(document):
    "Revert \\hfill-family space insets; dotfill/hrulefill become ERT."
    ert_dotfill = ('\\begin_inset ERT\nstatus collapsed\n\n'
                   '\\begin_layout Standard\n\n\n\\backslash\n'
                   'dotfill{}\n\\end_layout\n\n\\end_inset\n\n')
    ert_hrulefill = ('\\begin_inset ERT\nstatus collapsed\n\n'
                     '\\begin_layout Standard\n\n\n\\backslash\n'
                     'hrulefill{}\n\\end_layout\n\n\\end_inset\n\n')
    for idx in range(len(document.body)):
        line = document.body[idx]
        line = line.replace('\\InsetSpace \\hfill{}', '\\hfill')
        line = line.replace('\\InsetSpace \\dotfill{}', ert_dotfill)
        line = line.replace('\\InsetSpace \\hrulefill{}', ert_hrulefill)
        document.body[idx] = line
|
|
|
|
|
def revert_hspace(document):
    "Revert \\InsetSpace \\hspace{} (and \\hspace*{}) insets to ERT."
    pos = 0
    while True:
        pos = find_token(document.body, "\\InsetSpace \\hspace", pos)
        if pos == -1:
            return
        length = get_value(document.body, '\\length', pos + 1)
        if length == '':
            document.warning("Malformed lyx document: Missing '\\length' in Space inset.")
            return
        # Drop the separate \length line; its value is folded into the ERT.
        del document.body[pos + 1]
        starred = ('\\begin_inset ERT\nstatus collapsed\n\n'
                   '\\begin_layout Standard\n\n\n\\backslash\n'
                   'hspace*{' + length + '}\n\\end_layout\n\n\\end_inset\n\n')
        plain = ('\\begin_inset ERT\nstatus collapsed\n\n'
                 '\\begin_layout Standard\n\n\n\\backslash\n'
                 'hspace{' + length + '}\n\\end_layout\n\n\\end_inset\n\n')
        # Exactly one of the two spellings is present on the line.
        document.body[pos] = document.body[pos].replace('\\InsetSpace \\hspace*{}', starred)
        document.body[pos] = document.body[pos].replace('\\InsetSpace \\hspace{}', plain)
|
|
|
|
|
def revert_protected_hfill(document):
    ' Revert \\begin_inset Space \\hspace*{\\fill} to ERT '
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Space \\hspace*{\\fill}', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of space inset.")
            # FIX: advance past the broken inset; a bare `continue' re-found
            # the same token forever (infinite loop).
            i += 1
            continue
        del document.body[j]
        document.body[i] = document.body[i].replace('\\begin_inset Space \\hspace*{\\fill}', \
            '\\begin_inset ERT\nstatus collapsed\n\n' \
            '\\begin_layout Standard\n\n\n\\backslash\n' \
            'hspace*{\n\\backslash\nfill}\n\\end_layout\n\n\\end_inset\n\n')
|
|
|
|
|
def revert_leftarrowfill(document):
    ' Revert \\begin_inset Space \\leftarrowfill{} to ERT '
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Space \\leftarrowfill{}', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of space inset.")
            # FIX: advance past the broken inset; a bare `continue' re-found
            # the same token forever (infinite loop).
            i += 1
            continue
        del document.body[j]
        document.body[i] = document.body[i].replace('\\begin_inset Space \\leftarrowfill{}', \
            '\\begin_inset ERT\nstatus collapsed\n\n' \
            '\\begin_layout Standard\n\n\n\\backslash\n' \
            'leftarrowfill{}\n\\end_layout\n\n\\end_inset\n\n')
|
|
|
|
|
def revert_rightarrowfill(document):
    ' Revert \\begin_inset Space \\rightarrowfill{} to ERT '
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Space \\rightarrowfill{}', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of space inset.")
            # FIX: advance past the broken inset; a bare `continue' re-found
            # the same token forever (infinite loop).
            i += 1
            continue
        del document.body[j]
        document.body[i] = document.body[i].replace('\\begin_inset Space \\rightarrowfill{}', \
            '\\begin_inset ERT\nstatus collapsed\n\n' \
            '\\begin_layout Standard\n\n\n\\backslash\n' \
            'rightarrowfill{}\n\\end_layout\n\n\\end_inset\n\n')
|
|
|
|
|
def revert_upbracefill(document):
    ' Revert \\begin_inset Space \\upbracefill{} to ERT '
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Space \\upbracefill{}', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of space inset.")
            # FIX: advance past the broken inset; a bare `continue' re-found
            # the same token forever (infinite loop).
            i += 1
            continue
        del document.body[j]
        document.body[i] = document.body[i].replace('\\begin_inset Space \\upbracefill{}', \
            '\\begin_inset ERT\nstatus collapsed\n\n' \
            '\\begin_layout Standard\n\n\n\\backslash\n' \
            'upbracefill{}\n\\end_layout\n\n\\end_inset\n\n')
|
|
|
|
|
def revert_downbracefill(document):
    ' Revert \\begin_inset Space \\downbracefill{} to ERT '
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Space \\downbracefill{}', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of space inset.")
            # FIX: advance past the broken inset; a bare `continue' re-found
            # the same token forever (infinite loop).
            i += 1
            continue
        del document.body[j]
        document.body[i] = document.body[i].replace('\\begin_inset Space \\downbracefill{}', \
            '\\begin_inset ERT\nstatus collapsed\n\n' \
            '\\begin_layout Standard\n\n\n\\backslash\n' \
            'downbracefill{}\n\\end_layout\n\n\\end_inset\n\n')
|
|
|
|
|
def revert_local_layout(document):
    "Remove local layout sections from the document header."
    while True:
        start = find_token(document.header, "\\begin_local_layout", 0)
        if start == -1:
            return
        stop = find_end_of(document.header, start, "\\begin_local_layout", "\\end_local_layout")
        if stop == -1:
            # this should not happen
            return
        # Delete the whole section including both delimiters.
        del document.header[start : stop + 1]
|
|
|
|
|
def convert_pagebreaks(document):
    "Convert inline page-break commands to Newpage insets."
    # All four commands convert identically; sweep the body once per
    # command, in the original order.
    for cmd in ('newpage', 'pagebreak', 'clearpage', 'cleardoublepage'):
        pos = 0
        while True:
            pos = find_token(document.body, '\\' + cmd, pos)
            if pos == -1:
                break
            document.body[pos:pos+1] = ['\\begin_inset Newpage ' + cmd,
                                        '\\end_inset']
|
|
|
|
|
def revert_pagebreaks(document):
    ' Revert \\begin_inset Newpage to previous inline format '
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Newpage', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Newpage inset.")
            # Advance past the broken inset start; a bare 'continue' would
            # refind the same line forever and hang the conversion.
            i = i + 1
            continue
        # Drop the '\end_inset' line and fold the inset start back into the
        # matching inline command (only one of these replaces will hit).
        del document.body[j]
        document.body[i] = document.body[i].replace('\\begin_inset Newpage newpage', '\\newpage')
        document.body[i] = document.body[i].replace('\\begin_inset Newpage pagebreak', '\\pagebreak')
        document.body[i] = document.body[i].replace('\\begin_inset Newpage clearpage', '\\clearpage')
        document.body[i] = document.body[i].replace('\\begin_inset Newpage cleardoublepage', '\\cleardoublepage')
|
|
|
|
|
|
def convert_linebreaks(document):
    ' Convert inline Newline insets to new format '
    # Each inline token \<cmd> becomes a two-line "Newline <cmd>" inset.
    for cmd in ('newline', 'linebreak'):
        pos = 0
        while True:
            pos = find_token(document.body, '\\' + cmd, pos)
            if pos == -1:
                break
            document.body[pos:pos + 1] = ['\\begin_inset Newline ' + cmd,
                                          '\\end_inset']
|
|
|
|
|
|
def revert_linebreaks(document):
    ' Revert \\begin_inset Newline to previous inline format '
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Newline', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Newline inset.")
            # Advance past the broken inset start; a bare 'continue' would
            # refind the same line forever and hang the conversion.
            i = i + 1
            continue
        # Drop the '\end_inset' line and fold the inset start back into the
        # matching inline command (only one of these replaces will hit).
        del document.body[j]
        document.body[i] = document.body[i].replace('\\begin_inset Newline newline', '\\newline')
        document.body[i] = document.body[i].replace('\\begin_inset Newline linebreak', '\\linebreak')
|
|
|
|
|
|
def convert_japanese_plain(document):
    ' Set language japanese-plain to japanese '
    if document.language != "japanese-plain":
        return
    document.language = "japanese"
    # Fix the header's \language line, if present.
    pos = find_token(document.header, "\\language", 0)
    if pos != -1:
        document.header[pos] = "\\language japanese"
    # Fix every \lang switch in the body.
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang japanese-plain", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\lang japanese-plain", "\\lang japanese")
        pos += 1
|
|
|
|
|
|
def revert_pdfpages(document):
    ' Revert pdfpages external inset to ERT '
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset External", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset'.")
            i = i + 1
            continue
        if get_value(document.body, 'template', i, j) == "PDFPages":
            filename = get_value(document.body, 'filename', i, j)
            extra = ''
            # Inset parameter lines are tab-indented; the regex '\t' escape
            # matches that literal tab before 'extra'.
            r = re.compile(r'\textra PDFLaTeX \"(.*)\"$')
            for k in range(i, j):
                m = r.match(document.body[k])
                if m:
                    extra = m.group(1)
            angle = get_value(document.body, 'rotateAngle', i, j)
            width = get_value(document.body, 'width', i, j)
            height = get_value(document.body, 'height', i, j)
            scale = get_value(document.body, 'scale', i, j)
            keepAspectRatio = find_token(document.body, "\tkeepAspectRatio", i, j)
            # Assemble the optional argument of \includepdf.
            opts = []
            if extra != '':
                opts.append(extra)
            if angle != '':
                opts.append("angle=" + angle)
            if width != '':
                opts.append("width=" + convert_len(width))
            if height != '':
                opts.append("height=" + convert_len(height))
            if scale != '':
                opts.append("scale=" + scale)
            # find_token returns a line index or -1, never a string; the old
            # test "keepAspectRatio != ''" was always true, so the option was
            # emitted even when the flag was absent from the inset.
            if keepAspectRatio != -1:
                opts.append("keepaspectratio")
            options = ",".join(opts)
            if options != '':
                options = '[' + options + ']'
            # Replace the whole inset with an ERT carrying \includepdf.
            del document.body[i+1:j+1]
            document.body[i:i+1] = ['\\begin_inset ERT',
                                    'status collapsed',
                                    '',
                                    '\\begin_layout Standard',
                                    '',
                                    '\\backslash',
                                    'includepdf' + options + '{' + filename + '}',
                                    '\\end_layout',
                                    '',
                                    '\\end_inset']
            add_to_preamble(document, ['\\usepackage{pdfpages}\n'])
            i = i + 1
            continue
        i = i + 1
|
|
|
|
|
|
def revert_mexican(document):
    ' Set language Spanish(Mexico) to Spanish '
    if document.language != "spanish-mexico":
        return
    document.language = "spanish"
    # Fix the header's \language line, if present.
    pos = find_token(document.header, "\\language", 0)
    if pos != -1:
        document.header[pos] = "\\language spanish"
    # Fix every \lang switch in the body.
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang spanish-mexico", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\lang spanish-mexico", "\\lang spanish")
        pos += 1
|
|
|
|
|
|
def remove_embedding(document):
    ' Remove embed tag from all insets '
    # All inset kinds that may carry an embed parameter.
    for inset in ('Graphics', 'External',
                  'CommandInset include', 'CommandInset bibtex'):
        revert_inset_embedding(document, inset)
|
|
|
|
|
|
def revert_master(document):
    ' Remove master param '
    pos = find_token(document.header, "\\master", 0)
    if pos == -1:
        return
    del document.header[pos]
|
|
|
|
|
|
##
# Conversion hub
#

# File format range produced/consumed by LyX 1.6.
supported_versions = ["1.6.0","1.6"]

# Forward conversion chain: each entry is [target_format, [functions]].
# An empty function list means the format bump requires no document changes.
convert = [[277, [fix_wrong_tables]],
           [278, [close_begin_deeper]],
           [279, [long_charstyle_names]],
           [280, [axe_show_label]],
           [281, []],
           [282, []],
           [283, [convert_flex]],
           [284, []],
           [285, []],
           [286, []],
           [287, [convert_wrapfig_options]],
           [288, [convert_inset_command]],
           [289, [convert_latexcommand_index]],
           [290, []],
           [291, []],
           [292, []],
           [293, []],
           [294, [convert_pdf_options]],
           [295, [convert_htmlurl, convert_url]],
           [296, [convert_include]],
           [297, [convert_usorbian]],
           [298, []],
           [299, []],
           [300, []],
           [301, []],
           [302, []],
           [303, [convert_serbocroatian]],
           [304, [convert_framed_notes]],
           [305, []],
           [306, []],
           [307, []],
           [308, []],
           [309, []],
           [310, []],
           [311, [convert_ams_classes]],
           [312, []],
           [313, [convert_module_names]],
           [314, []],
           [315, []],
           [316, [convert_subfig]],
           [317, []],
           [318, []],
           [319, [convert_spaceinset, convert_hfill]],
           [320, []],
           [321, [convert_tablines]],
           [322, []],
           [323, [convert_pagebreaks]],
           [324, [convert_linebreaks]],
           [325, [convert_japanese_plain]],
           [326, []],
           [327, []],
           [328, [remove_embedding, remove_extra_embedded_files, remove_inzip_options]],
           [329, []],
           [330, []],
          ]

# Backward conversion chain: each entry is [target_format, [functions]];
# entry [N, fns] reverts a format-(N+1) document back to format N.
revert =  [[329, [revert_leftarrowfill, revert_rightarrowfill, revert_upbracefill, revert_downbracefill]],
           [328, [revert_master]],
           [327, []],
           [326, [revert_mexican]],
           [325, [revert_pdfpages]],
           [324, []],
           [323, [revert_linebreaks]],
           [322, [revert_pagebreaks]],
           [321, [revert_local_layout]],
           [320, [revert_tablines]],
           [319, [revert_protected_hfill]],
           [318, [revert_spaceinset, revert_hfills, revert_hspace]],
           [317, [remove_extra_embedded_files]],
           [316, [revert_wrapplacement]],
           [315, [revert_subfig]],
           [314, [revert_colsep]],
           [313, []],
           [312, [revert_module_names]],
           [311, [revert_rotfloat, revert_widesideways]],
           [310, [revert_external_embedding]],
           [309, [revert_btprintall]],
           [308, [revert_nocite]],
           [307, [revert_serbianlatin]],
           [306, [revert_slash, revert_nobreakdash]],
           [305, [revert_interlingua]],
           [304, [revert_bahasam]],
           [303, [revert_framed_notes]],
           [302, []],
           [301, [revert_latin, revert_samin]],
           [300, [revert_linebreak]],
           [299, [revert_pagebreak]],
           [298, [revert_hyperlinktype]],
           [297, [revert_macro_optional_params]],
           [296, [revert_albanian, revert_lowersorbian, revert_uppersorbian]],
           [295, [revert_include]],
           [294, [revert_href]],
           [293, [revert_pdf_options_2]],
           [292, [revert_inset_info]],
           [291, [revert_japanese, revert_japanese_encoding]],
           [290, [revert_vietnamese]],
           [289, [revert_wraptable]],
           [288, [revert_latexcommand_index]],
           [287, [revert_inset_command]],
           [286, [revert_wrapfig_options]],
           [285, [revert_pdf_options]],
           [284, [remove_inzip_options]],
           [283, []],
           [282, [revert_flex]],
           [281, []],
           [280, [revert_begin_modules]],
           [279, [revert_show_label]],
           [278, [revert_long_charstyle_names]],
           [277, []],
           [276, []]
          ]

# This module is only meant to be driven by lyx2lyx; there is no
# stand-alone entry point.
if __name__ == "__main__":
    pass
|