2007-08-12 13:25:36 +00:00
|
|
|
# This file is part of lyx2lyx
|
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
# Copyright (C) 2007 José Matos <jamatos@lyx.org>
|
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or
|
|
|
|
# modify it under the terms of the GNU General Public License
|
|
|
|
# as published by the Free Software Foundation; either version 2
|
|
|
|
# of the License, or (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program; if not, write to the Free Software
|
|
|
|
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
|
|
|
|
|
|
|
""" Convert files to the file format generated by lyx 1.6"""
|
|
|
|
|
|
|
|
import re
|
|
|
|
import unicodedata
|
|
|
|
import sys, os
|
|
|
|
|
2007-10-12 23:51:56 +00:00
|
|
|
from parser_tools import find_token, find_end_of, find_tokens, get_value
|
2007-08-12 13:25:36 +00:00
|
|
|
|
|
|
|
####################################################################
|
|
|
|
# Private helper functions
|
|
|
|
|
|
|
|
def find_end_of_inset(lines, i):
    " Find end of inset, where lines[i] is included."
    # Thin wrapper over the generic nesting-aware scanner from parser_tools:
    # it matches \begin_inset / \end_inset pairs starting at lines[i] and
    # returns the index of the closing \end_inset (or -1 if unbalanced).
    return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
|
|
|
|
|
2007-11-21 18:18:24 +00:00
|
|
|
def wrap_into_ert(string, src, dst):
    "Replace each occurrence of src in string with dst wrapped in an ERT inset."
    # Build the ERT scaffolding once, then substitute in a single pass.
    ert_open = '\n\\begin_inset ERT\nstatus collapsed\n\\begin_layout Standard\n'
    ert_close = '\n\\end_layout\n\\end_inset\n'
    return string.replace(src, ert_open + dst + ert_close)
|
|
|
|
|
2007-08-12 13:25:36 +00:00
|
|
|
####################################################################
|
|
|
|
|
|
|
|
def fix_wrong_tables(document):
    "Drop multicolumn continuation flags from cells that cannot legally carry them."
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            # Advance past this inset: the old bare `continue` re-found the
            # same token at the same index and looped forever.
            i += 1
            continue

        # Row/column counts come from the <lyxtabular ...> line that
        # immediately follows the inset opening.
        m = i + 1
        nrows = int(document.body[i+1].split('"')[3])
        ncols = int(document.body[i+1].split('"')[5])

        for l in range(nrows):
            prev_multicolumn = 0
            for k in range(ncols):
                m = find_token(document.body, '<cell', m)
                if document.body[m].find('multicolumn') != -1:
                    multicol_cont = int(document.body[m].split('"')[1])
                    # A continuation flag (2) is only legal directly after the
                    # start of a multicolumn; otherwise strip the attribute
                    # (characters 5..20 hold ' multicolumn="2"').
                    if multicol_cont == 2 and (k == 0 or prev_multicolumn == 0):
                        document.body[m] = document.body[m][:5] + document.body[m][21:]
                        prev_multicolumn = 0
                    else:
                        prev_multicolumn = multicol_cont
                else:
                    prev_multicolumn = 0
                # Step past this cell; without this the scan kept matching the
                # same '<cell' line for every (row, column) pair.
                m += 1
        i = j + 1
|
|
|
|
|
|
|
|
|
2007-08-12 14:56:49 +00:00
|
|
|
def close_begin_deeper(document):
    "Append \\end_deeper lines to balance any unclosed \\begin_deeper."
    depth = 0
    pos = 0
    while True:
        pos = find_tokens(document.body, ["\\begin_deeper", "\\end_deeper"], pos)
        if pos == -1:
            break
        # Track nesting: +1 for each opener, -1 for each closer.
        if document.body[pos].startswith("\\begin_deeper"):
            depth += 1
        else:
            depth -= 1
        pos += 1
    # Insert the missing closers just before the final two lines of the body.
    document.body[-2:-2] = ["\\end_deeper"] * depth
|
2007-08-17 15:48:41 +00:00
|
|
|
|
|
|
|
|
|
|
|
def long_charstyle_names(document):
    "Prefix every CharStyle inset name with 'CharStyle:'."
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CharStyle", pos)
        if pos == -1:
            return
        line = document.body[pos]
        document.body[pos] = line.replace("CharStyle ", "CharStyle CharStyle:")
        pos += 1
|
|
|
|
|
|
|
|
def revert_long_charstyle_names(document):
    "Strip the 'CharStyle:' prefix from CharStyle inset names."
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CharStyle", pos)
        if pos == -1:
            return
        line = document.body[pos]
        document.body[pos] = line.replace("CharStyle CharStyle:", "CharStyle")
        pos += 1
|
2007-08-12 14:56:49 +00:00
|
|
|
|
|
|
|
|
2007-08-18 23:26:07 +00:00
|
|
|
def axe_show_label(document):
    "Replace the show_label line of CharStyle insets with a status line."
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CharStyle", i)
        if i == -1:
            return
        label_line = document.body[i + 1]
        if label_line.find("show_label") == -1:
            document.warning("Malformed LyX document: show_label missing in CharStyle.")
        elif label_line.find("true") != -1:
            # show_label true -> visible inset; drop the following line too.
            document.body[i + 1] = "status open"
            del document.body[ i + 2]
        elif label_line.find("false") != -1:
            # show_label false -> collapsed inset; drop the following line too.
            document.body[i + 1] = "status collapsed"
            del document.body[ i + 2]
        else:
            document.warning("Malformed LyX document: show_label neither false nor true.")
        i += 1
|
2007-08-18 23:26:07 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_show_label(document):
    "Re-insert a show_label line in CharStyle insets based on the status line."
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CharStyle", i)
        if i == -1:
            return
        status_line = document.body[i + 1]
        if status_line.find("status open") != -1:
            document.body.insert(i + 1, "show_label true")
        elif status_line.find("status collapsed") != -1:
            document.body.insert(i + 1, "show_label false")
        else:
            document.warning("Malformed LyX document: no legal status line in CharStyle.")
        i += 1
|
|
|
|
|
This is one of a series of patches that will merge the layout modules development in personal/branches/rgheck back into the tree.
Design goal: Allow the use of layout "modules", which are to LaTeX packages as layout files are to LaTeX document classes. Thus, one could have a module that defined certain character styles, environments, commands, or what have you, and include it in various documents, each of which uses a different document class, without having to modify the layout files themselves. For example, a theorems.module could be used with article.layout to provide support for theorem-type environments, without having to modify article.layout itself, and the same module could be used with book.layout, etc.
This patch adds the backend. The ModuleList class holds a list of the available modules, which are retrieved from lyxmodules.lst, itself generated by configure.py. There are two LFUNs available: modules-clear and module-add, which do the obvious thing; you can test by typing these into the minibuffer, along with the name of one of the available modules: URL (a CharStyle), Endnote (a Custom Inset), and---with the spaces---End To Foot (View>LaTeX and look at the user preamble), which are themselves in lib/layouts. There are some others, too, that allow theorems to be added to classes like article and book.
The GUI will come next.
Issues: (i) The configure.py script could be improved. It'd be nice, for example, if it tested for the presence of the LaTeX packages a particular module needs. But this would mean re-working the LaTeX script, and I don't know how to do that. Note that at present, the packages are ignored. This will change shortly. (ii) I've used std::string in LyXModule, following what seemed to be a precedent in TextClass. If some of these should be docstrings, please let me know, and I'll change them. (iii) There is at present no distinction between LaTeX and DocBook modules. Should there be? That is: Should there be modules that are available when the document class is a LaTeX class and others that are available only when it is DocBook? Or should there just be one set of modules? Each module can of course indicate for what it is suitable in its description.
git-svn-id: svn://svn.lyx.org/lyx/lyx-devel/trunk@19893 a592a061-630c-0410-9148-cb99ea01b6c8
2007-08-29 17:59:49 +00:00
|
|
|
def revert_begin_modules(document):
    "Remove \\begin_modules ... \\end_modules sections from the header."
    pos = 0
    while True:
        pos = find_token(document.header, "\\begin_modules", pos)
        if pos == -1:
            return
        end = find_end_of(document.header, pos, "\\begin_modules", "\\end_modules")
        if end == -1:
            # this should not happen
            break
        del document.header[pos:end + 1]
|
2007-08-18 23:26:07 +00:00
|
|
|
|
2007-09-08 13:01:29 +00:00
|
|
|
def convert_flex(document):
    "Convert CharStyle insets to Flex insets."
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CharStyle", pos)
        if pos == -1:
            return
        old = document.body[pos]
        # The rewritten line no longer matches, so the search naturally moves on.
        document.body[pos] = old.replace('\\begin_inset CharStyle', '\\begin_inset Flex')
|
|
|
|
|
|
|
|
def revert_flex(document):
    "Convert Flex insets back to CharStyle insets."
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Flex", pos)
        if pos == -1:
            return
        old = document.body[pos]
        # The rewritten line no longer matches, so the search naturally moves on.
        document.body[pos] = old.replace('\\begin_inset Flex', '\\begin_inset CharStyle')
|
|
|
|
|
|
|
|
|
2007-09-24 14:37:10 +00:00
|
|
|
# Discard PDF options for hyperref
|
2007-09-20 22:32:20 +00:00
|
|
|
def revert_pdf_options(document):
    "Revert PDF options for hyperref."
    # Delete every hyperref/PDF header setting introduced by this format.
    # The old copy-pasted code threaded a running search index through the
    # header (some lookups started at the previous hit, some at 0), which
    # could silently miss a token appearing earlier in the header; searching
    # each token from the top is simpler and more robust. The token order
    # matches the old deletion order, so well-formed headers behave the same.
    tokens = ["\\use_hyperref", "\\pdf_store_options", "\\pdf_title",
              "\\pdf_author", "\\pdf_subject", "\\pdf_keywords",
              "\\pdf_bookmarks", "\\pdf_bookmarksnumbered",
              "\\pdf_bookmarksopen", "\\pdf_bookmarksopenlevel",
              "\\pdf_breaklinks", "\\pdf_pdfborder", "\\pdf_colorlinks",
              "\\pdf_backref", "\\pdf_pagebackref", "\\pdf_pagemode",
              "\\pdf_quoted_options"]
    for tok in tokens:
        k = find_token(document.header, tok, 0)
        if k != -1:
            del document.header[k]
|
2007-09-20 22:32:20 +00:00
|
|
|
|
2007-08-18 23:26:07 +00:00
|
|
|
|
2007-09-15 18:31:32 +00:00
|
|
|
def remove_inzip_options(document):
    "Remove inzipName and embed options from the Graphics inset"
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            # should not happen
            document.warning("Malformed LyX document: Could not find end of graphics inset.")
            # Skip the broken inset; the old code fell through and searched
            # with j == -1 as the end bound.
            i += 1
            continue
        # If there's an inzipName param, remove it together with the embed
        # param, which always follows it directly.
        k = find_token(document.body, "\tinzipName", i + 1, j)
        if k != -1:
            # Delete both lines in one slice. The old code deleted body[k]
            # and then body[k+1]; after the first deletion the embed line
            # had shifted up to index k, so the second deletion removed the
            # wrong line and left the embed option behind.
            del document.body[k:k + 2]
        i = i + 1
|
|
|
|
|
|
|
|
|
2007-09-27 18:24:18 +00:00
|
|
|
def convert_inset_command(document):
    r"""
    Convert:
        \begin_inset LatexCommand cmd
    to
        \begin_inset CommandInset InsetType
        LatexCommand cmd

    (Raw docstring: the old plain string silently turned \b into a
    backspace character.)
    """
    # Compile once instead of on every loop iteration.
    r = re.compile(r'\\begin_inset LatexCommand (.*)$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset LatexCommand", i)
        if i == -1:
            return
        m = r.match(document.body[i])
        cmdName = m.group(1)
        # This mapping is adapted from factory.cpp.
        if cmdName[0:4].lower() == "cite":
            insetName = "citation"
        elif cmdName == "url" or cmdName == "htmlurl":
            insetName = "url"
        elif cmdName[-3:] == "ref":
            insetName = "ref"
        elif cmdName == "tableofcontents":
            insetName = "toc"
        elif cmdName == "printnomenclature":
            insetName = "nomencl_print"
        elif cmdName == "printindex":
            insetName = "index_print"
        else:
            insetName = cmdName
        # Replace the single old line with the two-line new form.
        document.body[i : i + 1] = ["\\begin_inset CommandInset " + insetName,
                                    "LatexCommand " + cmdName]
|
2007-09-27 18:24:18 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_inset_command(document):
    r"""
    Convert:
        \begin_inset CommandInset InsetType
        LatexCommand cmd
    to
        \begin_inset LatexCommand cmd

    Some insets may end up being converted to insets earlier versions of LyX
    will not be able to recognize. Not sure what to do about that.
    """
    # Compile once; also use a raw docstring so \b keeps its backslash.
    r = re.compile(r'LatexCommand\s+(.*)$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset", i)
        if i == -1:
            return
        m = r.match(document.body[i + 1])
        if not m:
            document.warning("Malformed LyX document: Missing LatexCommand in " + document.body[i] + ".")
            # Advance past this inset: the old code continued at the same
            # index and looped forever on malformed input.
            i += 1
            continue
        cmdName = m.group(1)
        # Collapse the two-line new form back into the single old line.
        document.body[i : i + 2] = ["\\begin_inset LatexCommand " + cmdName]
|
2007-09-27 18:24:18 +00:00
|
|
|
|
|
|
|
|
2007-09-24 14:37:10 +00:00
|
|
|
def convert_wrapfig_options(document):
    "Convert optional options for wrap floats (wrapfig)."
    # Adds the "lines", "placement", and "overhang" tokens to each Wrap inset.
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Wrap figure", i)
        if i == -1:
            return
        document.body.insert(i + 1, "lines 0")
        # "placement" may already sit right after the line we just inserted;
        # if not, supply a default placement as well.
        if find_token(document.body, "placement", i) == i + 2:
            document.body.insert(i + 3, "overhang 0col%")
        else:
            document.body.insert(i + 2, "placement o")
            document.body.insert(i + 3, "overhang 0col%")
        i += 1
|
|
|
|
|
|
|
|
|
2007-09-24 13:43:58 +00:00
|
|
|
def revert_wrapfig_options(document):
    "Revert optional options for wrap floats (wrapfig)."
    i = 0
    while True:
        # NOTE(review): this matches any body line starting with "lines",
        # not only lines inside a Wrap inset -- presumably safe for 1.6-format
        # documents where the token is only emitted there; confirm.
        i = find_token(document.body, "lines", i)
        if i == -1:
            return
        # "overhang" is expected exactly two lines after "lines"
        # (with "placement" in between).
        j = find_token(document.body, "overhang", i+1)
        if j != i + 2 and j != -1:
            document.warning("Malformed LyX document: Couldn't find overhang parameter of wrap float.")
        if j == -1:
            return
        # Delete "lines" first; the overhang line then shifts up to j-1.
        del document.body[i]
        del document.body[j-1]
        i = i + 1
|
|
|
|
|
|
|
|
|
2007-10-03 13:38:19 +00:00
|
|
|
def convert_latexcommand_index(document):
    "Convert from LatexCommand form to collapsable form."
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset index", i)
        if i == -1:
            return
        if document.body[i + 1] != "LatexCommand index": # Might also be index_print
            return
        # Strip the leading 'name "' and trailing '"' to get the raw term.
        fullcontent = document.body[i + 2][6:].strip('"')
        document.body[i:i + 2] = ["\\begin_inset Index",
                "status collapsed",
                "\\begin_layout Standard"]
        # Put here the conversions needed from LaTeX string to LyXText.
        # Here we do a minimal conversion to prevent crashes and data loss.
        # Manual patch-up may be needed.
        # Umlauted characters (most common ones, can be extended):
        fullcontent = fullcontent.replace(r'\\\"a', u'ä').replace(r'\\\"o', u'ö').replace(r'\\\"u', u'ü')
        # Generic, \" -> ":
        fullcontent = wrap_into_ert(fullcontent, r'\"', '"')
        # Math: peel off leading $...$ groups one at a time; the regex is
        # non-greedy so each match handles exactly one formula.
        r = re.compile('^(.*?)(\$.*?\$)(.*)')
        g = fullcontent
        while r.match(g):
            m = r.match(g)
            s = m.group(1)
            f = m.group(2).replace('\\\\', '\\')
            g = m.group(3)
            if s:
                # this is non-math! Escape backslashes and braces via ERT,
                # then emit the text before the formula.
                s = wrap_into_ert(s, r'\\', '\\backslash')
                s = wrap_into_ert(s, '{', '{')
                s = wrap_into_ert(s, '}', '}')
                document.body.insert(i + 3, s)
                i += 1
            # Emit the formula as a Formula inset; keep i pointing so that
            # i + 3 stays the next insertion slot.
            document.body.insert(i + 3, "\\begin_inset Formula " + f)
            document.body.insert(i + 4, "\\end_inset")
            i += 2
        # Generic, \\ -> \backslash (remaining non-math tail):
        g = wrap_into_ert(g, r'\\', '\\backslash{}')
        g = wrap_into_ert(g, '{', '{')
        g = wrap_into_ert(g, '}', '}')
        document.body.insert(i + 3, g)
        document.body[i + 4] = "\\end_layout"
        i = i + 5
|
|
|
|
|
|
|
|
|
|
|
|
def revert_latexcommand_index(document):
    "Revert from collapsable form to LatexCommand form."
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Index", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            return
        # Remove the inset's closing lines before flattening the content.
        del document.body[j - 1]
        del document.body[j - 2] # \end_layout
        document.body[i] = "\\begin_inset CommandInset index"
        document.body[i + 1] = "LatexCommand index"
        # clean up multiline stuff: strip LyX markup prefixes from each
        # content line and concatenate what remains into one string.
        content = ""
        for k in range(i + 3, j - 2):
            line = document.body[k]
            if line.startswith("\\begin_inset ERT"):
                line = line[16:]
            if line.startswith("\\begin_inset Formula"):
                line = line[20:]
            if line.startswith("\\begin_layout Standard"):
                line = line[22:]
            if line.startswith("\\end_layout"):
                line = line[11:]
            if line.startswith("\\end_inset"):
                line = line[10:]
            if line.startswith("status collapsed"):
                line = line[16:]
            # Convert umlauts back to their LaTeX escape form.
            line = line.replace(u'ä', r'\\\"a').replace(u'ö', r'\\\"o').replace(u'ü', r'\\\"u')
            content = content + line;
        # Re-quote the flattened content as the inset's name parameter.
        document.body[i + 3] = "name " + '"' + content + '"'
        # Delete the now-flattened source lines (always at index i + 4,
        # since each deletion shifts the rest up).
        for k in range(i + 4, j - 2):
            del document.body[i + 4]
        document.body.insert(i + 4, "")
        del document.body[i + 2] # \begin_layout standard
        i = i + 5
|
|
|
|
|
|
|
|
|
2007-10-03 21:07:01 +00:00
|
|
|
def revert_wraptable(document):
    "Revert wrap table to wrap figure."
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Wrap table", pos)
        if pos == -1:
            return
        old = document.body[pos]
        document.body[pos] = old.replace('\\begin_inset Wrap table', '\\begin_inset Wrap figure')
        pos += 1
|
|
|
|
|
|
|
|
|
2007-10-04 23:20:39 +00:00
|
|
|
def revert_vietnamese(document):
    "Set language Vietnamese to English"
    # Switch the document-wide language first, then every inline \lang switch.
    if document.language == "vietnamese":
        document.language = "english"
        pos = find_token(document.header, "\\language", 0)
        if pos != -1:
            document.header[pos] = "\\language english"
    j = 0
    while True:
        j = find_token(document.body, "\\lang vietnamese", j)
        if j == -1:
            return
        document.body[j] = document.body[j].replace("\\lang vietnamese", "\\lang english")
        j += 1
|
|
|
|
|
|
|
|
|
2007-10-08 23:40:23 +00:00
|
|
|
def revert_japanese(document):
    "Set language japanese-plain to japanese"
    # Switch the document-wide language first, then every inline \lang switch.
    if document.language == "japanese-plain":
        document.language = "japanese"
        pos = find_token(document.header, "\\language", 0)
        if pos != -1:
            document.header[pos] = "\\language japanese"
    j = 0
    while True:
        j = find_token(document.body, "\\lang japanese-plain", j)
        if j == -1:
            return
        document.body[j] = document.body[j].replace("\\lang japanese-plain", "\\lang japanese")
        j += 1
|
|
|
|
|
|
|
|
|
2007-10-09 00:12:48 +00:00
|
|
|
def revert_japanese_encoding(document):
    "Set input encoding form EUC-JP-plain to EUC-JP etc."
    pos = find_token(document.header, "\\inputencoding EUC-JP-plain", 0)
    if pos != -1:
        document.header[pos] = "\\inputencoding EUC-JP"
    pos = find_token(document.header, "\\inputencoding JIS-plain", 0)
    if pos != -1:
        document.header[pos] = "\\inputencoding JIS"
    pos = find_token(document.header, "\\inputencoding SJIS-plain", 0)
    if pos != -1: # convert to UTF8 since there is currently no SJIS encoding
        document.header[pos] = "\\inputencoding UTF8"
|
|
|
|
|
|
|
|
|
2007-10-11 14:56:34 +00:00
|
|
|
def revert_inset_info(document):
    'Replace info inset with its content'
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Info', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            # should not happen
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            # Skip the broken inset: the old code fell through with j == -1
            # and then indexed the body with nonsensical offsets.
            i += 1
            continue
        # Collect the inset's "type" and "arg" parameters from its lines.
        # ('itype' rather than 'type' to avoid shadowing the builtin.)
        itype = 'unknown'
        arg = ''
        for k in range(i, j + 1):
            if document.body[k].startswith("arg"):
                arg = document.body[k][3:].strip().strip('"')
            if document.body[k].startswith("type"):
                itype = document.body[k][4:].strip().strip('"')
        # Replace the whole inset with "type:arg". There is usually a blank
        # line after \end_inset, which should be swallowed as well.
        if document.body[j + 1].strip() == "":
            document.body[i : (j + 2)] = [itype + ':' + arg]
        else:
            document.body[i : (j + 1)] = [itype + ':' + arg]
        i += 1
|
|
|
|
|
|
|
|
|
2007-10-12 23:51:56 +00:00
|
|
|
def convert_pdf_options(document):
    # Set the pdfusetitle tag, delete the pdf_store_options,
    # set quotes for bookmarksopenlevel"
    if get_value(document.header, "\\use_hyperref", 0, default = "0") == "1":
        pos = find_token(document.header, "\\use_hyperref", 0)
        document.header.insert(pos + 1, "\\pdf_pdfusetitle true")
    k = find_token(document.header, "\\pdf_store_options", 0)
    if k != -1:
        del document.header[k]
    # Continue the search from where pdf_store_options was (or -1 if absent,
    # matching the original behaviour).
    i = find_token(document.header, "\\pdf_bookmarksopenlevel", k)
    if i == -1:
        return
    document.header[i] = document.header[i].replace('"', '')
|
|
|
|
|
|
|
|
|
2007-10-13 03:18:51 +00:00
|
|
|
def revert_pdf_options_2(document):
    # reset the pdfusetitle tag, set quotes for bookmarksopenlevel"
    k = find_token(document.header, "\\use_hyperref", 0)
    i = find_token(document.header, "\\pdf_pdfusetitle", k)
    if i != -1:
        del document.header[i]
    i = find_token(document.header, "\\pdf_bookmarksopenlevel", k)
    if i == -1: return
    # Re-quote the level value: ["\pdf_bookmarksopenlevel", "2"] becomes
    # '\pdf_bookmarksopenlevel "2"'. The separating space is carried inside
    # values[1] itself, which is why ''.join (no separator) is used below.
    values = document.header[i].split()
    values[1] = ' "' + values[1] + '"'
    document.header[i] = ''.join(values)
|
|
|
|
|
|
|
|
|
2007-10-13 19:06:09 +00:00
|
|
|
def convert_htmlurl(document):
    'Convert "htmlurl" to "href" insets for docbook'
    if document.backend != "docbook":
        return
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset url", pos)
        if pos == -1:
            return
        document.body[pos] = "\\begin_inset CommandInset href"
        document.body[pos + 1] = "LatexCommand href"
        pos += 1
|
|
|
|
|
2007-10-29 22:38:36 +00:00
|
|
|
|
2007-10-15 16:40:47 +00:00
|
|
|
def convert_url(document):
    'Convert url insets to url charstyles'
    if document.backend == "docbook":
        return
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset url", i)
        if i == -1:
            break
        n = find_token(document.body, "name", i)
        if n == i + 2:
            # place the URL name in typewriter before the new URL insert
            # grab the name 'bla' from the e.g. the line 'name "bla"',
            # therefore start with the 6th character
            name = document.body[n][6:-1]
            newname = [name + " "]
            # Inserting before the inset shifts it down by one line,
            # hence the i adjustment below.
            document.body[i:i] = newname
            i = i + 1
        j = find_token(document.body, "target", i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find target for url inset")
            # NOTE(review): this sets i to -1 and continues, so the next
            # find_token searches from the end of the body -- presumably
            # terminating the loop; confirm intended.
            i = j
            continue
        # Strip the 'target "' prefix and the closing quote.
        target = document.body[j][8:-1]
        k = find_token(document.body, "\\end_inset", j)
        if k == -1:
            document.warning("Malformed LyX document: Can't find end of url inset")
            i = k
            continue
        # Replace the whole CommandInset with a Flex URL charstyle holding
        # the bare target text.
        newstuff = ["\\begin_inset Flex URL",
            "status collapsed", "",
            "\\begin_layout Standard",
            "",
            target,
            "\\end_layout",
            ""]
        document.body[i:k] = newstuff
        i = k
|
|
|
|
|
2008-01-10 07:22:43 +00:00
|
|
|
def convert_ams_classes(document):
    "Map the obsolete amsart-plain/amsart-seq classes onto amsart plus modules."
    tc = document.textclass
    if (tc != "amsart" and tc != "amsart-plain" and
        tc != "amsart-seq" and tc != "amsbook"):
        return
    if tc == "amsart-plain":
        document.textclass = "amsart"
        document.set_textclass()
        document.add_module("Theorems (Starred)")
        return
    if tc == "amsart-seq":
        document.textclass = "amsart"
        document.set_textclass()
        document.add_module("Theorems (AMS)")

    # Now we want to see if any of the environments in the extended theorems
    # module were used in this document. If so, we'll add that module, too.
    layouts = ["Criterion", "Algorithm", "Axiom", "Condition", "Note", \
        "Notation", "Summary", "Acknowledgement", "Conclusion", "Fact", \
        "Assumption"]

    r = re.compile(r'^\\begin_layout (.*?)\*?\s*$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout", i)
        if i == -1:
            return
        m = r.match(document.body[i])
        if m == None:
            # str(i) is required here: the old code concatenated the int
            # index directly and raised a TypeError instead of warning.
            document.warning("Weirdly formed \\begin_layout at line " + str(i) + " of body!")
            i += 1
            continue
        m = m.group(1)
        if layouts.count(m) != 0:
            document.add_module("Theorems (AMS-Extended)")
            return
        i += 1
|
2007-10-29 22:38:36 +00:00
|
|
|
|
2007-10-13 19:06:09 +00:00
|
|
|
def revert_href(document):
    'Reverts hyperlink insets (href) to url insets (url)'
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset href", pos)
        if pos == -1:
            return
        replacement = ["\\begin_inset CommandInset url", "LatexCommand url"]
        document.body[pos : pos + 2] = replacement
        pos += 2
|
2007-10-13 19:06:09 +00:00
|
|
|
|
2007-10-29 22:38:36 +00:00
|
|
|
|
2007-10-23 15:02:15 +00:00
|
|
|
def convert_include(document):
    'Converts include insets to new format.'
    # Matches '\begin_inset Include \cmd{file}[options]' (options optional).
    pattern = re.compile(r'\\begin_inset Include\s+\\([^{]+){([^}]*)}(?:\[(.*)\])?')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Include", i)
        if i == -1:
            return
        previewline = document.body[i + 1]
        m = pattern.match(document.body[i])
        if m == None:
            document.warning("Unable to match line " + str(i) + " of body!")
            i += 1
            continue
        cmd = m.group(1)
        fn = m.group(2)
        opt = m.group(3)
        new_lines = ["\\begin_inset CommandInset include",
                     "LatexCommand " + cmd, previewline,
                     "filename \"" + fn + "\""]
        added = 2
        if opt:
            new_lines.append("lstparams " + '"' + opt + '"')
            added += 1
        document.body[i : i + 2] = new_lines
        i += added
|
|
|
|
|
2007-10-29 22:38:36 +00:00
|
|
|
|
2007-10-23 15:02:15 +00:00
|
|
|
def revert_include(document):
    'Reverts include insets to old format.'
    i = 0
    # Parameter lines of the new-style CommandInset include.
    r1 = re.compile('LatexCommand (.+)')
    r2 = re.compile('filename (.+)')
    r3 = re.compile('options (.*)')
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset include", i)
        if i == -1:
            return
        previewline = document.body[i + 1]
        m = r1.match(document.body[i + 2])
        if m == None:
            document.warning("Malformed LyX document: No LatexCommand line for `" +
                document.body[i] + "' on line " + str(i) + ".")
            i += 1
            continue
        cmd = m.group(1)
        m = r2.match(document.body[i + 3])
        if m == None:
            document.warning("Malformed LyX document: No filename line for `" + \
                document.body[i] + "' on line " + str(i) + ".")
            i += 2
            continue
        fn = m.group(1)
        options = ""
        numlines = 4
        # Only listings includes may carry an extra options line.
        if (cmd == "lstinputlisting"):
            m = r3.match(document.body[i + 4])
            if m != None:
                options = m.group(1)
                numlines = 5
        # Rebuild the single-line old form '\begin_inset Include \cmd{fn}[opts]'.
        newline = "\\begin_inset Include \\" + cmd + "{" + fn + "}"
        if options:
            newline += ("[" + options + "]")
        insertion = [newline, previewline]
        document.body[i : i + numlines] = insertion
        i += 2
|
2007-10-29 22:38:36 +00:00
|
|
|
|
|
|
|
|
|
|
|
def revert_albanian(document):
    "Set language Albanian to English"
    # Switch the document-wide language first, then every inline \lang switch.
    if document.language == "albanian":
        document.language = "english"
        pos = find_token(document.header, "\\language", 0)
        if pos != -1:
            document.header[pos] = "\\language english"
    j = 0
    while True:
        j = find_token(document.body, "\\lang albanian", j)
        if j == -1:
            return
        document.body[j] = document.body[j].replace("\\lang albanian", "\\lang english")
        j += 1
|
|
|
|
|
|
|
|
|
|
|
|
def revert_lowersorbian(document):
    "Set language lower Sorbian to English"
    # Switch the document-wide language first, then every inline \lang switch.
    if document.language == "lowersorbian":
        document.language = "english"
        pos = find_token(document.header, "\\language", 0)
        if pos != -1:
            document.header[pos] = "\\language english"
    j = 0
    while True:
        j = find_token(document.body, "\\lang lowersorbian", j)
        if j == -1:
            return
        document.body[j] = document.body[j].replace("\\lang lowersorbian", "\\lang english")
        j += 1
|
|
|
|
|
2007-10-13 19:06:09 +00:00
|
|
|
|
2007-10-30 22:07:16 +00:00
|
|
|
def revert_uppersorbian(document):
    """Set language uppersorbian to usorbian as this was used in LyX 1.5."""
    if document.language == "uppersorbian":
        document.language = "usorbian"
        pos = find_token(document.header, "\\language", 0)
        if pos != -1:
            document.header[pos] = "\\language usorbian"
    # Rewrite every language switch in the body.
    pos = find_token(document.body, "\\lang uppersorbian", 0)
    while pos != -1:
        document.body[pos] = document.body[pos].replace("\\lang uppersorbian", "\\lang usorbian")
        pos = find_token(document.body, "\\lang uppersorbian", pos + 1)
|
|
|
|
|
|
|
|
|
|
|
|
def convert_usorbian(document):
    """Set language usorbian to uppersorbian."""
    if document.language == "usorbian":
        document.language = "uppersorbian"
        pos = find_token(document.header, "\\language", 0)
        if pos != -1:
            document.header[pos] = "\\language uppersorbian"
    # Rewrite every language switch in the body.
    pos = find_token(document.body, "\\lang usorbian", 0)
    while pos != -1:
        document.body[pos] = document.body[pos].replace("\\lang usorbian", "\\lang uppersorbian")
        pos = find_token(document.body, "\\lang usorbian", pos + 1)
|
|
|
|
|
|
|
|
|
2007-11-01 17:37:43 +00:00
|
|
|
def revert_macro_optional_params(document):
    """Convert macro definitions with optional parameters into ERTs.

    Stub: macro definitions carrying one or more optional parameters
    should become uninterpreted ERT insets; not implemented yet.
    """
|
|
|
|
|
|
|
|
|
|
|
|
def revert_hyperlinktype(document):
    """Revert the hyperlink type: drop a 'type' line that directly
    follows a 'target' line."""
    pos = 0
    while 1:
        pos = find_token(document.body, "target", pos)
        if pos == -1:
            return
        tpos = find_token(document.body, "type", pos)
        if tpos == -1:
            return
        # Only remove the line when it belongs to this target.
        if tpos == pos + 1:
            del document.body[tpos]
        pos += 1
|
|
|
|
|
|
|
|
|
2007-11-23 02:10:00 +00:00
|
|
|
def revert_pagebreak(document):
    """Revert pagebreak to ERT: replace each \\pagebreak line with an
    equivalent ERT inset."""
    ert = ('\\begin_inset ERT\nstatus collapsed\n\n'
           '\\begin_layout Standard\n\n\n\\backslash\n'
           'pagebreak{}\n\\end_layout\n\n\\end_inset\n\n')
    pos = find_token(document.body, "\\pagebreak", 0)
    while pos != -1:
        document.body[pos] = ert
        pos = find_token(document.body, "\\pagebreak", pos + 1)
|
|
|
|
|
|
|
|
|
2007-11-25 18:34:37 +00:00
|
|
|
def revert_linebreak(document):
    """Revert linebreak to ERT: replace each \\linebreak line with an
    equivalent ERT inset."""
    ert = ('\\begin_inset ERT\nstatus collapsed\n\n'
           '\\begin_layout Standard\n\n\n\\backslash\n'
           'linebreak{}\n\\end_layout\n\n\\end_inset\n\n')
    pos = find_token(document.body, "\\linebreak", 0)
    while pos != -1:
        document.body[pos] = ert
        pos = find_token(document.body, "\\linebreak", pos + 1)
|
|
|
|
|
|
|
|
|
2007-11-25 21:09:01 +00:00
|
|
|
def revert_latin(document):
    """Set language Latin to English."""
    if document.language == "latin":
        document.language = "english"
        pos = find_token(document.header, "\\language", 0)
        if pos != -1:
            document.header[pos] = "\\language english"
    # Rewrite every language switch in the body.
    pos = find_token(document.body, "\\lang latin", 0)
    while pos != -1:
        document.body[pos] = document.body[pos].replace("\\lang latin", "\\lang english")
        pos = find_token(document.body, "\\lang latin", pos + 1)
|
|
|
|
|
|
|
|
|
|
|
|
def revert_samin(document):
    """Set language North Sami to English."""
    if document.language == "samin":
        document.language = "english"
        pos = find_token(document.header, "\\language", 0)
        if pos != -1:
            document.header[pos] = "\\language english"
    # Rewrite every language switch in the body.
    pos = find_token(document.body, "\\lang samin", 0)
    while pos != -1:
        document.body[pos] = document.body[pos].replace("\\lang samin", "\\lang english")
        pos = find_token(document.body, "\\lang samin", pos + 1)
|
|
|
|
|
|
|
|
|
2007-11-25 22:39:04 +00:00
|
|
|
def convert_serbocroatian(document):
    """Set language Serbocroatian to Croatian as this was really
    Croatian in LyX 1.5."""
    if document.language == "serbocroatian":
        document.language = "croatian"
        pos = find_token(document.header, "\\language", 0)
        if pos != -1:
            document.header[pos] = "\\language croatian"
    # Rewrite every language switch in the body.
    pos = find_token(document.body, "\\lang serbocroatian", 0)
    while pos != -1:
        document.body[pos] = document.body[pos].replace("\\lang serbocroatian", "\\lang croatian")
        pos = find_token(document.body, "\\lang serbocroatian", pos + 1)
|
|
|
|
|
|
|
|
|
2007-12-04 09:25:50 +00:00
|
|
|
def convert_framed_notes(document):
    """Convert framed notes to boxes: rename the inset and splice in the
    default box parameter block right after the opening line."""
    params = ('position "t"\nhor_pos "c"\nhas_inner_box 0\ninner_pos "t"\n'
              'use_parbox 0\nwidth "100col%"\nspecial "none"\nheight "1in"\n'
              'height_special "totalheight"')
    pos = 0
    while 1:
        pos = find_tokens(document.body, ["\\begin_inset Note Framed", "\\begin_inset Note Shaded"], pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\begin_inset Note", "\\begin_inset Box")
        document.body.insert(pos + 1, params)
        pos += 1
|
|
|
|
|
|
|
|
|
|
|
|
def revert_framed_notes(document):
    """Revert framed/shaded boxes to notes.

    Plain Framed/Shaded boxes (no inner box, no parbox) become Note
    insets again; boxes that do use an inner box keep a Box inset and
    get a shaded Note wrapped around their content instead.
    """
    i = 0
    while 1:
        i = find_tokens(document.body, ["\\begin_inset Box Framed", "\\begin_inset Box Shaded"], i)

        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            # should not happen
            # NOTE(review): there is no `return` here, so on a malformed
            # document the searches below run with j == -1 as their end
            # bound -- confirm this fallthrough is intentional.
            document.warning("Malformed LyX document: Could not find end of Box inset.")
        # Locate the pieces of the inset we need to rebuild around.
        k = find_token(document.body, "status", i + 1, j)
        if k == -1:
            document.warning("Malformed LyX document: Missing `status' tag in Box inset.")
            return
        status = document.body[k]
        l = find_token(document.body, "\\begin_layout Standard", i + 1, j)
        if l == -1:
            document.warning("Malformed LyX document: Missing `\\begin_layout Standard' in Box inset.")
            return
        m = find_token(document.body, "\\end_layout", i + 1, j)
        if m == -1:
            document.warning("Malformed LyX document: Missing `\\end_layout' in Box inset.")
            return
        # Box parameter lines live between the opening line and `status'.
        ibox = find_token(document.body, "has_inner_box 1", i + 1, k)
        pbox = find_token(document.body, "use_parbox 1", i + 1, k)
        if ibox == -1 and pbox == -1:
            # Simple box: rename it back to a Note and drop the box
            # parameter lines.
            document.body[i] = document.body[i].replace("\\begin_inset Box", "\\begin_inset Note")
            del document.body[i+1:k]
        else:
            # Inner box present: keep the Box (Shaded becomes Frameless)
            # and wrap the content in a shaded Note.
            document.body[i] = document.body[i].replace("\\begin_inset Box Shaded", "\\begin_inset Box Frameless")
            document.body.insert(l + 1, "\\begin_inset Note Shaded\n" + status + "\n\\begin_layout Standard\n")
            # NOTE(review): the insert above shifts indices >= l + 1 by
            # one, so `m + 1` here is computed from a stale index --
            # confirm the closing markup lands where intended.
            document.body.insert(m + 1, "\\end_layout\n\\end_inset")
        i = i + 1
|
|
|
|
|
|
|
|
|
2007-12-06 11:04:56 +00:00
|
|
|
def revert_slash(document):
    """Revert \\SpecialChar \\slash{} to ERT."""
    ert = ('\\begin_inset ERT\nstatus collapsed\n\n'
           '\\begin_layout Standard\n\n\n\\backslash\n'
           'slash{}\n\\end_layout\n\n\\end_inset\n\n')
    for idx, line in enumerate(document.body):
        document.body[idx] = line.replace('\\SpecialChar \\slash{}', ert)
|
2007-12-26 20:15:31 +00:00
|
|
|
|
2007-12-06 11:04:56 +00:00
|
|
|
|
|
|
|
def revert_nobreakdash(document):
    """Revert \\SpecialChar \\nobreakdash- to ERT.

    When at least one occurrence is reverted, force amsmath on in the
    header (\\use_amsmath 2), since \\nobreakdash is an amsmath command.
    """
    # The pattern is loop-invariant: compile it once, not per body line.
    r = re.compile(r'\\SpecialChar \\nobreakdash-')
    found = 0
    for i in range(len(document.body)):
        if r.match(document.body[i]):
            found = 1
            document.body[i] = document.body[i].replace('\\SpecialChar \\nobreakdash-', \
                '\\begin_inset ERT\nstatus collapsed\n\n' \
                '\\begin_layout Standard\n\n\n\\backslash\n' \
                'nobreakdash-\n\\end_layout\n\n\\end_inset\n\n')
    if not found:
        return
    j = find_token(document.header, "\\use_amsmath", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing '\\use_amsmath'.")
        return
    document.header[j] = "\\use_amsmath 2"
|
|
|
|
|
|
|
|
|
2007-12-20 15:46:14 +00:00
|
|
|
def revert_nocite_key(body, start, end):
    """Rewrite 'key "..."' lines in body[start:end] as \\nocite{...} ERT
    text; every other line in the range is blanked out."""
    for idx in range(start, end):
        line = body[idx]
        if line.startswith('key "'):
            line = line.replace('key "', "\\backslash\nnocite{")
            body[idx] = line.replace('"', "}")
        else:
            body[idx] = ""
|
|
|
|
|
|
|
|
|
|
|
|
def revert_nocite(document):
    """Revert LatexCommand nocite citation insets to ERT."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset citation", pos)
        if pos == -1:
            return
        pos += 1
        if document.body[pos] != "LatexCommand nocite":
            continue
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            # Missing \end_inset should not happen: revert everything up
            # to the end of the body and give up.
            document.warning("End of CommandInset citation not found in revert_nocite!")
            revert_nocite_key(document.body, pos + 1, len(document.body))
            return
        revert_nocite_key(document.body, pos + 1, end)
        # Replace the inset opening with an ERT opening.
        document.body[pos - 1] = "\\begin_inset ERT"
        document.body[pos] = "status collapsed\n\n" \
            "\\begin_layout Standard"
        document.body.insert(end, "\\end_layout\n")
        pos = end
|
|
|
|
|
|
|
|
|
2007-12-28 16:56:57 +00:00
|
|
|
def revert_btprintall(document):
    """Revert the (non-bibtopic) btPrintAll bibtex option to an ERT
    \\nocite{*} inset placed before the bibtex inset."""
    i = find_token(document.header, '\\use_bibtopic', 0)
    if i == -1:
        document.warning("Malformed lyx document: Missing '\\use_bibtopic'.")
        return
    # Only act when sectioned bibliographies (bibtopic) are disabled.
    if get_value(document.header, '\\use_bibtopic', 0) == "false":
        i = 0
        while i < len(document.body):
            i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
            if i == -1:
                return
            j = find_end_of_inset(document.body, i + 1)
            if j == -1:
                #this should not happen
                document.warning("End of CommandInset bibtex not found in revert_btprintall!")
                j = len(document.body)
            # NOTE(review): this loop deletes from and inserts into the
            # list it is indexing; the two mutations cancel out in total
            # length, but indices between i and k are shifted afterwards.
            # Looks safe only if at most one btprint line occurs per
            # inset -- confirm.
            for k in range(i, j):
                if (document.body[k] == 'btprint "btPrintAll"'):
                    del document.body[k]
                    # Insert the \nocite{*} ERT just before the inset.
                    document.body.insert(i, "\\begin_inset ERT\n" \
                        "status collapsed\n\n\\begin_layout Standard\n\n" \
                        "\\backslash\nnocite{*}\n" \
                        "\\end_layout\n\\end_inset\n")
            i = j
|
|
|
|
|
|
|
|
|
2007-12-05 21:42:57 +00:00
|
|
|
def revert_bahasam(document):
    """Set language Bahasa Malaysia to Bahasa Indonesia."""
    if document.language == "bahasam":
        document.language = "bahasa"
        pos = find_token(document.header, "\\language", 0)
        if pos != -1:
            document.header[pos] = "\\language bahasa"
    # Rewrite every language switch in the body.
    pos = find_token(document.body, "\\lang bahasam", 0)
    while pos != -1:
        document.body[pos] = document.body[pos].replace("\\lang bahasam", "\\lang bahasa")
        pos = find_token(document.body, "\\lang bahasam", pos + 1)
|
|
|
|
|
|
|
|
|
2007-12-05 22:04:33 +00:00
|
|
|
def revert_interlingua(document):
    """Set language Interlingua to English."""
    if document.language == "interlingua":
        document.language = "english"
        pos = find_token(document.header, "\\language", 0)
        if pos != -1:
            document.header[pos] = "\\language english"
    # Rewrite every language switch in the body.
    pos = find_token(document.body, "\\lang interlingua", 0)
    while pos != -1:
        document.body[pos] = document.body[pos].replace("\\lang interlingua", "\\lang english")
        pos = find_token(document.body, "\\lang interlingua", pos + 1)
|
|
|
|
|
|
|
|
|
2007-12-13 23:29:56 +00:00
|
|
|
def revert_serbianlatin(document):
    """Set language Serbian-Latin to Croatian."""
    if document.language == "serbian-latin":
        document.language = "croatian"
        pos = find_token(document.header, "\\language", 0)
        if pos != -1:
            document.header[pos] = "\\language croatian"
    # Rewrite every language switch in the body.
    pos = find_token(document.body, "\\lang serbian-latin", 0)
    while pos != -1:
        document.body[pos] = document.body[pos].replace("\\lang serbian-latin", "\\lang croatian")
        pos = find_token(document.body, "\\lang serbian-latin", pos + 1)
|
|
|
|
|
|
|
|
|
2007-08-12 13:25:36 +00:00
|
|
|
##
|
|
|
|
# Conversion hub
|
|
|
|
#
|
|
|
|
|
|
|
|
# LyX file-format versions this module can read and write.
supported_versions = ["1.6.0","1.6"]

# Forward conversions: each entry is [format number, [functions run to
# reach that format]].  An empty list means only the format number bumps.
convert = [[277, [fix_wrong_tables]],
           [278, [close_begin_deeper]],
           [279, [long_charstyle_names]],
           [280, [axe_show_label]],
           [281, []],
           [282, []],
           [283, [convert_flex]],
           [284, []],
           [285, []],
           [286, []],
           [287, [convert_wrapfig_options]],
           [288, [convert_inset_command]],
           [289, [convert_latexcommand_index]],
           [290, []],
           [291, []],
           [292, []],
           [293, []],
           [294, [convert_pdf_options]],
           [295, [convert_htmlurl, convert_url]],
           [296, [convert_include]],
           [297, [convert_usorbian]],
           [298, []],
           [299, []],
           [300, []],
           [301, []],
           [302, []],
           [303, [convert_serbocroatian]],
           [304, [convert_framed_notes]],
           [305, []],
           [306, []],
           [307, []],
           [308, []],
           [309, []],
           [310, []],
           [311, [convert_ams_classes]]
          ]

# Backward conversions, mirroring `convert` in reverse order.
revert = [[310, []],
          [309, [revert_btprintall]],
          [308, [revert_nocite]],
          [307, [revert_serbianlatin]],
          [306, [revert_slash, revert_nobreakdash]],
          [305, [revert_interlingua]],
          [304, [revert_bahasam]],
          [303, [revert_framed_notes]],
          [302, []],
          [301, [revert_latin, revert_samin]],
          [300, [revert_linebreak]],
          [299, [revert_pagebreak]],
          [298, [revert_hyperlinktype]],
          [297, [revert_macro_optional_params]],
          [296, [revert_albanian, revert_lowersorbian, revert_uppersorbian]],
          [295, [revert_include]],
          [294, [revert_href]],
          [293, [revert_pdf_options_2]],
          [292, [revert_inset_info]],
          [291, [revert_japanese, revert_japanese_encoding]],
          [290, [revert_vietnamese]],
          [289, [revert_wraptable]],
          [288, [revert_latexcommand_index]],
          [287, [revert_inset_command]],
          [286, [revert_wrapfig_options]],
          [285, [revert_pdf_options]],
          [284, [remove_inzip_options]],
          [283, []],
          [282, [revert_flex]],
          [281, []],
          [280, [revert_begin_modules]],
          [279, [revert_show_label]],
          [278, [revert_long_charstyle_names]],
          [277, []],
          [276, []]
         ]


if __name__ == "__main__":
    pass
|