# This file is part of lyx2lyx
# -*- coding: utf-8 -*-
# Copyright (C) 2002 Dekel Tsur <dekel@lyx.org>
# Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
# Copyright (C) 2004-2005 Georg Baum <Georg.Baum@post.rwth-aachen.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
""" Convert files to the file format generated by lyx 1.4"""
|
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
import re
|
2004-04-29 09:24:29 +00:00
|
|
|
from os import access, F_OK
|
|
|
|
import os.path
|
2006-07-27 18:30:13 +00:00
|
|
|
from parser_tools import check_token, find_token, \
|
|
|
|
get_value, del_token, is_nonempty_line, \
|
|
|
|
find_tokens, find_end_of, find_beginning_of, find_token_exact, find_tokens_exact, \
|
|
|
|
find_re, find_tokens_backwards
|
2004-04-29 09:24:29 +00:00
|
|
|
from sys import stdin
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2004-12-03 18:39:51 +00:00
|
|
|
from lyx_0_12 import update_latexaccents
|
|
|
|
|
2006-07-27 18:30:13 +00:00
|
|
|
####################################################################
|
|
|
|
# Private helper functions
|
|
|
|
|
|
|
|
def get_layout(line, default_layout):
    """Return the layout name on a '\\layout' line, or `default_layout`
    when the line carries no layout name."""
    parts = line.split()
    return parts[1] if len(parts) > 1 else default_layout
|
|
|
|
|
|
|
|
|
|
|
|
def get_paragraph(lines, i, format):
    """Return the index of the paragraph-start line containing line i,
    or -1 when none is found.  Walks backwards, skipping whole insets."""
    # Format 225 renamed the paragraph marker.
    begin_layout = "\\layout" if format < 225 else "\\begin_layout"
    while i != -1:
        i = find_tokens_backwards(lines, ["\\end_inset", begin_layout], i)
        if i == -1:
            return -1
        if check_token(lines[i], begin_layout):
            return i
        # Hit an inset end: jump to its beginning and keep searching.
        i = find_beginning_of_inset(lines, i)
    return -1
|
|
|
|
|
|
|
|
|
|
|
|
def find_beginning_of_inset(lines, i):
    """Return the index of the '\\begin_inset' matching the inset that
    encloses lines[i]."""
    return find_beginning_of(lines, i, "\\begin_inset", "\\end_inset")
|
|
|
|
|
|
|
|
|
|
|
|
def get_next_paragraph(lines, i, format):
    """Return the index of the paragraph after the one containing line i,
    or -1 when there is none.

    Insets are skipped as a unit so layout markers inside an inset are
    not mistaken for a new paragraph.  The token set depends on the file
    format (markers were renamed at formats 225 and 236).
    """
    if format < 225:
        tokens = ["\\begin_inset", "\\layout", "\\end_float", "\\the_end"]
    elif format < 236:
        tokens = ["\\begin_inset", "\\begin_layout", "\\end_float", "\\end_document"]
    else:
        tokens = ["\\begin_inset", "\\begin_layout", "\\end_float", "\\end_body", "\\end_document"]
    while i != -1:
        i = find_tokens(lines, tokens, i)
        # Bug fix: the original fell through to check_token(lines[-1], ...)
        # when no token was found, wrongly inspecting the last line.
        if i == -1:
            return -1
        if not check_token(lines[i], "\\begin_inset"):
            return i
        i = find_end_of_inset(lines, i)
    return -1
|
|
|
|
|
|
|
|
|
|
|
|
def find_end_of_inset(lines, i):
    """Return the index of the '\\end_inset' matching the inset that
    starts at or encloses lines[i]."""
    return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
|
|
|
|
|
|
|
|
# End of helper functions
|
|
|
|
####################################################################
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_color_default(document):
    """Rewrite every '\\color default' in the body as '\\color inherit'."""
    i = 0
    while True:
        i = find_token(document.body, "\\color default", i)
        if i == -1:
            return
        document.body[i] = document.body[i].replace("\\color default",
                                                    "\\color inherit")
|
2004-08-04 15:45:26 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def add_end_header(document):
    """Append the '\\end_header' terminator to the document header."""
    # Removed a stray C-style trailing semicolon from the original.
    document.header.append("\\end_header")
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def rm_end_header(document):
    """Delete the '\\end_header' line from the header, if present."""
    i = find_token(document.header, "\\end_header", 0)
    if i != -1:
        del document.header[i]
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_amsmath(document):
    """Convert the '\\use_amsmath' header setting to the new 3-state form.

    old: 0 == off, 1 == on
    new: 0 == off, 1 == auto, 2 == on
    Old 'off' really meant auto, so 0 -> 1 and 1 -> 2.
    """
    i = find_token(document.header, "\\use_amsmath", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing '\\use_amsmath'.")
        return
    tokens = document.header[i].split()
    if len(tokens) != 2:
        document.warning("Malformed LyX document: Could not parse line '%s'." % document.header[i])
        use_amsmath = '0'
    else:
        use_amsmath = tokens[1]
    document.header[i] = ("\\use_amsmath 1" if use_amsmath == '0'
                          else "\\use_amsmath 2")
|
2006-02-22 17:05:12 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_amsmath(document):
    """Revert the '\\use_amsmath' header setting to the old 2-state form.

    old: 0 == off, 1 == on
    new: 0 == off, 1 == auto, 2 == on
    Auto maps back to old 'off' (which meant auto), so 2 -> 1, else -> 0.
    """
    i = find_token(document.header, "\\use_amsmath", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing '\\use_amsmath'.")
        return
    tokens = document.header[i].split()
    if len(tokens) != 2:
        document.warning("Malformed LyX document: Could not parse line '%s'." % document.header[i])
        use_amsmath = '0'
    else:
        use_amsmath = tokens[1]
    document.header[i] = ("\\use_amsmath 1" if use_amsmath == '2'
                          else "\\use_amsmath 0")
|
2006-02-22 17:05:12 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_spaces(document):
    r"""\SpecialChar ~ -> \InsetSpace ~

    Plain textual replacement on every body line.  Docstring made raw:
    the original contained the invalid escape sequences '\S' and '\I'.
    """
    for i, line in enumerate(document.body):
        document.body[i] = line.replace("\\SpecialChar ~", "\\InsetSpace ~")
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_spaces(document):
    r"""\InsetSpace ~ -> \SpecialChar ~; other spaces become ERT."""
    regexp = re.compile(r'(.*)(\\InsetSpace\s+)(\S+)')
    i = 0
    while True:
        i = find_re(document.body, regexp, i)
        if i == -1:
            break
        # Match once and reuse it (the original matched the line twice).
        match = regexp.match(document.body[i])
        space = match.group(3)
        prepend = match.group(1)
        if space == '~':
            document.body[i] = regexp.sub(prepend + '\\SpecialChar ~',
                                          document.body[i])
            i = i + 1
        else:
            document.body[i] = regexp.sub(prepend, document.body[i])
            document.body[i+1:i+1] = ''
            if space == "\\space":
                space = "\\ "
            i = insert_ert(document.body, i+1, 'Collapsed', space,
                           document.format - 1, document.default_layout)
|
2005-09-28 09:40:50 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def rename_spaces(document):
    r"""\InsetSpace \, -> \InsetSpace \thinspace{}
    \InsetSpace \space -> \InsetSpace \space{}"""
    for i, line in enumerate(document.body):
        line = line.replace("\\InsetSpace \\space", "\\InsetSpace \\space{}")
        line = line.replace("\\InsetSpace \\,", "\\InsetSpace \\thinspace{}")
        document.body[i] = line
|
2005-09-28 09:40:50 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
|
|
|
|
def revert_space_names(document):
    r"""\InsetSpace \thinspace{} -> \InsetSpace \,
    \InsetSpace \space{} -> \InsetSpace \space"""
    for i, line in enumerate(document.body):
        line = line.replace("\\InsetSpace \\space{}", "\\InsetSpace \\space")
        line = line.replace("\\InsetSpace \\thinspace{}", "\\InsetSpace \\,")
        document.body[i] = line
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2005-02-21 12:04:23 +00:00
|
|
|
def lyx_support_escape(lab):
    """Equivalent to pre-unicode lyx::support::escape().

    Characters with code point >= 128 and the literal '=' and '%' are
    encoded as '=XY', where XY is the uppercase hex value of the code.

    Fixed: the original built the result with quadratic string
    concatenation (and ended with a stray semicolon); use a list + join.
    """
    out = []
    for c in lab:
        o = ord(c)
        if o >= 128 or c in '=%':
            # '%X' on a value 0-15 yields exactly one uppercase hex digit,
            # matching the original's hexdigit lookup table.
            out.append('=%X%X' % (o >> 4, o & 15))
        else:
            out.append(c)
    return "".join(out)
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_eqref(document):
    "\\begin_inset LatexCommand \\eqref -> ERT"
    regexp = re.compile(r'^\\begin_inset\s+LatexCommand\s+\\eqref')
    i = 0
    while True:
        i = find_re(document.body, regexp, i)
        if i == -1:
            break
        # Everything after the \eqref token is the (escaped) argument.
        eqref = lyx_support_escape(regexp.sub("", document.body[i]))
        document.body[i:i+1] = ["\\begin_inset ERT", "status Collapsed", "",
                                '\\layout %s' % document.default_layout, "",
                                "\\backslash ", "eqref" + eqref]
        i = i + 7
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_bibtex(document):
    """Rename the BibTeX inset command from \\BibTeX to \\bibtex."""
    for i, line in enumerate(document.body):
        document.body[i] = line.replace("\\begin_inset LatexCommand \\BibTeX",
                                        "\\begin_inset LatexCommand \\bibtex")
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_bibtex(document):
    """Rename the BibTeX inset command from \\bibtex back to \\BibTeX."""
    for i, line in enumerate(document.body):
        document.body[i] = line.replace("\\begin_inset LatexCommand \\bibtex",
                                        "\\begin_inset LatexCommand \\BibTeX")
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_insetparent(document):
    r"""Remove every \lyxparent inset from the body."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset LatexCommand \\lyxparent", i)
        if i == -1:
            break
        # Drop the inset line together with its two follow-up lines.
        del document.body[i:i+3]
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_external(document):
    """Convert inset External to the new format.

    RasterImage insets become Graphics insets; everything else is
    rewritten into the new template/filename layout.

    Fixes over the original:
    - 'filename' was referenced unbound (NameError) when a RasterImage
      inset carried no filename; the new-line list is now built
      incrementally so the branch is safe.
    - the long-standing FIXME is implemented: a warning is emitted when
      the inset parameters cannot be parsed.
    """
    external_rexp = re.compile(r'\\begin_inset External ([^,]*),"([^"]*)",')
    external_header = "\\begin_inset External"
    i = 0
    while True:
        i = find_token(document.body, external_header, i)
        if i == -1:
            break
        look = external_rexp.search(document.body[i])
        args = ['', '']
        if look:
            args[0] = look.group(1)
            args[1] = look.group(2)
        else:
            document.warning("Unable to parse inset External parameters in line '%s'." % document.body[i])
        if args[0] == "RasterImage":
            # Convert a RasterImage External Inset to a Graphics Inset.
            new_lines = ["\\begin_inset Graphics"]
            if args[1]:
                new_lines.append("\tfilename " + args[1])
        else:
            # Convert the old External Inset format to the new.
            new_lines = [external_header, "\ttemplate " + args[0]]
            if args[1]:
                new_lines.append("\tfilename " + args[1])
        document.body[i:i+1] = new_lines
        # Advance past the inserted lines so the (unchanged) header line
        # of a non-RasterImage inset is not matched again.
        i = i + len(new_lines) - 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_external_1(document):
    """Revert inset External to the old single-line format."""
    external_header = "\\begin_inset External"
    i = 0
    while True:
        i = find_token(document.body, external_header, i)
        if i == -1:
            break

        # Pull the template, filename and parameter lines off the inset,
        # keeping the last token of each (reverse + [0]).
        template = document.body[i+1].split()
        template.reverse()
        del document.body[i+1]

        filename = document.body[i+1].split()
        filename.reverse()
        del document.body[i+1]

        params = document.body[i+1].split()
        params.reverse()
        if document.body[i+1]:
            del document.body[i+1]

        document.body[i] = (document.body[i] + " " + template[0] + ', "'
                            + filename[0] + '", " ' + " ".join(params[1:]) + '"')
        i = i + 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_external_2(document):
    """Revert inset External (part II): drop any bare 'draft' line."""
    draft_token = '\tdraft'
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset External', i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            # Unclosed inset: should not happen.
            break
        k = find_token(document.body, draft_token, i+1, j-1)
        # Only remove the line when it is exactly the draft token.
        if k != -1 and len(draft_token) == len(document.body[k]):
            del document.body[k]
        i = j + 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_comment(document):
    """Convert '\\layout Comment' paragraphs into Comment insets.

    Each comment layout becomes a standard layout wrapping a Comment
    inset; following paragraphs that are still part of the comment are
    absorbed, skipping nested insets and deeper environments whole.
    """
    i = 0
    comment = "\\layout Comment"
    while True:
        i = find_token(document.body, comment, i)
        if i == -1:
            return

        document.body[i:i+1] = ['\\layout %s' % document.default_layout, "", "",
                                "\\begin_inset Comment",
                                "collapsed true", "",
                                '\\layout %s' % document.default_layout]
        i = i + 7

        while True:
            old_i = i
            i = find_token(document.body, "\\layout", i)
            if i == -1:
                # Comment runs to the end of the body: close the inset.
                i = len(document.body) - 1
                document.body[i:i] = ["\\end_inset", "", ""]
                return

            j = find_token(document.body, '\\begin_deeper', old_i, i)
            if j == -1: j = i + 1
            k = find_token(document.body, '\\begin_inset', old_i, i)
            if k == -1: k = i + 1

            if j < i and j < k:
                # A deeper environment starts before the next layout:
                # drop the markers and continue inside it.
                i = j
                del document.body[i]
                i = find_end_of(document.body, i, "\\begin_deeper", "\\end_deeper")
                if i == -1:
                    # Should not happen, but recover gracefully by
                    # appending the missing \end_deeper.
                    i = len(document.body) - 1
                    document.body[i:i] = ["\\end_deeper", ""]
                    return
                else:
                    del document.body[i]
                    continue

            if k < i:
                # A nested inset starts first: skip it completely.
                i = k
                i = find_end_of(document.body, i, "\\begin_inset", "\\end_inset")
                if i == -1:
                    # Should not happen; recover by appending the
                    # missing \end_inset (and the comment's own).
                    i = len(document.body) - 1
                    document.body[i:i] = ["\\end_inset", "", "", "\\end_inset", "", ""]
                    return
                else:
                    i = i + 1
                    continue

            if document.body[i].find(comment) == -1:
                # A different layout ends the comment: close the inset.
                document.body[i:i] = ["\\end_inset"]
                i = i + 1
                break
            # Another comment paragraph: keep it inside the same inset.
            document.body[i:i+1] = ['\\layout %s' % document.default_layout]
            i = i + 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_comment(document):
    """Turn Comment and Greyedout insets back into plain Note insets."""
    i = 0
    while True:
        i = find_tokens(document.body, ["\\begin_inset Comment",
                                        "\\begin_inset Greyedout"], i)
        if i == -1:
            return
        document.body[i] = "\\begin_inset Note"
        i = i + 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def add_end_layout(document):
    """Insert '\\end_layout' terminators into a body that lacks them.

    Walks the body tracking open \\layout / \\begin_inset /
    \\begin_deeper scopes on a stack and inserts an \\end_layout
    wherever a layout scope closes.
    """
    i = find_token(document.body, '\\layout', 0)
    if i == -1:
        return

    i = i + 1
    struct_stack = ["\\layout"]

    while True:
        i = find_tokens(document.body, ["\\begin_inset", "\\end_inset", "\\layout",
                        "\\begin_deeper", "\\end_deeper", "\\the_end"], i)

        if i != -1:
            token = document.body[i].split()[0]
        else:
            # No terminator found at all: repair the truncated document.
            document.warning("Truncated document.")
            i = len(document.body)
            document.body.insert(i, '\\the_end')
            token = ""

        if token == "\\begin_inset":
            struct_stack.append(token)
            i = i + 1
            continue

        if token == "\\end_inset":
            tail = struct_stack.pop()
            if tail == "\\layout":
                # The layout inside the inset was left open: close it.
                document.body.insert(i, "")
                document.body.insert(i, "\\end_layout")
                i = i + 2
                # Check if it is the correct tag
                struct_stack.pop()
            i = i + 1
            continue

        if token == "\\layout":
            tail = struct_stack.pop()
            if tail == token:
                # Previous layout at the same level ends here.
                document.body.insert(i, "")
                document.body.insert(i, "\\end_layout")
                i = i + 3
            else:
                struct_stack.append(tail)
                i = i + 1
            struct_stack.append(token)
            continue

        if token == "\\begin_deeper":
            document.body.insert(i, "")
            document.body.insert(i, "\\end_layout")
            i = i + 3
            struct_stack.append(token)
            continue

        if token == "\\end_deeper":
            if struct_stack[-1] == '\\layout':
                document.body.insert(i, '\\end_layout')
                i = i + 1
                struct_stack.pop()
            i = i + 1
            continue

        # case \end_document
        document.body.insert(i, "")
        document.body.insert(i, "\\end_layout")
        return
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def rm_end_layout(document):
    """Delete every '\\end_layout' line from the body."""
    i = 0
    while True:
        i = find_token(document.body, '\\end_layout', i)
        if i == -1:
            return
        del document.body[i]
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def insert_tracking_changes(document):
    """Ensure the header carries a '\\tracking_changes' entry (off)."""
    if find_token(document.header, "\\tracking_changes", 0) == -1:
        document.header.append("\\tracking_changes 0")
|
2004-05-11 16:13:33 +00:00
|
|
|
|
2004-10-09 21:32:56 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def rm_tracking_changes(document):
    """Strip change-tracking keywords ('\\author', '\\tracking_changes')
    from the header."""
    i = find_token(document.header, "\\author", 0)
    if i != -1:
        del document.header[i]
    i = find_token(document.header, "\\tracking_changes", 0)
    if i != -1:
        del document.header[i]
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def rm_body_changes(document):
    """Delete every '\\change_*' marker line from the body."""
    i = 0
    while True:
        i = find_token(document.body, "\\change_", i)
        if i == -1:
            return
        del document.body[i]
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def layout2begin_layout(document):
    r"""Rename every \layout marker to \begin_layout."""
    i = 0
    while True:
        i = find_token(document.body, '\\layout', i)
        if i == -1:
            return
        document.body[i] = document.body[i].replace('\\layout',
                                                    '\\begin_layout')
        i = i + 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def begin_layout2layout(document):
    r"""Rename every \begin_layout marker back to \layout."""
    i = 0
    while True:
        i = find_token(document.body, '\\begin_layout', i)
        if i == -1:
            return
        document.body[i] = document.body[i].replace('\\begin_layout',
                                                    '\\layout')
        i = i + 1
|
|
|
|
|
|
|
|
|
2004-10-09 21:32:56 +00:00
|
|
|
def convert_valignment_middle(body, start, end):
    '''valignment="center" -> valignment="middle" on column/cell tags
    within body[start:end].'''
    # Hoisted: the original re-parsed the uncompiled pattern on every
    # line of every table.
    tag_re = re.compile('^<(column|cell) .*valignment="center".*>$')
    for i in range(start, end):
        if tag_re.search(body[i]):
            body[i] = body[i].replace('valignment="center"',
                                      'valignment="middle"')
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_table_valignment_middle(document):
    """Apply the center -> middle valignment conversion inside every
    Tabular inset in the body."""
    regexp = re.compile(r'^\\begin_inset\s+Tabular')
    i = 0
    while True:
        i = find_re(document.body, regexp, i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            # Unclosed inset (should not happen): convert to the end.
            convert_valignment_middle(document.body, i + 1, len(document.body))
            return
        convert_valignment_middle(document.body, i + 1, j)
        i = j + 1
|
|
|
|
|
|
|
|
|
2004-10-09 21:32:56 +00:00
|
|
|
def revert_table_valignment_middle(body, start, end):
    '''valignment="middle" -> valignment="center" on column/cell tags
    within body[start:end].'''
    # Hoisted: the original re-parsed the uncompiled pattern on every
    # line of every table.
    tag_re = re.compile('^<(column|cell) .*valignment="middle".*>$')
    for i in range(start, end):
        if tag_re.search(body[i]):
            body[i] = body[i].replace('valignment="middle"',
                                      'valignment="center"')
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_valignment_middle(document):
    """Apply the middle -> center valignment reversion inside every
    Tabular inset in the body."""
    regexp = re.compile(r'^\\begin_inset\s+Tabular')
    i = 0
    while True:
        i = find_re(document.body, regexp, i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            # Unclosed inset (should not happen): revert to the end.
            revert_table_valignment_middle(document.body, i + 1, len(document.body))
            return
        revert_table_valignment_middle(document.body, i + 1, j)
        i = j + 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_end_document(document):
    "\\the_end -> \\end_document"
    i = find_token(document.body, "\\the_end", 0)
    if i != -1:
        document.body[i] = "\\end_document"
    else:
        # No terminator present: append the new one.
        document.body.append("\\end_document")
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_end_document(document):
    "\\end_document -> \\the_end"
    i = find_token(document.body, "\\end_document", 0)
    if i != -1:
        document.body[i] = "\\the_end"
    else:
        # No terminator present: append the old one.
        document.body.append("\\the_end")
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_breaks(document):
    r"""
    Convert line and page breaks
    Old:
    \layout Standard
    \line_top \line_bottom \pagebreak_top \pagebreak_bottom \added_space_top xxx \added_space_bottom yyy
    0

    New:
    \begin layout Standard

    \newpage

    \lyxline
    \begin_inset ERT
    \begin layout Standard
    \backslash
    vspace{-1\backslash
    parskip}
    \end_layout
    \end_inset

    \begin_inset VSpace xxx
    \end_inset

    0

    \begin_inset VSpace xxx
    \end_inset
    \lyxline

    \newpage

    \end_layout
    """
    # Paragraph options that may precede the paragraph text in the old format.
    par_params = ('added_space_bottom', 'added_space_top', 'align',
                 'labelwidthstring', 'line_bottom', 'line_top', 'noindent',
                 'pagebreak_bottom', 'pagebreak_top', 'paragraph_spacing',
                 'start_of_appendix')
    # Font attributes and the value that resets each one; used for the
    # paragraph fragment inserted below the paragraph (lists are parallel).
    font_attributes = ['\\family', '\\series', '\\shape', '\\emph',
                       '\\numeric', '\\bar', '\\noun', '\\color', '\\lang']
    attribute_values = ['default', 'default', 'default', 'default',
                        'default', 'default', 'default', 'none', document.language]
    i = 0
    while 1:
        i = find_token(document.body, "\\begin_layout", i)
        if i == -1:
            return
        layout = get_layout(document.body[i], document.default_layout)
        i = i + 1

        # Merge all paragraph parameters into a single line
        # We cannot check for '\\' only because paragraphs may start e.g.
        # with '\\backslash'
        while document.body[i + 1][:1] == '\\' and document.body[i + 1][1:].split()[0] in par_params:
            document.body[i] = document.body[i + 1] + ' ' + document.body[i]
            del document.body[i+1]

        # Locate each old break/space keyword on the merged parameter line
        # (-1 means the keyword is absent from this paragraph).
        line_top = document.body[i].find("\\line_top")
        line_bot = document.body[i].find("\\line_bottom")
        pb_top = document.body[i].find("\\pagebreak_top")
        pb_bot = document.body[i].find("\\pagebreak_bottom")
        vspace_top = document.body[i].find("\\added_space_top")
        vspace_bot = document.body[i].find("\\added_space_bottom")

        # Nothing to convert in this paragraph.
        if line_top == -1 and line_bot == -1 and pb_bot == -1 and pb_top == -1 and vspace_top == -1 and vspace_bot == -1:
            continue

        # Do we have a nonstandard paragraph? We need to create new paragraphs
        # if yes to avoid putting lyxline etc. inside of special environments.
        # This is wrong for itemize and enumerate environments, but it is
        # impossible to convert these correctly.
        # We want to avoid new paragraphs if possible becauase we want to
        # inherit font sizes.
        nonstandard = 0
        if (not document.is_default_layout(layout) or
            document.body[i].find("\\align") != -1 or
            document.body[i].find("\\labelwidthstring") != -1 or
            document.body[i].find("\\noindent") != -1):
            nonstandard = 1

        # get the font size of the beginning of this paragraph, since we need
        # it for the lyxline inset
        j = i + 1
        while not is_nonempty_line(document.body[j]):
            j = j + 1
        size_top = ""
        if document.body[j].find("\\size") != -1:
            size_top = document.body[j].split()[1]

        # Strip the converted flag keywords from the parameter line.
        for tag in "\\line_top", "\\line_bottom", "\\pagebreak_top", "\\pagebreak_bottom":
            document.body[i] = document.body[i].replace(tag, "")

        if vspace_top != -1:
            # the position could be change because of the removal of other
            # paragraph properties above
            vspace_top = document.body[i].find("\\added_space_top")
            tmp_list = document.body[i][vspace_top:].split()
            # tmp_list[1] is the space amount following the keyword.
            vspace_top_value = tmp_list[1]
            document.body[i] = document.body[i][:vspace_top] + " ".join(tmp_list[2:])

        if vspace_bot != -1:
            # the position could be change because of the removal of other
            # paragraph properties above
            vspace_bot = document.body[i].find("\\added_space_bottom")
            tmp_list = document.body[i][vspace_bot:].split()
            vspace_bot_value = tmp_list[1]
            document.body[i] = document.body[i][:vspace_bot] + " ".join(tmp_list[2:])

        document.body[i] = document.body[i].strip()
        i = i + 1

        # Create an empty paragraph or paragraph fragment for line and
        # page break that belong above the paragraph
        if pb_top !=-1 or line_top != -1 or vspace_top != -1:

            paragraph_above = list()
            if nonstandard:
                # We need to create an extra paragraph for nonstandard environments
                paragraph_above = ['\\begin_layout %s' % document.default_layout, '']

            if pb_top != -1:
                paragraph_above.extend(['\\newpage ',''])

            if vspace_top != -1:
                paragraph_above.extend(['\\begin_inset VSpace ' + vspace_top_value,'\\end_inset','',''])

            if line_top != -1:
                if size_top != '':
                    paragraph_above.extend(['\\size ' + size_top + ' '])
                # We need an additional vertical space of -\parskip.
                # We can't use the vspace inset because it does not know \parskip.
                paragraph_above.extend(['\\lyxline ', '', ''])
                insert_ert(paragraph_above, len(paragraph_above) - 1, 'Collapsed',
                           '\\vspace{-1\\parskip}\n', document.format + 1, document.default_layout)
                paragraph_above.extend([''])

            if nonstandard:
                paragraph_above.extend(['\\end_layout ',''])
                # insert new paragraph above the current paragraph
                # (i was advanced twice since the match, so i - 2 is this
                # paragraph's '\begin_layout' line)
                document.body[i-2:i-2] = paragraph_above
            else:
                # insert new lines at the beginning of the current paragraph
                document.body[i:i] = paragraph_above

            i = i + len(paragraph_above)

        # Ensure that nested style are converted later.
        k = find_end_of(document.body, i, "\\begin_layout", "\\end_layout")

        if k == -1:
            return

        if pb_bot !=-1 or line_bot != -1 or vspace_bot != -1:

            # get the font size of the end of this paragraph
            size_bot = size_top
            j = i + 1
            while j < k:
                if document.body[j].find("\\size") != -1:
                    size_bot = document.body[j].split()[1]
                    j = j + 1
                elif document.body[j].find("\\begin_inset") != -1:
                    # skip insets
                    j = find_end_of_inset(document.body, j)
                else:
                    j = j + 1

            paragraph_below = list()
            if nonstandard:
                # We need to create an extra paragraph for nonstandard environments
                paragraph_below = ['', '\\begin_layout %s' % document.default_layout, '']
            else:
                # Reset every font attribute used inside the paragraph so the
                # appended fragment does not inherit it.
                for a in range(len(font_attributes)):
                    if find_token(document.body, font_attributes[a], i, k) != -1:
                        paragraph_below.extend([font_attributes[a] + ' ' + attribute_values[a]])

            if line_bot != -1:
                if nonstandard and size_bot != '':
                    paragraph_below.extend(['\\size ' + size_bot + ' '])
                paragraph_below.extend(['\\lyxline ',''])
                if size_bot != '':
                    paragraph_below.extend(['\\size default '])

            if vspace_bot != -1:
                paragraph_below.extend(['\\begin_inset VSpace ' + vspace_bot_value,'\\end_inset','',''])

            if pb_bot != -1:
                paragraph_below.extend(['\\newpage ',''])

            if nonstandard:
                paragraph_below.extend(['\\end_layout '])
                # insert new paragraph below the current paragraph
                document.body[k+1:k+1] = paragraph_below
            else:
                # insert new lines at the end of the current paragraph
                document.body[k:k] = paragraph_below
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_note(document):
    " Convert Notes. "
    targets = ["\\begin_inset Note",
               "\\begin_inset Comment",
               "\\begin_inset Greyedout"]
    pos = 0
    while True:
        pos = find_tokens(document.body, targets, pos)
        if pos == -1:
            break
        line = document.body[pos]
        # '\begin_inset ' is 13 characters; splice the 'Note ' qualifier
        # in right after it, keeping the old inset name as the subtype.
        document.body[pos] = line[:13] + 'Note ' + line[13:]
        pos += 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_note(document):
    " Revert Notes. "
    prefix = "\\begin_inset Note "
    pos = 0
    while True:
        pos = find_token(document.body, prefix, pos)
        if pos == -1:
            break
        # Drop the 'Note ' qualifier, keeping the inset subtype that follows.
        document.body[pos] = "\\begin_inset " + document.body[pos][len(prefix):]
        pos += 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_box(document):
    " Convert Boxes. "
    targets = ["\\begin_inset Boxed",
               "\\begin_inset Doublebox",
               "\\begin_inset Frameless",
               "\\begin_inset ovalbox",
               "\\begin_inset Ovalbox",
               "\\begin_inset Shadowbox"]
    pos = 0
    while True:
        pos = find_tokens(document.body, targets, pos)
        if pos == -1:
            break
        line = document.body[pos]
        # '\begin_inset ' is 13 characters; splice the 'Box ' qualifier
        # in right after it, keeping the old box name as the subtype.
        document.body[pos] = line[:13] + 'Box ' + line[13:]
        pos += 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_box(document):
    " Revert Boxes."
    prefix = "\\begin_inset Box "
    pos = 0
    while True:
        pos = find_token(document.body, prefix, pos)
        if pos == -1:
            break
        # Drop the 'Box ' qualifier, keeping the box subtype that follows.
        document.body[pos] = "\\begin_inset " + document.body[pos][len(prefix):]
        pos += 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_collapsable(document):
    " Convert collapsed insets. "
    insets = ["\\begin_inset Box",
              "\\begin_inset Branch",
              "\\begin_inset CharStyle",
              "\\begin_inset Float",
              "\\begin_inset Foot",
              "\\begin_inset Marginal",
              "\\begin_inset Note",
              "\\begin_inset OptArg",
              "\\begin_inset Wrap"]
    # Old 'collapsed' keyword -> new 'status' keyword.
    translation = {"collapsed false": "status open",
                   "collapsed true": "status collapsed"}
    pos = 0
    while True:
        pos = find_tokens_exact(document.body, insets, pos)
        if pos == -1:
            break

        # Search for a line starting 'collapsed'. If we hit the paragraph
        # start ('\begin_layout', always present) first, the keyword is
        # missing and we warn instead.
        pos += 1
        while True:
            line = document.body[pos]
            if line in translation:
                document.body[pos] = translation[line]
                break
            if line[:13] == "\\begin_layout":
                document.warning("Malformed LyX document: Missing 'collapsed'.")
                break
            pos += 1
        pos += 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_collapsable(document):
    " Revert collapsed insets. "
    insets = ["\\begin_inset Box",
              "\\begin_inset Branch",
              "\\begin_inset CharStyle",
              "\\begin_inset Float",
              "\\begin_inset Foot",
              "\\begin_inset Marginal",
              "\\begin_inset Note",
              "\\begin_inset OptArg",
              "\\begin_inset Wrap"]
    # New 'status' keyword -> old 'collapsed' keyword ('inlined' has no
    # old equivalent and maps to 'collapsed true' as well).
    translation = {"status open": "collapsed false",
                   "status collapsed": "collapsed true",
                   "status inlined": "collapsed true"}
    pos = 0
    while True:
        pos = find_tokens_exact(document.body, insets, pos)
        if pos == -1:
            break

        # Search for a line starting 'status'. If we hit the paragraph
        # start ('\begin_layout', always present) first, the keyword is
        # missing and we warn instead.
        pos += 1
        while True:
            line = document.body[pos]
            if line in translation:
                document.body[pos] = translation[line]
                break
            if line[:13] == "\\begin_layout":
                document.warning("Malformed LyX document: Missing 'status'.")
                break
            pos += 1
        pos += 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_ert(document):
    " Convert ERT. "
    # Old capitalized status values -> new lowercase ones.
    translation = {"status Open": "status open",
                   "status Collapsed": "status collapsed",
                   "status Inlined": "status inlined"}
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset ERT", pos)
        if pos == -1:
            break

        # Search for a line starting 'status'. If we hit the paragraph
        # start ('\begin_layout', always present) first, the keyword is
        # missing and we warn instead.
        pos += 1
        while True:
            line = document.body[pos]
            if line in translation:
                document.body[pos] = translation[line]
                break
            if line[:13] == "\\begin_layout":
                document.warning("Malformed LyX document: Missing 'status'.")
                break
            pos += 1
        pos += 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_ert(document):
    """Revert ERT inset status values to the old capitalized form.

    'status open/collapsed/inlined' -> 'status Open/Collapsed/Inlined'.
    Emits a warning when an ERT inset has no status line before its
    first '\\begin_layout'.
    """
    i = 0
    while 1:
        i = find_token(document.body, "\\begin_inset ERT", i)
        if i == -1:
            break

        # Seach for a line starting 'status'
        # If, however, we find a line starting '\begin_layout'
        # (_always_ present) then break with a warning message
        i = i + 1
        while 1:
            if (document.body[i] == "status open"):
                document.body[i] = "status Open"
                break
            elif (document.body[i] == "status collapsed"):
                document.body[i] = "status Collapsed"
                break
            elif (document.body[i] == "status inlined"):
                document.body[i] = "status Inlined"
                break
            elif (document.body[i][:13] == "\\begin_layout"):
                # Fixed: removed the stray space before the colon so the
                # message matches every other warning in this module.
                document.warning("Malformed LyX document: Missing 'status'.")
                break
            i = i + 1

        i = i + 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_minipage(document):
    """ Convert minipages to the box inset.
    We try to use the same order of arguments as lyx does.
    """
    # Index -> name tables for the old numeric position values.
    pos = ["t","c","b"]
    inner_pos = ["c","t","b","s"]

    i = 0
    while 1:
        i = find_token(document.body, "\\begin_inset Minipage", i)
        if i == -1:
            return

        document.body[i] = "\\begin_inset Box Frameless"
        i = i + 1

        # convert old to new position using the pos list
        if document.body[i][:8] == "position":
            # Character 9 is the old numeric position (0/1/2).
            document.body[i] = 'position "%s"' % pos[int(document.body[i][9])]
        else:
            # No position line: use the default ("t").
            document.body.insert(i, 'position "%s"' % pos[0])
        i = i + 1

        document.body.insert(i, 'hor_pos "c"')
        i = i + 1
        document.body.insert(i, 'has_inner_box 1')
        i = i + 1

        # convert the inner_position
        if document.body[i][:14] == "inner_position":
            # Character 15 is the old numeric inner position (0-3).
            innerpos = inner_pos[int(document.body[i][15])]
            del document.body[i]
        else:
            innerpos = inner_pos[0]

        # We need this since the new file format has a height and width
        # in a different order.
        if document.body[i][:6] == "height":
            height = document.body[i][6:]
            # test for default value of 221 and convert it accordingly
            if height == ' "0pt"' or height == ' "0"':
                height = ' "1pt"'
            del document.body[i]
        else:
            height = ' "1pt"'

        if document.body[i][:5] == "width":
            width = document.body[i][5:]
            del document.body[i]
        else:
            width = ' "0"'

        if document.body[i][:9] == "collapsed":
            # NOTE(review): a line of the form 'collapsed true' leaves
            # body[i][9:] == ' true' (leading space), so this comparison
            # may never match and status always ends up "open" — verify
            # against real 1.3-format files.
            if document.body[i][9:] == "true":
                status = "collapsed"
            else:
                status = "open"
            del document.body[i]
        else:
            status = "collapsed"

        # Handle special default case:
        if height == ' "1pt"' and innerpos == 'c':
            innerpos = 't'

        # Emit the new Box parameter lines in the order LyX writes them.
        document.body.insert(i, 'inner_pos "' + innerpos + '"')
        i = i + 1
        document.body.insert(i, 'use_parbox 0')
        i = i + 1
        document.body.insert(i, 'width' + width)
        i = i + 1
        document.body.insert(i, 'special "none"')
        i = i + 1
        document.body.insert(i, 'height' + height)
        i = i + 1
        document.body.insert(i, 'height_special "totalheight"')
        i = i + 1
        document.body.insert(i, 'status ' + status)
        i = i + 1
|
|
|
|
|
|
|
|
|
2006-02-13 07:48:26 +00:00
|
|
|
def convert_ertbackslash(body, i, ert, format, default_layout):
    r"""Append *ert* to body[i] as valid ERT code.

    Backslashes become '\backslash ' tokens and newlines become line or
    paragraph breaks depending on *format*. Returns the (possibly
    incremented) line index i.
    """
    for ch in ert:
        if ch == '\\':
            # A backslash ends the current line after the token and
            # continues on a fresh empty line.
            body[i] += '\\backslash '
            i += 1
            body.insert(i, '')
        elif ch == '\n':
            if format <= 240:
                # Old formats use an explicit \newline.
                body[i+1:i+1] = ['\\newline ', '']
                i += 2
            else:
                # Newer formats break the paragraph instead.
                body[i+1:i+1] = ['\\end_layout', '',
                                 '\\begin_layout %s' % default_layout, '']
                i += 4
        else:
            body[i] += ch
    return i
|
|
|
|
|
|
|
|
|
2005-09-20 12:47:45 +00:00
|
|
|
def ert2latex(lines, format):
    r"""Translate ERT body lines into a single LaTeX string.

    The surrounding \begin_layout ... \end_layout pair must not be
    included in *lines*.
    """
    backslash_re = re.compile(r'\\backslash\s*$')
    newline_re = re.compile(r'\\newline\s*$')
    end_layout_re = re.compile(r'\\end_layout\s*$')
    if format <= 224:
        layout_re = re.compile(r'\\layout\s*\S+$')
    else:
        layout_re = re.compile(r'\\begin_layout\s*\S+$')
    pieces = []
    for raw in lines:
        line = backslash_re.sub('\\\\', raw)
        if format <= 240:
            # Old formats: a new layout is a paragraph break,
            # \newline is a line break.
            if layout_re.match(line):
                line = '\n\n'
            else:
                line = newline_re.sub('\n', line)
        elif layout_re.match(line):
            line = '\n'
        if format > 224 and end_layout_re.match(line):
            line = ''
        pieces.append(line)
    return ''.join(pieces)
|
|
|
|
|
|
|
|
|
2005-09-05 07:06:11 +00:00
|
|
|
def get_par_params(lines, i):
    """Collect all paragraph parameters starting at lines[i].

    The parameters may sit on one line or be spread over several
    consecutive lines; lines[i] must be the first parameter line.
    Returns them joined into one space-separated string.
    """
    known = ('added_space_bottom', 'added_space_top', 'align',
             'labelwidthstring', 'line_bottom', 'line_top', 'noindent',
             'pagebreak_bottom', 'pagebreak_top', 'paragraph_spacing',
             'start_of_appendix')
    collected = []
    # Checking the first word (not just the leading backslash) is required
    # because a paragraph may start with e.g. '\backslash'.
    while lines[i][:1] == '\\' and lines[i][1:].split()[0] in known:
        collected.append(lines[i].strip())
        i += 1
    return ' '.join(collected)
|
2005-09-05 07:06:11 +00:00
|
|
|
|
|
|
|
|
2005-09-20 12:47:45 +00:00
|
|
|
def lyxsize2latexsize(lyxsize):
    " Convert LyX font size to LaTeX fontsize. "
    mapping = {"tiny": "tiny", "scriptsize": "scriptsize",
               "footnotesize": "footnotesize", "small": "small",
               "normal": "normalsize", "large": "large", "larger": "Large",
               "largest": "LARGE", "huge": "huge", "giant": "Huge"}
    latex = mapping.get(lyxsize)
    # Unknown sizes map to the empty string (no size command).
    if latex is None:
        return ''
    return '\\' + latex
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_breaks(document):
    """ Change vspace insets, page breaks and lyxlines to paragraph options
    (if possible) or ERT"""

    # Get default spaceamount
    i = find_token(document.header, '\\defskip', 0)
    if i == -1:
        defskipamount = 'medskip'
    else:
        defskipamount = document.header[i].split()[1]

    # Map inset marker -> short name, and short name -> the paragraph
    # parameter that replaces it above/below a paragraph.
    keys = {"\\begin_inset" : "vspace", "\\lyxline" : "lyxline",
            "\\newpage" : "newpage"}
    keywords_top = {"vspace" : "\\added_space_top", "lyxline" : "\\line_top",
                    "newpage" : "\\pagebreak_top"}
    keywords_bot = {"vspace" : "\\added_space_bottom", "lyxline" : "\\line_bottom",
                    "newpage" : "\\pagebreak_bottom"}
    tokens = ["\\begin_inset VSpace", "\\lyxline", "\\newpage"]

    # Convert the insets
    i = 0
    while 1:
        i = find_tokens(document.body, tokens, i)
        if i == -1:
            return

        # Are we at the beginning of a paragraph?
        paragraph_start = 1
        this_par = get_paragraph(document.body, i, document.format - 1)
        start = this_par + 1
        params = get_par_params(document.body, start)
        size = "normal"
        # Paragraph parameters may be on one or more lines.
        # Find the start of the real paragraph text.
        while document.body[start][:1] == '\\' and document.body[start].split()[0] in params:
            start = start + 1
        # Anything non-empty (other than a \size change) between the
        # paragraph start and the inset means we are not at the start.
        for k in range(start, i):
            if document.body[k].find("\\size") != -1:
                # store font size
                size = document.body[k].split()[1]
            elif is_nonempty_line(document.body[k]):
                paragraph_start = 0
                break
        # Find the end of the real paragraph text.
        next_par = get_next_paragraph(document.body, i, document.format - 1)
        if next_par == -1:
            document.warning("Malformed LyX document: Missing next paragraph.")
            i = i + 1
            continue

        # first line of our insets
        inset_start = i
        # last line of our insets
        inset_end = inset_start
        # Are we at the end of a paragraph?
        paragraph_end = 1
        # start and end line numbers to delete if we convert this inset
        del_lines = list()
        # is this inset a lyxline above a paragraph?
        top = list()
        # raw inset information
        lines = list()
        # name of this inset
        insets = list()
        # font size of this inset
        sizes = list()

        # Detect subsequent lyxline, vspace and pagebreak insets created by convert_breaks()
        n = 0
        k = inset_start
        while k < next_par:
            if find_tokens(document.body, tokens, k) == k:
                # inset to convert
                lines.append(document.body[k].split())
                insets.append(keys[lines[n][0]])
                del_lines.append([k, k])
                top.append(0)
                sizes.append(size)
                n = n + 1
                inset_end = k
            elif document.body[k].find("\\size") != -1:
                # store font size
                size = document.body[k].split()[1]
            elif find_token(document.body, "\\begin_inset ERT", k) == k:
                # An ERT inset between our insets: only the special
                # '\vspace{-1\parskip}' ERT emitted by convert_breaks()
                # for a top lyxline is absorbed; anything else means we
                # are not at the end of the paragraph.
                ert_begin = find_token(document.body, "\\layout", k) + 1
                if ert_begin == 0:
                    document.warning("Malformed LyX document: Missing '\\layout'.")
                    continue
                ert_end = find_end_of_inset(document.body, k)
                if ert_end == -1:
                    document.warning("Malformed LyX document: Missing '\\end_inset'.")
                    continue
                ert = ert2latex(document.body[ert_begin:ert_end], document.format - 1)
                if (n > 0 and insets[n - 1] == "lyxline" and
                    ert == '\\vspace{-1\\parskip}\n'):
                    # vspace ERT created by convert_breaks() for top lyxline
                    top[n - 1] = 1
                    del_lines[n - 1][1] = ert_end
                    inset_end = ert_end
                    k = ert_end
                else:
                    paragraph_end = 0
                    break
            elif (n > 0 and insets[n - 1] == "vspace" and
                  find_token(document.body, "\\end_inset", k) == k):
                # ignore end of vspace inset
                del_lines[n - 1][1] = k
                inset_end = k
            elif is_nonempty_line(document.body[k]):
                paragraph_end = 0
                break
            k = k + 1

        # Determine space amount for vspace insets
        spaceamount = list()
        arguments = list()
        for k in range(n):
            if insets[k] == "vspace":
                spaceamount.append(lines[k][2])
                arguments.append(' ' + spaceamount[k] + ' ')
            else:
                spaceamount.append('')
                arguments.append(' ')

        # Can we convert to top paragraph parameters?
        # Only the exact inset patterns emitted by convert_breaks()
        # before a paragraph qualify.
        before = 0
        if ((n == 3 and insets[0] == "newpage" and insets[1] == "vspace" and
             insets[2] == "lyxline" and top[2]) or
            (n == 2 and
             ((insets[0] == "newpage" and insets[1] == "vspace") or
              (insets[0] == "newpage" and insets[1] == "lyxline" and top[1]) or
              (insets[0] == "vspace" and insets[1] == "lyxline" and top[1]))) or
            (n == 1 and insets[0] == "lyxline" and top[0])):
            # These insets have been created before a paragraph by
            # convert_breaks()
            before = 1

        # Can we convert to bottom paragraph parameters?
        after = 0
        if ((n == 3 and insets[0] == "lyxline" and not top[0] and
             insets[1] == "vspace" and insets[2] == "newpage") or
            (n == 2 and
             ((insets[0] == "lyxline" and not top[0] and insets[1] == "vspace") or
              (insets[0] == "lyxline" and not top[0] and insets[1] == "newpage") or
              (insets[0] == "vspace" and insets[1] == "newpage"))) or
            (n == 1 and insets[0] == "lyxline" and not top[0])):
            # These insets have been created after a paragraph by
            # convert_breaks()
            after = 1

        if paragraph_start and paragraph_end:
            # We are in a paragraph of our own.
            # We must not delete this paragraph if it has parameters
            if params == '':
                # First try to merge with the previous paragraph.
                # We try the previous paragraph first because we would
                # otherwise need ERT for two subsequent vspaces.
                prev_par = get_paragraph(document.body, this_par - 1, document.format - 1) + 1
                if prev_par > 0 and not before:
                    prev_params = get_par_params(document.body, prev_par + 1)
                    ert = 0
                    # determine font size
                    prev_size = "normal"
                    k = prev_par + 1
                    while document.body[k][:1] == '\\' and document.body[k].split()[0] in prev_params:
                        k = k + 1
                    while k < this_par:
                        if document.body[k].find("\\size") != -1:
                            prev_size = document.body[k].split()[1]
                            break
                        elif document.body[k].find("\\begin_inset") != -1:
                            # skip insets
                            k = find_end_of_inset(document.body, k)
                        elif is_nonempty_line(document.body[k]):
                            break
                        k = k + 1
                    # ERT is needed when the target parameter is already
                    # set on the previous paragraph, or a lyxline would
                    # change font size.
                    for k in range(n):
                        if (keywords_bot[insets[k]] in prev_params or
                            (insets[k] == "lyxline" and sizes[k] != prev_size)):
                            ert = 1
                            break
                    if not ert:
                        for k in range(n):
                            document.body.insert(prev_par + 1,
                                                 keywords_bot[insets[k]] + arguments[k])
                        del document.body[this_par+n:next_par-1+n]
                        i = this_par + n
                        continue
                # Then try next paragraph
                if next_par > 0 and not after:
                    next_params = get_par_params(document.body, next_par + 1)
                    ert = 0
                    # NOTE(review): 'k' here still holds whatever value it had
                    # after the previous-paragraph scan above — looks like a
                    # leftover copy of that loop; confirm intended behavior.
                    while document.body[k][:1] == '\\' and document.body[k].split()[0] in next_params:
                        k = k + 1
                    # determine font size
                    next_size = "normal"
                    k = next_par + 1
                    # NOTE(review): the bound 'this_par' precedes 'next_par',
                    # so this loop body may never run — verify.
                    while k < this_par:
                        if document.body[k].find("\\size") != -1:
                            next_size = document.body[k].split()[1]
                            break
                        elif is_nonempty_line(document.body[k]):
                            break
                        k = k + 1
                    for k in range(n):
                        if (keywords_top[insets[k]] in next_params or
                            (insets[k] == "lyxline" and sizes[k] != next_size)):
                            ert = 1
                            break
                    if not ert:
                        for k in range(n):
                            document.body.insert(next_par + 1,
                                                 keywords_top[insets[k]] + arguments[k])
                        del document.body[this_par:next_par-1]
                        i = this_par
                        continue
        elif paragraph_start or paragraph_end:
            # Convert to paragraph formatting if we are at the beginning or end
            # of a paragraph and the resulting paragraph would not be empty
            # The order is important: del and insert invalidate some indices
            if paragraph_start:
                keywords = keywords_top
            else:
                keywords = keywords_bot
            ert = 0
            for k in range(n):
                if keywords[insets[k]] in params:
                    ert = 1
                    break
            if not ert:
                for k in range(n):
                    document.body.insert(this_par + 1,
                                         keywords[insets[k]] + arguments[k])
                    # The insert above shifted all remaining inset line
                    # ranges down by one; compensate before deleting.
                    for j in range(k, n):
                        del_lines[j][0] = del_lines[j][0] + 1
                        del_lines[j][1] = del_lines[j][1] + 1
                    del document.body[del_lines[k][0]:del_lines[k][1]+1]
                    deleted = del_lines[k][1] - del_lines[k][0] + 1
                    # And the delete shifted the later ranges back up.
                    for j in range(k + 1, n):
                        del_lines[j][0] = del_lines[j][0] - deleted
                        del_lines[j][1] = del_lines[j][1] - deleted
                i = this_par
                continue

        # Convert the first inset to ERT.
        # The others are converted in the next loop runs (if they exist)
        if insets[0] == "vspace":
            document.body[i:i+1] = ['\\begin_inset ERT', 'status Collapsed', '',
                                    '\\layout %s' % document.default_layout, '', '\\backslash ']
            i = i + 6
            # A trailing '*' marks a kept ("protected") vspace.
            if spaceamount[0][-1] == '*':
                spaceamount[0] = spaceamount[0][:-1]
                keep = 1
            else:
                keep = 0

            # Replace defskip by the actual value
            if spaceamount[0] == 'defskip':
                spaceamount[0] = defskipamount

            # LaTeX does not know \smallskip* etc
            if keep:
                if spaceamount[0] == 'smallskip':
                    spaceamount[0] = '\\smallskipamount'
                elif spaceamount[0] == 'medskip':
                    spaceamount[0] = '\\medskipamount'
                elif spaceamount[0] == 'bigskip':
                    spaceamount[0] = '\\bigskipamount'
                elif spaceamount[0] == 'vfill':
                    spaceamount[0] = '\\fill'

            # Finally output the LaTeX code
            if (spaceamount[0] == 'smallskip' or spaceamount[0] == 'medskip' or
                spaceamount[0] == 'bigskip' or spaceamount[0] == 'vfill'):
                document.body.insert(i, spaceamount[0] + '{}')
            else :
                if keep:
                    document.body.insert(i, 'vspace*{')
                else:
                    document.body.insert(i, 'vspace{')
                i = convert_ertbackslash(document.body, i, spaceamount[0], document.format - 1, document.default_layout)
                document.body[i] = document.body[i] + '}'
            i = i + 1
        elif insets[0] == "lyxline":
            document.body[i] = ''
            latexsize = lyxsize2latexsize(size)
            if latexsize == '':
                document.warning("Could not convert LyX fontsize '%s' to LaTeX font size." % size)
                latexsize = '\\normalsize'
            i = insert_ert(document.body, i, 'Collapsed',
                           '\\lyxline{%s}' % latexsize,
                           document.format - 1, document.default_layout)
            # We use \providecommand so that we don't get an error if native
            # lyxlines are used (LyX writes first its own preamble and then
            # the user specified one)
            # NOTE(review): there is no comma between the last two string
            # literals below, so they concatenate onto one preamble line —
            # confirm that is intended.
            add_to_preamble(document,
                            ['% Commands inserted by lyx2lyx for lyxlines',
                             '\\providecommand{\\lyxline}[1]{',
                             ' {#1 \\vspace{1ex} \\hrule width \\columnwidth \\vspace{1ex}}'
                             '}'])
        elif insets[0] == "newpage":
            document.body[i] = ''
            i = insert_ert(document.body, i, 'Collapsed', '\\newpage{}',
                           document.format - 1, document.default_layout)
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2005-02-10 12:14:51 +00:00
|
|
|
def convert_len(len, special):
    """Convert a LyX length string into a LaTeX length string.

    A non-'none' *special* unit is appended as '\\<special>'; LyX
    percent units (e.g. 'col%') become the matching LaTeX lengths with
    the percentage turned into a fraction.
    """
    unit_map = {"text%":"\\textwidth", "col%":"\\columnwidth",
                "page%":"\\pagewidth", "line%":"\\linewidth",
                "theight%":"\\textheight", "pheight%":"\\pageheight"}

    # Convert special lengths
    if special != 'none':
        len = '%f\\' % len2value(len) + special

    # Convert LyX units to LaTeX units; only the first matching
    # percent unit is translated.
    for lyx_unit, latex_unit in unit_map.items():
        if lyx_unit in len:
            len = '%f' % (len2value(len) / 100) + latex_unit
            break

    return len
|
|
|
|
|
|
|
|
|
2006-02-13 07:48:26 +00:00
|
|
|
def convert_ertlen(body, i, len, special, format, default_layout):
    """ Convert a LyX length into valid ERT code and append it to body[i]
    Return the (maybe incremented) line index i
    Convert backslashes and insert the converted length into body. """
    # Translate the LyX length to its LaTeX form first, then let
    # convert_ertbackslash write the result into body as ERT lines.
    return convert_ertbackslash(body, i, convert_len(len, special), format, default_layout)
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
|
|
|
def len2value(len):
    """Return the numerical part of the length string *len* as a float.

    A length carrying no digits at all (e.g. a bare unit) counts as 1.0.
    """
    match = re.search(r'([+-]?[0-9.]+)', len)
    if match is None:
        # No number means 1.0
        return 1.0
    return float(match.group(1))
|
|
|
|
|
|
|
|
|
2006-02-13 07:48:26 +00:00
|
|
|
def insert_ert(body, i, status, text, format, default_layout):
    """Insert *text* as an ERT inset at body[i].

    Returns the index of the line following the inserted inset.
    """
    # Inset header.
    body[i:i] = ['\\begin_inset ERT', 'status ' + status, '']
    i += 3
    # The layout keyword changed after file format 224.
    if format > 224:
        body[i:i] = ['\\begin_layout %s' % default_layout, '']
    else:
        body[i:i] = ['\\layout %s' % default_layout, '']
    i += 1  # i points now to the just created empty line
    i = convert_ertbackslash(body, i, text, format, default_layout) + 1
    if format > 224:
        body[i:i] = ['\\end_layout']
        i += 1
    # Inset footer.
    body[i:i] = ['', '\\end_inset', '']
    return i + 3
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def add_to_preamble(document, text):
    """Append the lines in *text* to the document preamble.

    Nothing is added when the preamble already contains the first line
    of *text* — only that first line is checked!
    """
    already_present = find_token(document.preamble, text[0], 0) != -1
    if not already_present:
        document.preamble.extend(text)
|
2005-02-10 12:14:51 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_frameless_box(document):
    """ Convert frameless box.

    Boxes representable in the old format become Minipage insets;
    anything else is wrapped in ERT that emulates the box in LaTeX."""
    # Index -> letter tables for the old integer-valued position fields.
    pos = ['t', 'c', 'b']
    inner_pos = ['c', 't', 'b', 's']
    i = 0
    while 1:
        i = find_token(document.body, '\\begin_inset Frameless', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Missing '\\end_inset'.")
            i = i + 1
            continue
        del document.body[i]
        # The line deleted above shifts the inset end up by one.
        j = j - 1

        # Gather parameters
        # Defaults below; note 'position' and 'inner_pos' are stored as
        # integer indices into the tables above, the rest as strings.
        params = {'position':0, 'hor_pos':'c', 'has_inner_box':'1',
                  'inner_pos':1, 'use_parbox':'0', 'width':'100col%',
                  'special':'none', 'height':'1in',
                  'height_special':'totalheight', 'collapsed':'false'}
        for key in params.keys():
            value = get_value(document.body, key, i, j).replace('"', '')
            if value != "":
                if key == 'position':
                    # convert new to old position: 'position "t"' -> 0
                    value = find_token(pos, value, 0)
                    if value != -1:
                        params[key] = value
                elif key == 'inner_pos':
                    # convert inner position
                    value = find_token(inner_pos, value, 0)
                    if value != -1:
                        params[key] = value
                else:
                    params[key] = value
            # Remove the parameter line from the body; j tracks the
            # shrinking inset end.
            j = del_token(document.body, key, i, j)
        i = i + 1

        # Convert to minipage or ERT?
        # Note that the inner_position and height parameters of a minipage
        # inset are ignored and not accessible for the user, although they
        # are present in the file format and correctly read in and written.
        # Therefore we convert to ERT if they do not have their LaTeX
        # defaults. These are:
        # - the value of "position" for "inner_pos"
        # - "\totalheight" for "height"
        if (params['use_parbox'] != '0' or
            params['has_inner_box'] != '1' or
            params['special'] != 'none' or
            params['height_special'] != 'totalheight' or
            len2value(params['height']) != 1.0):

            # Here we know that this box is not supported in file format 224.
            # Therefore we need to convert it to ERT. We can't simply convert
            # the beginning and end of the box to ERT, because the
            # box inset may contain layouts that are different from the
            # surrounding layout. After the conversion the contents of the
            # box inset is on the same level as the surrounding text, and
            # paragraph layouts and align parameters can get mixed up.

            # A possible solution for this problem:
            # Convert the box to a minipage and redefine the minipage
            # environment in ERT so that the original box is simulated.
            # For minipages we could do this in a way that the width and
            # position can still be set from LyX, but this did not work well.
            # This is not possible for parboxes either, so we convert the
            # original box to ERT, put the minipage inset inside the box
            # and redefine the minipage environment to be empty.

            # Commands that are independant of a particular box can go to
            # the preamble.
            # We need to define lyxtolyxrealminipage with 3 optional
            # arguments although LyX 1.3 uses only the first one.
            # Otherwise we will get LaTeX errors if this document is
            # converted to format 225 or above again (LyX 1.4 uses all
            # optional arguments).
            add_to_preamble(document,
                ['% Commands inserted by lyx2lyx for frameless boxes',
                 '% Save the original minipage environment',
                 '\\let\\lyxtolyxrealminipage\\minipage',
                 '\\let\\endlyxtolyxrealminipage\\endminipage',
                 '% Define an empty lyxtolyximinipage environment',
                 '% with 3 optional arguments',
                 '\\newenvironment{lyxtolyxiiiminipage}[4]{}{}',
                 '\\newenvironment{lyxtolyxiiminipage}[2][\\lyxtolyxargi]%',
                 ' {\\begin{lyxtolyxiiiminipage}{\\lyxtolyxargi}{\\lyxtolyxargii}{#1}{#2}}%',
                 ' {\\end{lyxtolyxiiiminipage}}',
                 '\\newenvironment{lyxtolyximinipage}[1][\\totalheight]%',
                 ' {\\def\\lyxtolyxargii{{#1}}\\begin{lyxtolyxiiminipage}}%',
                 ' {\\end{lyxtolyxiiminipage}}',
                 '\\newenvironment{lyxtolyxminipage}[1][c]%',
                 ' {\\def\\lyxtolyxargi{{#1}}\\begin{lyxtolyximinipage}}',
                 ' {\\end{lyxtolyximinipage}}'])

            if params['use_parbox'] != '0':
                ert = '\\parbox'
            else:
                ert = '\\begin{lyxtolyxrealminipage}'

            # convert optional arguments only if not latex default
            if (pos[params['position']] != 'c' or
                inner_pos[params['inner_pos']] != pos[params['position']] or
                params['height_special'] != 'totalheight' or
                len2value(params['height']) != 1.0):
                ert = ert + '[' + pos[params['position']] + ']'
            if (inner_pos[params['inner_pos']] != pos[params['position']] or
                params['height_special'] != 'totalheight' or
                len2value(params['height']) != 1.0):
                ert = ert + '[' + convert_len(params['height'],
                                              params['height_special']) + ']'
            if inner_pos[params['inner_pos']] != pos[params['position']]:
                ert = ert + '[' + inner_pos[params['inner_pos']] + ']'

            ert = ert + '{' + convert_len(params['width'],
                                          params['special']) + '}'

            if params['use_parbox'] != '0':
                ert = ert + '{'
            # Neutralize the minipage environment inside the box.
            ert = ert + '\\let\\minipage\\lyxtolyxminipage%\n'
            ert = ert + '\\let\\endminipage\\endlyxtolyxminipage%\n'

            old_i = i
            i = insert_ert(document.body, i, 'Collapsed', ert, document.format - 1, document.default_layout)
            # Keep j (inset end) in sync with the inserted ERT lines.
            j = j + i - old_i - 1

            document.body[i:i] = ['\\begin_inset Minipage',
                                  'position %d' % params['position'],
                                  'inner_position 1',
                                  'height "1in"',
                                  'width "' + params['width'] + '"',
                                  'collapsed ' + params['collapsed']]
            i = i + 6
            j = j + 6

            # Restore the original minipage environment since we may have
            # minipages inside this box.
            # Start a new paragraph because the following may be nonstandard
            document.body[i:i] = ['\\layout %s' % document.default_layout, '', '']
            i = i + 2
            j = j + 3
            ert = '\\let\\minipage\\lyxtolyxrealminipage%\n'
            # NOTE(review): the preamble defines '\endlyxtolyxrealminipage',
            # but this line references '\lyxtolyxrealendminipage' — the names
            # differ; confirm which spelling is intended.
            ert = ert + '\\let\\endminipage\\lyxtolyxrealendminipage%'
            old_i = i
            i = insert_ert(document.body, i, 'Collapsed', ert, document.format - 1, document.default_layout)
            j = j + i - old_i - 1

            # Redefine the minipage end before the inset end.
            # Start a new paragraph because the previous may be nonstandard
            document.body[j:j] = ['\\layout %s' % document.default_layout, '', '']
            j = j + 2
            ert = '\\let\\endminipage\\endlyxtolyxminipage'
            j = insert_ert(document.body, j, 'Collapsed', ert, document.format - 1, document.default_layout)
            j = j + 1
            document.body.insert(j, '')
            j = j + 1

            # LyX writes '%\n' after each box. Therefore we need to end our
            # ERT with '%\n', too, since this may swallow a following space.
            if params['use_parbox'] != '0':
                ert = '}%\n'
            else:
                ert = '\\end{lyxtolyxrealminipage}%\n'
            j = insert_ert(document.body, j, 'Collapsed', ert, document.format - 1, document.default_layout)

            # We don't need to restore the original minipage after the inset
            # end because the scope of the redefinition is the original box.

        else:

            # Convert to minipage
            document.body[i:i] = ['\\begin_inset Minipage',
                                  'position %d' % params['position'],
                                  'inner_position %d' % params['inner_pos'],
                                  'height "' + params['height'] + '"',
                                  'width "' + params['width'] + '"',
                                  'collapsed ' + params['collapsed']]
            i = i + 6
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2006-02-02 21:30:04 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_branches(document):
    """ Remove branches.

    Deletes all \\branch ... \\end_branch blocks from the header and
    unwraps every Branch inset in the body, keeping its contents."""
    i = 0
    while 1:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        document.warning("Removing branch %s." % document.header[i].split()[1])
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document: Missing '\\end_branch'.")
            break
        del document.header[i:j+1]

    i = 0
    while 1:
        i = find_token(document.body, "\\begin_inset Branch", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Missing '\\end_inset'.")
            i = i + 1
            continue
        del document.body[i]
        # After deleting line i, the '\end_inset' originally at j now
        # sits at j - 1.
        del document.body[j - 1]
        # Seach for a line starting 'collapsed'
        # If, however, we find a line starting '\layout'
        # (_always_ present) then break with a warning message
        collapsed_found = 0
        while 1:
            if (document.body[i][:9] == "collapsed"):
                # Deleting shifts the next line into index i, so no
                # increment is needed here.
                del document.body[i]
                collapsed_found = 1
                continue
            elif (document.body[i][:7] == "\\layout"):
                if collapsed_found == 0:
                    document.warning("Malformed LyX document: Missing 'collapsed'.")
                # Delete this new paragraph, since it would not appear in
                # .tex output. This avoids also empty paragraphs.
                del document.body[i]
                break
            i = i + 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_jurabib(document):
    """Add the '\\use_jurabib 0' header setting (jurabib disabled)."""
    anchor = find_token(document.header, '\\use_numerical_citations', 0)
    if anchor != -1:
        document.header.insert(anchor + 1, '\\use_jurabib 0')
    else:
        document.warning("Malformed lyx document: Missing '\\use_numerical_citations'.")
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_jurabib(document):
    """Drop the '\\use_jurabib' header line when jurabib is disabled."""
    pos = find_token(document.header, '\\use_jurabib', 0)
    if pos == -1:
        document.warning("Malformed lyx document: Missing '\\use_jurabib'.")
        return
    if get_value(document.header, '\\use_jurabib', 0) == "0":
        del document.header[pos]
        return
    document.warning("Conversion of '\\use_jurabib = 1' not yet implemented.")
    # Don't remove '\\use_jurabib' so that people will get warnings by lyx
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_bibtopic(document):
    """Add the '\\use_bibtopic 0' header setting (bibtopic disabled)."""
    anchor = find_token(document.header, '\\use_jurabib', 0)
    if anchor != -1:
        document.header.insert(anchor + 1, '\\use_bibtopic 0')
    else:
        document.warning("Malformed lyx document: Missing '\\use_jurabib'.")
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_bibtopic(document):
    r"""Remove the \use_bibtopic header flag.

    Documents that actually use bibtopic cannot be reverted yet; their flag
    is kept so that older LyX versions emit a warning about it (mirrors
    revert_jurabib).
    """
    i = find_token(document.header, '\\use_bibtopic', 0)
    if i == -1:
        document.warning("Malformed lyx document: Missing '\\use_bibtopic'.")
        return
    if get_value(document.header, '\\use_bibtopic', 0) != "0":
        document.warning("Conversion of '\\use_bibtopic = 1' not yet implemented.")
        # Don't remove '\use_bibtopic' so that people will get warnings by lyx.
        # Bug fix: the original fell through and deleted the flag anyway
        # (and its comment referred to '\use_jurabib'); now we return, as
        # revert_jurabib does.
        return
    del document.header[i]
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_float(document):
    """Add a 'sideways false' parameter to every float inset.

    The parameter is inserted after the inset's 'wide' line.
    """
    pos = 0
    while True:
        pos = find_token_exact(document.body, '\\begin_inset Float', pos)
        if pos == -1:
            return
        # Search for a line starting 'wide'.  If we instead hit a line
        # starting '\begin_layout' (which is always present), the inset is
        # malformed: warn and move on.
        pos += 1
        while True:
            if document.body[pos][:4] == "wide":
                document.body.insert(pos + 1, 'sideways false')
                break
            elif document.body[pos][:13] == "\\begin_layout":
                document.warning("Malformed lyx document: Missing 'wide'.")
                break
            pos += 1
        pos += 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_float(document):
    """Drop the 'sideways' parameter from float insets.

    Only 'sideways false' can be reverted; rotated floats keep the
    parameter so that older LyX versions warn about it.
    """
    pos = 0
    while True:
        pos = find_token_exact(document.body, '\\begin_inset Float', pos)
        if pos == -1:
            return
        inset_end = find_end_of_inset(document.body, pos)
        if inset_end == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset'.")
            pos += 1
            continue
        if get_value(document.body, 'sideways', pos, inset_end) != "false":
            document.warning("Conversion of 'sideways true' not yet implemented.")
            # Don't remove 'sideways' so that people will get warnings by lyx
            pos += 1
            continue
        del_token(document.body, 'sideways', pos, inset_end)
        pos += 1
|
|
|
|
|
2004-05-11 16:13:33 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_graphics(document):
    """Add an extension to graphics inset filenames where necessary.

    Checks the filesystem for the plain name, then name + '.ps', then
    name + '.eps', matching the pre-format-233 insetgraphics lookup.
    Reading from stdin makes the check impossible (unknown directory), so
    a warning is emitted instead.
    """
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Graphics", pos)
        if pos == -1:
            return

        name_line = find_token_exact(document.body, "documentname", pos)
        if name_line == -1:
            return
        pos += 1
        filename = document.body[name_line].split()[1]
        absname = os.path.normpath(os.path.join(document.dir, filename))
        if document.input == stdin and not os.path.isabs(filename):
            # We don't know the directory and cannot check the document.
            # We could use a heuristic and take the current directory,
            # and we could try to find out if documentname has an extension,
            # but that would be just guesses and could be wrong.
            document.warning("""Warning: Cannot determine whether document
%s
needs an extension when reading from standard input.
You may need to correct the document manually or run
lyx2lyx again with the .lyx document as commandline argument.""" % filename)
            continue
        # This needs to be the same algorithm as in pre 233 insetgraphics
        if access(absname, F_OK):
            continue
        if access(absname + ".ps", F_OK):
            document.body[name_line] = document.body[name_line].replace(filename, filename + ".ps")
            continue
        if access(absname + ".eps", F_OK):
            document.body[name_line] = document.body[name_line].replace(filename, filename + ".eps")
|
2004-04-29 09:24:29 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_names(document):
    """Convert docbook FirstName/Surname paragraph styles to char styles.

    After an Author paragraph, a '\\begin_deeper' block holding FirstName
    and Surname paragraphs is collected, deleted, and replaced by
    CharStyle Firstname / CharStyle Surname insets.  Docbook backend only.

    Fix: the Firstname branch used the invalid escape sequences
    "\\end_layout"/"\\end_inset" written as "\end_layout"/"\end_inset"
    (a SyntaxWarning on modern Python); they are now written with a
    doubled backslash, which yields the identical runtime string.
    """
    if document.backend != "docbook":
        return

    i = 0

    while 1:
        i = find_token(document.body, "\\begin_layout Author", i)
        if i == -1:
            return

        # Skip blank lines after the Author layout line.
        i = i + 1
        while document.body[i] == "":
            i = i + 1

        # The name block must be '\end_layout', blank, '\begin_deeper'.
        if document.body[i][:11] != "\\end_layout" or document.body[i+2][:13] != "\\begin_deeper":
            i = i + 1
            continue

        k = i
        i = find_end_of(document.body, i+3, "\\begin_deeper", "\\end_deeper")
        if i == -1:
            # something is really wrong, abort
            document.warning("Missing \\end_deeper, after style Author.")
            document.warning("Aborted attempt to parse FirstName and Surname.")
            return
        firstname, surname = "", ""

        name = document.body[k:i]

        # Collect the FirstName paragraph text, if present.
        j = find_token(name, "\\begin_layout FirstName", 0)
        if j != -1:
            j = j + 1
            while name[j] != "\\end_layout":
                firstname = firstname + name[j]
                j = j + 1

        # Collect the Surname paragraph text, if present.
        j = find_token(name, "\\begin_layout Surname", 0)
        if j != -1:
            j = j + 1
            while name[j] != "\\end_layout":
                surname = surname + name[j]
                j = j + 1

        # delete name
        del document.body[k+2:i+1]

        document.body[k-1:k-1] = ["", "",
                          "\\begin_inset CharStyle Firstname",
                          "status inlined",
                          "",
                          '\\begin_layout %s' % document.default_layout,
                          "",
                          "%s" % firstname,
                          "\\end_layout",
                          "",
                          "\\end_inset",
                          "",
                          "",
                          "\\begin_inset CharStyle Surname",
                          "status inlined",
                          "",
                          '\\begin_layout %s' % document.default_layout,
                          "",
                          "%s" % surname,
                          "\\end_layout",
                          "",
                          "\\end_inset",
                          ""]
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_names(document):
    """Revert firstname/surname char styles back to paragraph styles.

    NOTE(review): apart from the docbook backend guard this conversion was
    never implemented; the function is intentionally a no-op.
    """
    if document.backend != "docbook":
        return
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_cite_engine(document):
    r"""Collapse the three citation flags into one \cite_engine header.

    \use_natbib, \use_numerical_citations and \use_jurabib must appear in
    that order on consecutive header lines.  The combined value is one of
    "basic", "natbib_numerical", "natbib_authoryear" or "jurabib".
    """
    a = find_token(document.header, "\\use_natbib", 0)
    if a == -1:
        document.warning("Malformed lyx document: Missing '\\use_natbib'.")
        return

    b = find_token(document.header, "\\use_numerical_citations", 0)
    if b == -1 or b != a + 1:
        document.warning("Malformed lyx document: Missing '\\use_numerical_citations'.")
        return

    c = find_token(document.header, "\\use_jurabib", 0)
    if c == -1 or c != b + 1:
        document.warning("Malformed lyx document: Missing '\\use_jurabib'.")
        return

    use_natbib = int(document.header[a].split()[1])
    use_numerical = int(document.header[b].split()[1])
    use_jurabib = int(document.header[c].split()[1])

    # natbib wins over jurabib; numerical only matters with natbib.
    if use_natbib:
        cite_engine = "natbib_numerical" if use_numerical else "natbib_authoryear"
    elif use_jurabib:
        cite_engine = "jurabib"
    else:
        cite_engine = "basic"

    # Replace the three flag lines with the single engine line.
    document.header[a:c + 1] = ["\\cite_engine " + cite_engine]
|
2004-05-13 20:44:35 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_cite_engine(document):
    r"""Expand \cite_engine back into the three old citation flags."""
    pos = find_token(document.header, "\\cite_engine", 0)
    if pos == -1:
        document.warning("Malformed lyx document: Missing '\\cite_engine'.")
        return

    cite_engine = document.header[pos].split()[1]

    # engine -> (use_natbib, use_numerical_citations, use_jurabib)
    flags = {
        "natbib_numerical": ('1', '1', '0'),
        "natbib_authoryear": ('1', '0', '0'),
        "jurabib": ('0', '0', '1'),
    }
    use_natbib, use_numerical, use_jurabib = flags.get(cite_engine, ('0', '0', '0'))

    document.header[pos:pos + 1] = ["\\use_natbib " + use_natbib,
                                    "\\use_numerical_citations " + use_numerical,
                                    "\\use_jurabib " + use_jurabib]
|
2004-05-13 20:44:35 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_paperpackage(document):
    r"""Rename \paperpackage values to the new scheme.

    default/a4 -> none, a4wide -> a4, widemarginsa4 -> a4wide.  A bare
    '\paperpackage' line (no value) gets 'widemarginsa4' appended.
    """
    pos = find_token(document.header, "\\paperpackage", 0)
    if pos == -1:
        return

    new_name = {'default': 'none', 'a4': 'none',
                'a4wide': 'a4', 'widemarginsa4': 'a4wide'}
    fields = document.header[pos].split()
    if len(fields) > 1:
        old = fields[1]
        document.header[pos] = document.header[pos].replace(old, new_name[old])
    else:
        document.header[pos] = document.header[pos] + ' widemarginsa4'
|
2004-07-01 14:40:59 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_paperpackage(document):
    r"""Rename \paperpackage values back to the old scheme."""
    pos = find_token(document.header, "\\paperpackage", 0)
    if pos == -1:
        return

    old_name = {'none': 'a4', 'a4': 'a4wide', 'a4wide': 'widemarginsa4',
                'widemarginsa4': '', 'default': 'default'}
    fields = document.header[pos].split()
    if len(fields) > 1:
        value = fields[1]
    else:
        value = 'default'
    document.header[pos] = document.header[pos].replace(value, old_name[value])
|
2004-07-01 14:40:59 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_bullets(document):
    r"""Collapse multi-line \bullet settings into a single header line."""
    pos = 0
    while True:
        pos = find_token(document.header, "\\bullet", pos)
        if pos == -1:
            return
        if document.header[pos][:12] == '\\bulletLaTeX':
            # \bulletLaTeX carries one value line.
            document.header[pos] = (document.header[pos] + ' ' +
                                    document.header[pos+1].strip())
            span = 3
        else:
            # \bullet carries three value lines.
            document.header[pos] = (document.header[pos] + ' ' +
                                    document.header[pos+1].strip() + ' ' +
                                    document.header[pos+2].strip() + ' ' +
                                    document.header[pos+3].strip())
            span = 5
        del document.header[pos+1:pos + span]
        pos += 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_bullets(document):
    r"""Split single-line \bullet settings back onto multiple lines."""
    pos = 0
    while True:
        pos = find_token(document.header, "\\bullet", pos)
        if pos == -1:
            return
        if document.header[pos][:12] == '\\bulletLaTeX':
            # The LaTeX form keeps its quoted value on its own line.
            quote = document.header[pos].find('"')
            if quote == -1:
                document.warning("Malformed header.")
                return
            else:
                document.header[pos:pos+1] = [document.header[pos][:quote-1],
                                              '\t' + document.header[pos][quote:],
                                              '\\end_bullet']
            pos += 3
        else:
            frag = document.header[pos].split()
            if len(frag) != 5:
                document.warning("Malformed header.")
                return
            else:
                document.header[pos:pos+1] = [frag[0] + ' ' + frag[1],
                                              '\t' + frag[2],
                                              '\t' + frag[3],
                                              '\t' + frag[4],
                                              '\\end_bullet']
                pos += 5
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def add_begin_header(document):
    r"""Insert \begin_document and \begin_header after \lyxformat."""
    pos = find_token(document.header, '\\lyxformat', 0)
    # Insert in reverse order so \begin_document ends up before \begin_header.
    document.header.insert(pos + 1, '\\begin_header')
    document.header.insert(pos + 1, '\\begin_document')
|
2004-08-14 18:41:27 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_begin_header(document):
    r"""Remove the \begin_document and \begin_header markers, if present."""
    for token in ("\\begin_document", "\\begin_header"):
        pos = find_token(document.header, token, 0)
        if pos != -1:
            del document.header[pos]
|
2004-04-14 08:45:46 +00:00
|
|
|
|
2004-08-14 18:41:27 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def add_begin_body(document):
    r"""Wrap the body in \begin_body ... \end_body markers."""
    document.body.insert(0, '\\begin_body')
    document.body.insert(1, '')
    end = find_token(document.body, "\\end_document", 0)
    document.body.insert(end, '\\end_body')
|
|
|
|
|
|
|
|
def remove_begin_body(document):
    r"""Remove the \begin_body and \end_body markers, if present."""
    pos = find_token(document.body, "\\begin_body", 0)
    if pos != -1:
        del document.body[pos]
        # Also drop the blank line that followed \begin_body.
        if not document.body[pos]:
            del document.body[pos]
    pos = find_token(document.body, "\\end_body", 0)
    if pos != -1:
        del document.body[pos]
|
2004-08-14 18:41:27 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def normalize_papersize(document):
    r"""Lower-case the special \papersize values Default and Custom."""
    pos = find_token(document.header, '\\papersize', 0)
    if pos == -1:
        return

    size = document.header[pos].split()[1]
    if size == "Default":
        document.header[pos] = '\\papersize default'
    elif size == "Custom":
        document.header[pos] = '\\papersize custom'
|
2004-08-15 21:52:13 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def denormalize_papersize(document):
    r"""Rename the \papersize value 'custom' back to 'Custom'."""
    pos = find_token(document.header, '\\papersize', 0)
    if pos == -1:
        return

    if document.header[pos].split()[1] == "custom":
        document.header[pos] = '\\papersize Custom'
|
2004-08-15 21:52:13 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def strip_end_space(document):
    """Strip surrounding whitespace from every command line in the body.

    Only lines starting with a backslash are touched; plain text lines
    keep their whitespace.
    """
    body = document.body
    for idx, line in enumerate(body):
        if line.startswith('\\'):
            body[idx] = line.strip()
|
2004-08-16 11:27:51 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def use_x_boolean(document):
    r"""Use false/true instead of 0/1 for \use_geometry, \use_bibtopic and
    \tracking_changes."""
    bin2bool = {'0': 'false', '1': 'true'}
    for token in ('\\use_geometry', '\\use_bibtopic', '\\tracking_changes'):
        pos = find_token(document.header, token, 0)
        if pos == -1:
            continue
        parts = document.header[pos].split()
        document.header[pos] = parts[0] + ' ' + bin2bool[parts[1]]
|
2004-10-09 21:32:56 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def use_x_binary(document):
    r"""Use 0/1 instead of false/true for \use_geometry, \use_bibtopic and
    \tracking_changes."""
    bool2bin = {'false': '0', 'true': '1'}
    for token in ('\\use_geometry', '\\use_bibtopic', '\\tracking_changes'):
        pos = find_token(document.header, token, 0)
        if pos == -1:
            continue
        parts = document.header[pos].split()
        document.header[pos] = parts[0] + ' ' + bool2bin[parts[1]]
|
2004-10-09 21:32:56 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
|
|
|
|
def normalize_paragraph_params(document):
    r"""Place every paragraph parameter on a line of its own.

    After each \begin_layout, consecutive lines holding known paragraph
    parameters are split so that each backslash command starts a new line.
    """
    body = document.body

    allowed_parameters = ('\\paragraph_spacing', '\\noindent',
                          '\\align', '\\labelwidthstring',
                          '\\start_of_appendix', '\\leftindent')

    pos = 0
    while True:
        pos = find_token(body, '\\begin_layout', pos)
        if pos == -1:
            return

        pos += 1
        while True:
            # Stop at the first non-blank line that is not a parameter.
            if body[pos].strip() and body[pos].split()[0] not in allowed_parameters:
                break

            # Split off a second command glued onto this line, if any.
            cut = body[pos].find('\\', 1)
            if cut != -1:
                body[pos:pos+1] = [body[pos][:cut].strip(), body[pos][cut:]]

            pos += 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_output_changes(document):
    r"""Add the \output_changes header parameter (after \tracking_changes)."""
    pos = find_token(document.header, '\\tracking_changes', 0)
    if pos == -1:
        document.warning("Malformed lyx document: Missing '\\tracking_changes'.")
        return
    document.header.insert(pos + 1, '\\output_changes true')
|
2005-01-24 17:12:19 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_output_changes(document):
    r"""Remove the \output_changes header parameter, if present."""
    pos = find_token(document.header, '\\output_changes', 0)
    if pos != -1:
        del document.header[pos]
|
2005-01-24 17:12:19 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_ert_paragraphs(document):
    r"""Convert paragraph breaks inside ERT insets and sanitize them.

    For each ERT inset: force all layouts to the default layout, strip
    paragraph parameters and font settings, separate paragraphs by an
    empty paragraph, and turn \newline into a paragraph break.
    """
    forbidden_settings = [
                          # paragraph parameters
                          '\\paragraph_spacing', '\\labelwidthstring',
                          '\\start_of_appendix', '\\noindent',
                          '\\leftindent', '\\align',
                          # font settings
                          '\\family', '\\series', '\\shape', '\\size',
                          '\\emph', '\\numeric', '\\bar', '\\noun',
                          '\\color', '\\lang']
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset ERT', pos)
        if pos == -1:
            return
        inset_end = find_end_of_inset(document.body, pos)
        if inset_end == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset'.")
            pos += 1
            continue

        # convert non-standard paragraphs to standard
        cur = pos
        while True:
            cur = find_token(document.body, "\\begin_layout", cur, inset_end)
            if cur == -1:
                break
            document.body[cur] = '\\begin_layout %s' % document.default_layout
            cur += 1

        # remove all paragraph parameters and font settings
        cur = pos
        while cur < inset_end:
            if (document.body[cur].strip() and
                document.body[cur].split()[0] in forbidden_settings):
                del document.body[cur]
                inset_end -= 1
            else:
                cur += 1

        # insert an empty paragraph before each paragraph but the first
        cur = pos
        is_first = True
        while True:
            cur = find_token(document.body, "\\begin_layout", cur, inset_end)
            if cur == -1:
                break
            if is_first:
                is_first = False
                cur += 1
                continue
            document.body[cur:cur] = ['\\begin_layout %s' % document.default_layout, "",
                                      "\\end_layout", ""]
            cur += 5
            inset_end += 4

        # convert \newline to new paragraph
        cur = pos
        while True:
            cur = find_token(document.body, "\\newline", cur, inset_end)
            if cur == -1:
                break
            document.body[cur:cur+1] = ["\\end_layout", "",
                                        '\\begin_layout %s' % document.default_layout]
            cur += 3
            inset_end += 2
            # We need an empty line if document.default_layout == ''
            if document.body[cur] != '':
                document.body.insert(cur, '')
                cur += 1
                inset_end += 1
        pos += 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def revert_ert_paragraphs(document):
    r"""Revert paragraph breaks inside ERT insets.

    Paragraph breaks become \newline; a pair of consecutive \newline
    lines becomes a paragraph break again (removing the doubled ones).
    """
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset ERT', pos)
        if pos == -1:
            return
        inset_end = find_end_of_inset(document.body, pos)
        if inset_end == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset'.")
            pos += 1
            continue

        # replace paragraph breaks with \newline
        cur = pos
        while True:
            cur = find_token(document.body, "\\end_layout", cur, inset_end)
            nxt = find_token(document.body, "\\begin_layout", cur, inset_end)
            if cur == -1 or nxt == -1:
                break
            document.body[cur:nxt+1] = ["\\newline"]
            inset_end = inset_end - nxt + cur
            cur += 1

        # replace double \newlines with paragraph breaks
        cur = pos
        while True:
            cur = find_token(document.body, "\\newline", cur, inset_end)
            if cur == -1:
                break
            nxt = cur + 1
            while document.body[nxt] == "":
                nxt += 1
            if document.body[nxt].strip() and document.body[nxt].split()[0] == "\\newline":
                document.body[cur:nxt+1] = ["\\end_layout", "",
                                            '\\begin_layout %s' % document.default_layout]
                inset_end = inset_end - nxt + cur + 2
                cur += 3
                # We need an empty line if document.default_layout == ''
                if document.body[nxt+1] != '':
                    document.body.insert(nxt+1, '')
                    cur += 1
                    inset_end += 1
            else:
                cur += 1
        pos += 1
|
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_french(document):
    r"""Rename the language 'frenchb' to 'french' in header and body."""
    pattern = re.compile(r'^\\language\s+frenchb')
    pos = find_re(document.header, pattern, 0)
    if pos != -1:
        document.header[pos] = "\\language french"

    # Change language in the document body
    pattern = re.compile(r'^\\lang\s+frenchb')
    pos = 0
    while True:
        pos = find_re(document.body, pattern, pos)
        if pos == -1:
            break
        document.body[pos] = "\\lang french"
        pos += 1
|
|
|
|
|
2005-07-07 10:51:58 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_paperpackage(document):
    r"""Remove the \paperpackage header line.

    For the a4 family of packages, the equivalent \usepackage call is
    prepended to the preamble and \papersize is reset to default.
    """
    i = find_token(document.header, '\\paperpackage', 0)

    if i == -1:
        return

    fields = document.header[i].split()

    del document.header[i]

    # Robustness fix: a bare '\paperpackage' line (no value) used to raise
    # IndexError on split()[1]; treat it like a value outside the a4 family.
    if len(fields) < 2:
        return
    paperpackage = fields[1]

    if paperpackage not in ("a4", "a4wide", "widemarginsa4"):
        return

    conv = {"a4": "\\usepackage{a4}", "a4wide": "\\usepackage{a4wide}",
            "widemarginsa4": "\\usepackage[widemargins]{a4}"}
    # for compatibility we ensure it is the first entry in preamble
    document.preamble[0:0] = [conv[paperpackage]]

    i = find_token(document.header, '\\papersize', 0)
    if i != -1:
        document.header[i] = "\\papersize default"
|
2005-07-17 11:17:13 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def remove_quotestimes(document):
    r"""Remove the obsolete \quotes_times header line, if present."""
    pos = find_token(document.header, '\\quotes_times', 0)
    if pos == -1:
        return
    del document.header[pos]
|
2005-10-13 10:59:39 +00:00
|
|
|
|
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
def convert_sgml_paragraphs(document):
    " Convert SGML paragraphs. "
    # Only DocBook documents can contain SGML paragraphs.
    if document.backend != "docbook":
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout SGML", i)
        if i == -1:
            return

        # Rewrite the SGML paragraph as a Standard paragraph whose content
        # is wrapped in an ERT inset (raw SGML passed through verbatim).
        document.body[i] = "\\begin_layout Standard"
        end = find_token(document.body, "\\end_layout", i)
        # Close the inset first: inserting at the end keeps index i valid
        # for the second insertion at the start of the paragraph.
        document.body[end + 1:end + 1] = ['', '\\end_inset', '', '', '\\end_layout']
        document.body[i + 1:i + 1] = ['\\begin_inset ERT', 'status inlined', '',
                                      '\\begin_layout Standard', '']
        # Ten lines were inserted in total; continue the scan past them.
        i += 10
|
2006-08-02 14:19:22 +00:00
|
|
|
|
2004-08-14 18:41:27 +00:00
|
|
|
##
|
|
|
|
# Conversion hub
|
|
|
|
#
|
2004-08-15 16:29:04 +00:00
|
|
|
|
2006-08-02 14:19:22 +00:00
|
|
|
# Every 1.4 point release (plus the bare "1.4") writes this file format.
supported_versions = ["1.4.0", "1.4.1", "1.4.2", "1.4"]
|
2006-02-22 17:05:12 +00:00
|
|
|
# Forward conversion table: each entry is [target_format_number, [steps...]].
# The lyx2lyx driver applies the steps of each entry in order to raise a
# document from format N-1 to format N, walking the list top to bottom.
convert = [[222, [insert_tracking_changes, add_end_header, convert_amsmath]],
           [223, [remove_color_default, convert_spaces, convert_bibtex, remove_insetparent]],
           [224, [convert_external, convert_comment]],
           [225, [add_end_layout, layout2begin_layout, convert_end_document,
                  convert_table_valignment_middle, convert_breaks]],
           [226, [convert_note]],
           [227, [convert_box]],
           [228, [convert_collapsable, convert_ert]],
           [229, [convert_minipage]],
           [230, [convert_jurabib]],
           [231, [convert_float]],
           [232, [convert_bibtopic]],
           [233, [convert_graphics, convert_names]],
           [234, [convert_cite_engine]],
           [235, [convert_paperpackage]],
           [236, [convert_bullets, add_begin_header, add_begin_body,
                  normalize_papersize, strip_end_space]],
           [237, [use_x_boolean]],
           [238, [update_latexaccents]],
           [239, [normalize_paragraph_params]],
           [240, [convert_output_changes]],
           [241, [convert_ert_paragraphs]],
           [242, [convert_french]],
           [243, [remove_paperpackage]],
           [244, [rename_spaces]],
           [245, [remove_quotestimes, convert_sgml_paragraphs]]]
|
2005-01-05 18:52:59 +00:00
|
|
|
|
2005-10-13 10:59:39 +00:00
|
|
|
# Backward conversion table: each entry is [target_format_number, [steps...]].
# Walked top to bottom to lower a document one format number per entry; an
# empty step list means the change needs no explicit reversion.
revert = [[244, []],
          [243, [revert_space_names]],
          [242, []],
          [241, []],
          [240, [revert_ert_paragraphs]],
          [239, [revert_output_changes]],
          [238, []],
          [237, []],
          [236, [use_x_binary]],
          [235, [denormalize_papersize, remove_begin_body, remove_begin_header,
                 revert_bullets]],
          [234, [revert_paperpackage]],
          [233, [revert_cite_engine]],
          [232, [revert_names]],
          [231, [revert_bibtopic]],
          [230, [revert_float]],
          [229, [revert_jurabib]],
          [228, []],
          [227, [revert_collapsable, revert_ert]],
          [226, [revert_box, revert_external_2]],
          [225, [revert_note]],
          [224, [rm_end_layout, begin_layout2layout, revert_end_document,
                 revert_valignment_middle, revert_breaks, convert_frameless_box,
                 remove_branches]],
          [223, [revert_external_2, revert_comment, revert_eqref]],
          [222, [revert_spaces, revert_bibtex]],
          [221, [revert_amsmath, rm_end_header, rm_tracking_changes, rm_body_changes]]]
|
2004-10-26 21:16:44 +00:00
|
|
|
|
2004-04-14 08:45:46 +00:00
|
|
|
|
|
|
|
if __name__ == "__main__":
    # This module only supplies conversion routines to the lyx2lyx driver;
    # it deliberately does nothing when executed directly.
    pass
|