lyx_mirror/lib/lyx2lyx/lyx_0_10.py
# This file is part of lyx2lyx
# -*- coding: utf-8 -*-
# Copyright (C) 2006 José Matos <jamatos@lyx.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
""" Convert files generated by lyx 0.10"""


def regularise_header(document):
    " Place each header token on a separate line. "
    i = 0
    while i < len(document.header):
        line = document.header[i]
        if len(line.split('\\')) > 1:
            tmp = ['\\' + token.strip() for token in line.split('\\')][1:]
            document.header[i: i+1] = tmp
            i += len(tmp)
        i += 1
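

# For illustration only (the header line below is hypothetical, not taken from
# a real document): regularise_header would split a line such as
#     \paragraph_separation indent \quotes_language english
# into the two header lines
#     \paragraph_separation indent
#     \quotes_language english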


def find_next_space(line, j):
    """ Return the position of the next space or backslash, whichever comes
    first, starting the search at position j; if neither is found, return the
    length of the line."""
    l = line.find(' ', j)
    if l == -1:
        l = len(line)
    k = line.find('\\', j)
    if k == -1:
        k = len(line)

    if k < l:
        return k
    return l
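

# For illustration only (hypothetical inputs):
#     find_next_space("\\layout Standard", 1)  ->  7   (the space after "layout")
#     find_next_space("\\hfill\\newline", 1)   ->  6   (the second backslash)
#     find_next_space("abc", 0)                ->  3   (nothing found: len of line)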


def regularise_body(document):
    " Place tokens that start with a backslash on separate lines. "
    i = 0
    while i < len(document.body):
        line = document.body[i]
        j = 0
        tmp = []
        while j < len(line):
            k = line.find('\\', j)
            if k == -1:
                tmp += [line[j:]]
                break

            if k != j:
                tmp += [line[j: k]]
                j = k

            k = find_next_space(line, j+1)

            # These tokens take the rest of the line
            token = line[j+1:k]
            if token in ["added_space_bottom", "added_space_top", "align",
                         "layout", "fill_bottom", "fill_top",
                         "labelwidthstring", "pagebreak_top",
                         "pagebreak_bottom", "noindent"]:
                tmp += [line[j:]]
                break

            # These tokens take no arguments
            if token in ["backslash", "begin_deeper", "end_deeper",
                         "end_float", "end_inset", "hfill", "newline",
                         "protected_separator"]:
                tmp += [line[j:k]]
                j = k
                continue

            # These tokens take one argument
            if token in ["bar", "begin_float", "family", "latex", "shape",
                         "size", "series", "cursor"]:
                k = find_next_space(line, k + 1)
                tmp += [line[j:k]]
                j = k
                continue

            # Special treatment for insets
            if token in ["begin_inset"]:
                l = find_next_space(line, k + 1)
                inset = line[k+1: l]

                if inset == "Latex":
                    tmp += [line[j:l]]
                    j = l
                    continue

                if inset in ["LatexCommand", "LatexDel"]:
                    tmp += [line[j:]]
                    break

                if inset == "Quotes":
                    l = find_next_space(line, l + 1)
                    tmp += [line[j:l]]
                    j = l
                    continue

                # Unknown inset: should not happen in 0.10 files
                assert(False)

            # We are inside a latex inset, pass the text verbatim
            tmp += [line[j:]]
            break

        document.body[i: i+1] = tmp
        i += len(tmp)
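

# For illustration only (the body line below is hypothetical): regularise_body
# would split the single line
#     some text\hfill more text
# into the three lines
#     some text
#     \hfill
#      more text
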
convert = [[210, [regularise_header, regularise_body]]]
revert = []
if __name__ == "__main__":
pass
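    # A minimal, self-contained sketch for manual experimentation; the
    # _FakeDocument class and the sample lines below are made up here and are
    # not part of lyx2lyx.
    class _FakeDocument:
        def __init__(self):
            self.header = ["\\paragraph_separation indent \\quotes_language english"]
            self.body = ["some text\\hfill more text"]

    doc = _FakeDocument()
    regularise_header(doc)
    regularise_body(doc)
    print(doc.header)  # ['\\paragraph_separation indent', '\\quotes_language english']
    print(doc.body)    # ['some text', '\\hfill', ' more text']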