Backport lyx2lyx from 1.5.0
git-svn-id: svn://svn.lyx.org/lyx/lyx-devel/branches/BRANCH_1_4_X@19183 a592a061-630c-0410-9148-cb99ea01b6c8
parent d9be32a2fe
commit 948cfd3b7b
@@ -1,3 +1,7 @@
2007-07-24 José Matos <jamatos@lyx.org>

    * unicodesymbols: new file to read documents from 1.5.x

2007-07-12 Jean-Pierre Chrétien <chretien@cert.fr>

    * layouts/beamer.layout: reorder layouts (bug 3141).

@@ -1,3 +1,8 @@
2007-07-24 José Matos <jamatos@lyx.org>

    * (several) Backport lyx2lyx from 1.5.0 (modulo some files used
      for testing).

2006-08-09 Jean-Marc Lasgouttes <lasgouttes@lyx.org>

    * Makefile.am (dist_lyx2lyx_DATA): rename to dist_lyx2lyx_PYTHON;
@@ -5,9 +10,11 @@
    (install-data-hook): new target: sets executable bit on lyx2lyx.

2006-07-15 Bo Peng <ben.bob@gmail.com>

    * replace all tab in .py files by spaces

2006-04-10 José Matos <jamatos@lyx.org>

    * parser_tools.py (find_tokens_exact, find_tokens): replace range
      with xrange.
@@ -1,6 +1,7 @@
# This file is part of lyx2lyx
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2002-2004 Dekel Tsur <dekel@lyx.org>, José Matos <jamatos@lyx.org>
# -*- coding: utf-8 -*-
# Copyright (C) 2002-2004 Dekel Tsur <dekel@lyx.org>
# Copyright (C) 2002-2006 José Matos <jamatos@lyx.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
@@ -17,36 +18,66 @@
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

from parser_tools import get_value, check_token, find_token,\
find_tokens, find_end_of, find_end_of_inset
find_tokens, find_end_of
import os.path
import gzip
import locale
import sys
import re
import string
import time

version_lyx2lyx = "1.4.1"
try:
import lyx2lyx_version
version_lyx2lyx = lyx2lyx_version.version
except: # we are running from build directory so assume the last version
version_lyx2lyx = '1.4.5'

default_debug_level = 2

####################################################################
# Private helper functions

def find_end_of_inset(lines, i):
" Find beginning of inset, where lines[i] is included."
return find_end_of(lines, i, "\\begin_inset", "\\end_inset")

def generate_minor_versions(major, last_minor_version):
""" Generate minor versions, using major as prefix and minor
versions from 0 until last_minor_version, plus the generic version.

Example:

generate_minor_versions("1.2", 4) ->
[ "1.2", "1.2.0", "1.2.1", "1.2.2", "1.2.3"]
"""
return [major] + [major + ".%d" % i for i in range(last_minor_version + 1)]


# End of helper functions
####################################################################


# Regular expressions used
format_re = re.compile(r"(\d)[\.,]?(\d\d)")
fileformat = re.compile(r"\\lyxformat\s*(\S*)")
original_version = re.compile(r"\#LyX (\S*)")
original_version = re.compile(r".*?LyX ([\d.]*)")

##
# file format information:
# file, supported formats, stable release versions
format_relation = [("0_10", [210], ["0.10.7","0.10"]),
("0_12", [215], ["0.12","0.12.1","0.12"]),
("1_0_0", [215], ["1.0.0","1.0"]),
("1_0_1", [215], ["1.0.1","1.0.2","1.0.3","1.0.4", "1.1.2","1.1"]),
("1_1_4", [215], ["1.1.4","1.1"]),
("1_1_5", [216], ["1.1.5","1.1.5fix1","1.1.5fix2","1.1"]),
("1_1_6", [217], ["1.1.6","1.1.6fix1","1.1.6fix2","1.1"]),
("1_1_6fix3", [218], ["1.1.6fix3","1.1.6fix4","1.1"]),
("1_2", [220], ["1.2.0","1.2.1","1.2.3","1.2.4","1.2"]),
("1_3", [221], ["1.3.0","1.3.1","1.3.2","1.3.3","1.3.4","1.3.5","1.3.6","1.3"]),
("1_4", range(222,246), ["1.4.0cvs","1.4"])]
format_relation = [("0_06", [200], generate_minor_versions("0.6" , 4)),
("0_08", [210], generate_minor_versions("0.8" , 6) + ["0.7"]),
("0_10", [210], generate_minor_versions("0.10", 7) + ["0.9"]),
("0_12", [215], generate_minor_versions("0.12", 1) + ["0.11"]),
("1_0", [215], generate_minor_versions("1.0" , 4)),
("1_1", [215], generate_minor_versions("1.1" , 4)),
("1_1_5", [216], ["1.1.5","1.1.5.1","1.1.5.2","1.1"]),
("1_1_6_0", [217], ["1.1.6","1.1.6.1","1.1.6.2","1.1"]),
("1_1_6_3", [218], ["1.1.6.3","1.1.6.4","1.1"]),
("1_2", [220], generate_minor_versions("1.2" , 4)),
("1_3", [221], generate_minor_versions("1.3" , 7)),
("1_4", range(222,246), generate_minor_versions("1.4" , 5)),
("1_5", range(246,277), generate_minor_versions("1.5" , 0))]


def formats_list():
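For reference, a minimal sketch (not part of the patch) of how the reworked format_relation table can be consumed: generate_minor_versions is copied from the hunk above, while step_for_version is a hypothetical helper added here only to illustrate the version-to-conversion-step lookup.

# Illustrative sketch only; step_for_version does not exist in lyx2lyx.
def generate_minor_versions(major, last_minor_version):
    return [major] + [major + ".%d" % i for i in range(last_minor_version + 1)]

format_relation = [("1_4", range(222, 246), generate_minor_versions("1.4", 5)),
                   ("1_5", range(246, 277), generate_minor_versions("1.5", 0))]

def step_for_version(version):
    # Map a release string such as "1.4.3" to its conversion module suffix ("1_4").
    for step, formats, versions in format_relation:
        if version in versions:
            return step
    return None

assert step_for_version("1.4.3") == "1_4"
assert step_for_version("1.5.0") == "1_5"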
@@ -81,12 +112,37 @@ def trim_eol(line):
return line[:-1]


def get_encoding(language, inputencoding, format, cjk_encoding):
if format > 248:
return "utf8"
# CJK-LyX encodes files using the current locale encoding.
# This means that files created by CJK-LyX can only be converted using
# the correct locale settings unless the encoding is given as commandline
# argument.
if cjk_encoding == 'auto':
return locale.getpreferredencoding()
elif cjk_encoding != '':
return cjk_encoding
from lyx2lyx_lang import lang
if inputencoding == "auto" or inputencoding == "default":
return lang[language][3]
if inputencoding == "":
return "latin1"
# python does not know the alias latin9
if inputencoding == "latin9":
return "iso-8859-15"
return inputencoding

##
# Class
#
class LyX_Base:
"""This class carries all the information of the LyX file."""
def __init__(self, end_format = 0, input = "", output = "", error = "", debug = default_debug_level, try_hard = 0):

def __init__(self, end_format = 0, input = "", output = "", error
= "", debug = default_debug_level, try_hard = 0, cjk_encoding = '',
language = "english", encoding = "auto"):

"""Arguments:
end_format: final format that the file should be converted. (integer)
input: the name of the input source, if empty resort to standard input.
@@ -103,6 +159,7 @@ class LyX_Base:

self.debug = debug
self.try_hard = try_hard
self.cjk_encoding = cjk_encoding

if end_format:
self.end_format = self.lyxformat(end_format)
@@ -119,6 +176,8 @@ class LyX_Base:
self.preamble = []
self.body = []
self.status = 0
self.encoding = encoding
self.language = language


def warning(self, message, debug_level= default_debug_level):
@@ -156,7 +215,7 @@ class LyX_Base:
if check_token(line, '\\end_preamble'):
break

if string.split(line)[:0] in ("\\layout", "\\begin_layout", "\\begin_body"):
if line.split()[:0] in ("\\layout", "\\begin_layout", "\\begin_body"):
self.warning("Malformed LyX file: Missing '\\end_preamble'.")
self.warning("Adding it now and hoping for the best.")

@@ -165,35 +224,45 @@ class LyX_Base:
if check_token(line, '\\end_preamble'):
continue

line = string.strip(line)
line = line.strip()
if not line:
continue

if string.split(line)[0] in ("\\layout", "\\begin_layout", "\\begin_body"):
if line.split()[0] in ("\\layout", "\\begin_layout", "\\begin_body", "\\begin_deeper"):
self.body.append(line)
break

self.header.append(line)

while 1:
line = self.input.readline()
if not line:
break
self.body.append(trim_eol(line))

self.textclass = get_value(self.header, "\\textclass", 0)
self.backend = get_backend(self.textclass)
self.format = self.read_format()
self.language = get_value(self.header, "\\language", 0)
if self.language == "":
self.language = "english"
self.language = get_value(self.header, "\\language", 0, default = "english")
self.inputencoding = get_value(self.header, "\\inputencoding", 0, default = "auto")
self.encoding = get_encoding(self.language, self.inputencoding, self.format, self.cjk_encoding)
self.initial_version = self.read_version()

# Second pass over header and preamble, now we know the file encoding
for i in range(len(self.header)):
self.header[i] = self.header[i].decode(self.encoding)
for i in range(len(self.preamble)):
self.preamble[i] = self.preamble[i].decode(self.encoding)

# Read document body
while 1:
line = self.input.readline().decode(self.encoding)
if not line:
break
self.body.append(trim_eol(line))


def write(self):
" Writes the LyX file to self.output."
self.set_version()
self.set_format()
self.set_textclass()
if self.encoding == "auto":
self.encoding = get_encoding(self.language, self.encoding, self.format, self.cjk_encoding)

if self.preamble:
i = find_token(self.header, '\\textclass', 0) + 1
@@ -206,7 +275,7 @@ class LyX_Base:
header = self.header

for line in header + [''] + self.body:
self.output.write(line+"\n")
self.output.write(line.encode(self.encoding)+"\n")


def choose_io(self, input, output):
@@ -227,6 +296,7 @@ class LyX_Base:
except:
self.input = open(input)
else:
self.dir = ''
self.input = sys.stdin


@@ -235,6 +305,8 @@ class LyX_Base:
result = format_re.match(format)
if result:
format = int(result.group(1) + result.group(2))
elif format == '2':
format = 200
else:
self.error(str(format) + ": " + "Invalid LyX file.")

@@ -252,9 +324,19 @@ class LyX_Base:
if line[0] != "#":
return None

line = line.replace("fix",".")
result = original_version.match(line)
if result:
return result.group(1)
# Special know cases: reLyX and KLyX
if line.find("reLyX") != -1 or line.find("KLyX") != -1:
return "0.12"

res = result.group(1)
if not res:
self.warning(line)
#self.warning("Version %s" % result.group(1))
return res
self.warning(str(self.header[:2]))
return None


@@ -286,6 +368,11 @@ class LyX_Base:
self.header[i] = "\\lyxformat %s" % format


def set_textclass(self):
i = find_token(self.header, "\\textclass", 0)
self.header[i] = "\\textclass %s" % self.textclass


def set_parameter(self, param, value):
" Set the value of the header parameter."
i = find_token(self.header, '\\' + param, 0)
@@ -359,7 +446,7 @@ class LyX_Base:

if not correct_version:
if format <= 215:
self.warning("Version does not match file format, discarding it.")
self.warning("Version does not match file format, discarding it. (Version %s, format %d)" %(self.initial_version, self.format))
for rel in format_relation:
if format in rel[1]:
initial_step = rel[0]
@@ -426,7 +513,7 @@ class LyX_Base:
self.warning('Incomplete file.', 0)
break

section = string.split(self.body[i])[1]
section = self.body[i].split()[1]
if section[-1] == '*':
section = section[:-1]

@@ -434,12 +521,12 @@ class LyX_Base:

k = i + 1
# skip paragraph parameters
while not string.strip(self.body[k]) or string.split(self.body[k])[0] in allowed_parameters:
while not self.body[k].strip() or self.body[k].split()[0] in allowed_parameters:
k = k +1

while k < j:
if check_token(self.body[k], '\\begin_inset'):
inset = string.split(self.body[k])[1]
inset = self.body[k].split()[1]
end = find_end_of_inset(self.body, k)
if end == -1 or end > j:
self.warning('Malformed file.', 0)
@@ -452,7 +539,7 @@ class LyX_Base:
k = k + 1

# trim empty lines in the end.
while string.strip(par[-1]) == '' and par:
while par[-1].strip() == '' and par:
par.pop()

toc_par.append(Paragraph(section, par))
@@ -464,8 +551,8 @@ class LyX_Base:

class File(LyX_Base):
" This class reads existing LyX files."
def __init__(self, end_format = 0, input = "", output = "", error = "", debug = default_debug_level, try_hard = 0):
LyX_Base.__init__(self, end_format, input, output, error, debug, try_hard)
def __init__(self, end_format = 0, input = "", output = "", error = "", debug = default_debug_level, try_hard = 0, cjk_encoding = ''):
LyX_Base.__init__(self, end_format, input, output, error, debug, try_hard, cjk_encoding)
self.read()


@@ -481,7 +568,14 @@ class NewFile(LyX_Base):
"\\textclass article",
"\\language english",
"\\inputencoding auto",
"\\fontscheme default",
"\\font_roman default",
"\\font_sans default",
"\\font_typewriter default",
"\\font_default_family default",
"\\font_sc false",
"\\font_osf false",
"\\font_sf_scale 100",
"\\font_tt_scale 100",
"\\graphics default",
"\\paperfontsize default",
"\\papersize default",
@@ -7,18 +7,23 @@ CHMOD = chmod
lyx2lyxdir = $(pkgdatadir)/lyx2lyx
dist_lyx2lyx_PYTHON = \
	lyx2lyx \
	lyx2lyx_version.py \
	lyx2lyx_lang.py \
	parser_tools.py \
	LyX.py \
	lyx_0_06.py \
	lyx_0_08.py \
	lyx_0_10.py \
	lyx_0_12.py \
	lyx_1_0_0.py \
	lyx_1_0_1.py \
	lyx_1_1_4.py \
	lyx_1_0.py \
	lyx_1_1.py \
	lyx_1_1_5.py \
	lyx_1_1_6.py \
	lyx_1_1_6fix3.py \
	lyx_1_1_6_0.py \
	lyx_1_1_6_3.py \
	lyx_1_2.py \
	lyx_1_3.py \
	lyx_1_4.py \
	lyx_1_5.py \
	profiling.py

install-data-hook:
@@ -1,6 +1,6 @@
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
# -*- coding: utf-8 -*-
# Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
@@ -36,25 +36,30 @@ Options:
-t, --to version final version (optional)
-o, --output name name of the output file or else goes to stdout
-n, --try-hard try hard (ignore any convertion errors)
-c, --cjk [encoding] files in format 248 and lower are read and
written in the format of CJK-LyX.
If encoding is not given or 'auto' the encoding
is determined from the locale.
-q, --quiet same as --debug=0"""


def parse_options(argv):
_options = ["help", "version", "list", "debug=", "err=", "from=", "to=", "output=", "try-hard", "quiet"]
_options = ["help", "version", "list", "debug=", "err=", "from=", "to=", "output=", "try-hard", "cjk", "quiet"]
try:
opts, args = getopt.getopt(argv[1:], "d:e:f:hlno:qt:v", _options)
opts, args = getopt.getopt(argv[1:], "c:d:e:f:hlno:qt:v", _options)
except getopt.error:
usage()
sys.exit(2)

end_format, input, output, error, debug, try_hard = 0, "", "", "", LyX.default_debug_level, 0
cjk_encoding = ''
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if o in ("-v", "--version"):
print "lyx2lyx, version %s" %(LyX.version_lyx2lyx)
print "Copyright (C) 2002-2004 José Matos and Dekel Tsur"
print "Copyright (C) 2002-2004 José Matos and Dekel Tsur"
sys.exit()
if o in ("-d", "--debug"):
debug = int(a)
@@ -71,15 +76,20 @@ def parse_options(argv):
error = a
if o in ("-n", "--try-hard"):
try_hard = 1
if o in ("-c", "--cjk"):
if a == '':
cjk_encoding = 'auto'
else:
cjk_encoding = a
if args:
input = args[0]

return end_format, input, output, error, debug, try_hard
return end_format, input, output, error, debug, try_hard, cjk_encoding


def main(argv):
end_format, input, output, error, debug, try_hard = parse_options(argv)
file = LyX.File(end_format, input, output, error, debug, try_hard)
end_format, input, output, error, debug, try_hard, cjk_encoding = parse_options(argv)
file = LyX.File(end_format, input, output, error, debug, try_hard, cjk_encoding)

file.convert()
file.write()
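For context, a hedged usage sketch of the new CJK handling (illustrative only; the file names below are made up):

# lyx2lyx -c euc-jp -o doc-converted.lyx doc-cjk.lyx   # explicit CJK encoding
# lyx2lyx --cjk doc-cjk.lyx                            # encoding taken from the locale
#
# The equivalent library call, assuming the LyX.py from this commit is importable:
# import LyX
# doc = LyX.File(end_format = 245, input = "doc-cjk.lyx",
#                output = "doc-converted.lyx", cjk_encoding = 'euc-jp')
# doc.convert()
# doc.write()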
lib/lyx2lyx/lyx2lyx_lang.py (new file, 102 lines)
@@ -0,0 +1,102 @@
# This file is generated by generate_incoding_info.py from lib/languages file.
# Do not change this file directly.

lang = {'afrikaans': ['afrikaans', 'Afrikaans', 'false', 'iso8859-1', 'af_ZA', ''],
'american': ['american', 'American', 'false', 'iso8859-1', 'en_US', ''],
'arabic': ['arabic', 'Arabic', 'true', 'iso8859-6', 'ar_SA', ''],
'austrian': ['austrian', 'Austrian', 'false', 'iso8859-1', 'de_AT', ''],
'bahasa': ['bahasa', 'Bahasa', 'false', 'iso8859-1', 'in_ID', ''],
'basque': ['basque', 'Basque', 'false', 'iso8859-1', 'eu_ES', ''],
'belarusian': ['belarusian', 'Belarusian', 'false', 'cp1251', 'be_BY', ''],
'brazil': ['brazil',
'Portuguese (Brazil)',
'false',
'iso8859-1',
'pt_BR',
''],
'breton': ['breton', 'Breton', 'false', 'iso8859-1', 'br_FR', ''],
'british': ['british', 'British', 'false', 'iso8859-1', 'en_GB', ''],
'bulgarian': ['bulgarian', 'Bulgarian', 'false', 'cp1251', 'bg_BG', ''],
'canadian': ['canadian', 'Canadian', 'false', 'iso8859-1', 'en_CA', ''],
'canadien': ['canadien',
'French Canadian',
'false',
'iso8859-1',
'fr_CA',
''],
'catalan': ['catalan', 'Catalan', 'false', 'iso8859-1', 'ca_ES', ''],
'croatian': ['croatian', 'Croatian', 'false', 'iso8859-2', 'hr_HR', ''],
'czech': ['czech', 'Czech', 'false', 'iso8859-2', 'cs_CZ', ''],
'danish': ['danish', 'Danish', 'false', 'iso8859-1', 'da_DK', ''],
'dutch': ['dutch', 'Dutch', 'false', 'iso8859-1', 'nl_NL', ''],
'english': ['english', 'English', 'false', 'iso8859-1', 'en_US', ''],
'esperanto': ['esperanto', 'Esperanto', 'false', 'iso8859-3', 'eo', ''],
'estonian': ['estonian', 'Estonian', 'false', 'iso8859-1', 'et_EE', ''],
'finnish': ['finnish', 'Finnish', 'false', 'iso8859-1', 'fi_FI', ''],
'french': ['french',
'French',
'false',
'iso8859-1',
'fr_FR',
'\\addto\\extrasfrench{\\providecommand{\\og}{\\leavevmode\\flqq~}\\providecommand{\\fg}{\\ifdim\\lastskip>\\z@\\unskip\\fi~\\frqq}}'],
'frenchb': ['french', 'French', 'false', 'iso8859-1', 'fr_FR', ''], # for compatibility reasons
'galician': ['galician', 'Galician', 'false', 'iso8859-1', 'gl_ES', ''],
'german': ['german', 'German', 'false', 'iso8859-1', 'de_DE', ''],
'greek': ['greek', 'Greek', 'false', 'iso8859-7', 'el_GR', ''],
'hebrew': ['hebrew', 'Hebrew', 'true', 'cp1255', 'he_IL', ''],
'icelandic': ['icelandic', 'Icelandic', 'false', 'iso8859-1', 'is_IS', ''],
'irish': ['irish', 'Irish', 'false', 'iso8859-1', 'ga_IE', ''],
'italian': ['italian', 'Italian', 'false', 'iso8859-1', 'it_IT', ''],
'kazakh': ['kazakh', 'Kazakh', 'false', 'pt154', 'kk_KZ', ''],
'latvian': ['latvian', 'Latvian', 'false', 'iso8859-13', 'lv_LV', ''],
'lithuanian': ['lithuanian',
'Lithuanian',
'false',
'iso8859-13',
'lt_LT',
''],
'magyar': ['magyar', 'Magyar', 'false', 'iso8859-2', 'hu_HU', ''],
'naustrian': ['naustrian',
'Austrian (new spelling)',
'false',
'iso8859-1',
'de_AT',
''],
'ngerman': ['ngerman',
'German (new spelling)',
'false',
'iso8859-1',
'de_DE',
''],
'norsk': ['norsk', 'Norsk', 'false', 'iso8859-1', 'no_NO', ''],
'nynorsk': ['nynorsk', 'Nynorsk', 'false', 'iso8859-1', 'nn_NO', ''],
'polish': ['polish', 'Polish', 'false', 'iso8859-2', 'pl_PL', ''],
'portuges': ['portuges', 'Portugese', 'false', 'iso8859-1', 'pt_PT', ''],
'romanian': ['romanian', 'Romanian', 'false', 'iso8859-2', 'ro_RO', ''],
'russian': ['russian', 'Russian', 'false', 'koi8', 'ru_RU', ''],
'scottish': ['scottish', 'Scottish', 'false', 'iso8859-1', 'gd_GB', ''],
'serbian': ['croatian', 'Serbian', 'false', 'iso8859-5', 'sr_HR', ''],
'serbocroatian': ['croatian',
'Serbo-Croatian',
'false',
'iso8859-2',
'sh_HR',
''],
'slovak': ['slovak', 'Slovak', 'false', 'iso8859-2', 'sk_SK', ''],
'slovene': ['slovene', 'Slovene', 'false', 'iso8859-2', 'sl_SI', ''],
'spanish': ['spanish',
'Spanish',
'false',
'iso8859-1',
'es_ES',
'\\deactivatetilden'],
'swedish': ['swedish', 'Swedish', 'false', 'iso8859-1', 'sv_SE', ''],
'thai': ['thai',
'Thai',
'false',
'tis620-0',
'th_TH',
'\\usepackage{thswitch}'],
'turkish': ['turkish', 'Turkish', 'false', 'iso8859-9', 'tr_TR', ''],
'ukrainian': ['ukrainian', 'Ukrainian', 'false', 'koi8-u', 'uk_UA', ''],
'welsh': ['welsh', 'Welsh', 'false', 'iso8859-1', 'cy_GB', '']}
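A short note on how this table is read (the field interpretation is inferred from the entries and from get_encoding() above; the snippet is illustrative only):

# Each value looks like: [internal/babel name, display name, RTL flag,
#                         default encoding, locale code, extra LaTeX preamble].
# get_encoding() uses index 3 when \inputencoding is "auto" or "default":
lang = {'german': ['german', 'German', 'false', 'iso8859-1', 'de_DE', '']}
assert lang['german'][3] == 'iso8859-1'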
@@ -1,6 +1,6 @@
# This file is part of lyx2lyx
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
# This file is part of lyx2lyx -*- python -*-
# -*- coding: utf-8 -*-
# Copyright (C) 2006 José Matos <jamatos@lyx.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
@@ -16,9 +16,7 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

convert = [[215, []]]
revert = []

version = "1.4.5"

if __name__ == "__main__":
pass
lib/lyx2lyx/lyx_0_06.py (new file, 28 lines)
@@ -0,0 +1,28 @@
# This file is part of lyx2lyx
# -*- coding: utf-8 -*-
# Copyright (C) 2006 José Matos <jamatos@lyx.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

""" Convert files to the file format generated by lyx 0.6"""

supported_versions = ["0.6.%d" % i for i in range(5)] + ["0.6"]
convert = [[200, []]]
revert = []


if __name__ == "__main__":
pass
lib/lyx2lyx/lyx_0_08.py (new file, 34 lines)
@@ -0,0 +1,34 @@
# This file is part of lyx2lyx
# -*- coding: utf-8 -*-
# Copyright (C) 2006 José Matos <jamatos@lyx.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

""" Convert files to the file format generated by lyx 0.8"""

def add_inputencoding(document):
" Add the input encoding, latin1"
document.header.append('\\inputencoding latin1')
document.inputencoding = "latin1"


supported_versions = ["0.8.%d" % i for i in range(7)] + ["0.8"]
convert = [[210, [add_inputencoding]]]
revert = []


if __name__ == "__main__":
pass
lib/lyx2lyx/lyx_0_10.py (new file, 138 lines)
@@ -0,0 +1,138 @@
# This file is part of lyx2lyx
# -*- coding: utf-8 -*-
# Copyright (C) 2006 José Matos <jamatos@lyx.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

""" Convert files to the file format generated by lyx 0.10"""

def regularise_header(document):
" Put each entry in header into a separate line. "
i = 0
while i < len(document.header):
line = document.header[i]
if len(line.split('\\')) > 1:
tmp = [ '\\'+ token.strip() for token in line.split('\\')][1:]
document.header[i: i+1] = tmp
i += len(tmp)
i += 1


def find_next_space(line, j):
""" Return position of next space or backslash, which one comes
first, starting from position k, if not existing return last
position in line."""
l = line.find(' ', j)
if l == -1:
l = len(line)
k = line.find('\\', j)
if k == -1:
k = len(line)

if k < l:
return k
return l


def regularise_body(document):
""" Place tokens starting with a backslash into a separate line. """

getline_tokens = ["added_space_bottom", "added_space_top",
"align", "layout", "fill_bottom", "fill_top",
"labelwidthstring", "pagebreak_top",
"pagebreak_bottom", "noindent"]

noargs_tokens = ["backslash", "begin_deeper", "end_deeper",
"end_float", "end_inset", "hfill", "newline",
"protected_separator"]

onearg_tokens = ["bar", "begin_float", "family", "latex", "shape",
"size", "series", "cursor"]

i = 0
while i < len(document.body):
line = document.body[i]
j = 0
tmp = []
while j < len(line):
k = line.find('\\', j)

if k == -1:
tmp += [line[j:]]
break

if k != j:
tmp += [line[j: k]]
j = k

k = find_next_space(line, j+1)

# These tokens take the rest of the line
token = line[j+1:k]
if token in getline_tokens:
tmp += [line[j:]]
break

# These tokens take no arguments
if token in noargs_tokens:
tmp += [line[j:k]]
j = k
continue

# These tokens take one argument
if token in onearg_tokens:
k = find_next_space(line, k + 1)
tmp += [line[j:k]]
j = k
continue

# Special treatment for insets
if token in ["begin_inset"]:
l = find_next_space(line, k + 1)
inset = line[k+1: l]

if inset == "Latex":
tmp += [line[j:l]]
j = l
continue

if inset in ["LatexCommand", "LatexDel"]:
tmp += [line[j:]]
break

if inset == "Quotes":
l = find_next_space(line, l + 1)
tmp += [line[j:l]]
j = l
continue

document.warning("unkown inset %s" % line)
assert(False)

# We are inside a latex inset, pass the text verbatim
tmp += [line[j:]]
break

document.body[i: i+1] = tmp
i += len(tmp)


supported_versions = ["0.10.%d" % i for i in range(8)] + ["0.10"]
convert = [[210, [regularise_header, regularise_body]]]
revert = []


if __name__ == "__main__":
pass
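For orientation, a minimal, self-contained sketch (not part of the patch) of how these per-module convert tables are consumed: every module exposes convert/revert lists of [target format, [steps]] pairs, and the driver in LyX.py applies the steps of each pair in order and then records the new format. The mini-driver and FakeDocument below are hypothetical and only illustrate the shape of the interface.

def apply_conversion(document, convert):
    # Hypothetical driver: run every step of every chunk, then bump the format.
    for target_format, steps in convert:
        for step in steps:
            step(document)
        document.format = target_format

class FakeDocument:
    def __init__(self):
        self.header, self.body, self.format = [], [], 200

doc = FakeDocument()
apply_conversion(doc, [[210, []]])   # empty step list, as several modules ship
assert doc.format == 210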
@@ -1,6 +1,6 @@
# This file is part of lyx2lyx
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2003-2004 José Matos <jamatos@lyx.org>
# -*- coding: utf-8 -*-
# Copyright (C) 2003-2004 José Matos <jamatos@lyx.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
@@ -16,41 +16,46 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

""" Convert files to the file format generated by lyx 0.12"""

import re
import string
from parser_tools import find_token, find_re, check_token


def space_before_layout(file):
lines = file.body
def space_before_layout(document):
" Remove empty line before \\layout. "
lines = document.body
i = 2 # skip first layout
while 1:
i = find_token(lines, '\\layout', i)
if i == -1:
break

if lines[i - 1] == '' and string.find(lines[i-2],'\\protected_separator') == -1:
prot_space = lines[i-2].find('\\protected_separator')
if lines[i - 1] == '' and prot_space == -1:
del lines[i-1]
i = i + 1


def formula_inset_space_eat(file):
lines = file.body
i=0
def formula_inset_space_eat(document):
" Remove space after inset formula."
lines = document.body
i = 0
while 1:
i = find_token(lines, "\\begin_inset Formula", i)
if i == -1: break
if i == -1:
break

if len(lines[i]) > 22 and lines[i][21] == ' ':
lines[i] = lines[i][:20] + lines[i][21:]
i = i + 1


# Update from tabular format 1 or 2 to 4
def update_tabular(file):
lines = file.body
def update_tabular(document):
" Update from tabular format 1 or 2 to 4."
lines = document.body
lyxtable_re = re.compile(r".*\\LyXTable$")
i=0
i = 0
while 1:
i = find_re(lines, lyxtable_re, i)
if i == -1:
@@ -58,10 +63,10 @@ def update_tabular(file):
i = i + 1
format = lines[i][8:]

lines[i]='multicol4'
lines[i] = 'multicol4'
i = i + 1
rows = int(string.split(lines[i])[0])
columns = int(string.split(lines[i])[1])
rows = int(lines[i].split()[0])
columns = int(lines[i].split()[1])

lines[i] = lines[i] + ' 0 0 -1 -1 -1 -1'
i = i + 1
@@ -74,27 +79,34 @@ def update_tabular(file):
lines[i] = lines[i] + ' '
i = i + 1

while string.strip(lines[i]):
while lines[i].strip():
if not format:
lines[i] = lines[i] + ' 1 1'
lines[i] = lines[i] + ' 0 0 0'
i = i + 1

lines[i] = string.strip(lines[i])
lines[i] = lines[i].strip()

def final_dot(file):
lines = file.body

def final_dot(document):
" Merge lines if the dot is the final character."
lines = document.body
i = 0
while i < len(lines):
if lines[i][-1:] == '.' and lines[i+1][:1] != '\\' and lines[i+1][:1] != ' ' and len(lines[i]) + len(lines[i+1])<= 72 and lines[i+1] != '':

if lines[i][-1:] == '.' and lines[i+1][:1] != '\\' and \
lines[i+1][:1] != ' ' and len(lines[i]) + len(lines[i+1])<= 72 \
and lines[i+1] != '':

lines[i] = lines[i] + lines[i+1]
del lines[i+1]
else:
i = i + 1


def update_inset_label(file):
lines = file.body
def update_inset_label(document):
" Update inset Label."
lines = document.body
i = 0
while 1:
i = find_token(lines, '\\begin_inset Label', i)
@@ -104,26 +116,32 @@ def update_inset_label(file):
i = i + 1


def update_latexdel(file):
lines = file.body
def update_latexdel(document):
" Update inset LatexDel."
lines = document.body
i = 0
while 1:
i = find_token(lines, '\\begin_inset LatexDel', i)
if i == -1:
return
lines[i] = string.replace(lines[i],'\\begin_inset LatexDel', '\\begin_inset LatexCommand')
lines[i] = lines[i].replace('\\begin_inset LatexDel',
'\\begin_inset LatexCommand')
i = i + 1


def update_vfill(file):
lines = file.body
def update_vfill(document):
" Update fill_top and fill_bottom."
lines = document.body
for i in range(len(lines)):
lines[i] = string.replace(lines[i],'\\fill_top','\\added_space_top vfill')
lines[i] = string.replace(lines[i],'\\fill_bottom','\\added_space_bottom vfill')
lines[i] = lines[i].replace('\\fill_top',
'\\added_space_top vfill')
lines[i] = lines[i].replace('\\fill_bottom',
'\\added_space_bottom vfill')


def update_space_units(file):
lines = file.body
def update_space_units(document):
" Update space units."
lines = document.body
added_space_bottom = re.compile(r'\\added_space_bottom ([^ ]*)')
added_space_top = re.compile(r'\\added_space_top ([^ ]*)')
for i in range(len(lines)):
@@ -131,17 +149,18 @@ def update_space_units(file):
if result:
old = '\\added_space_bottom ' + result.group(1)
new = '\\added_space_bottom ' + str(float(result.group(1))) + 'cm'
lines[i] = string.replace(lines[i], old, new)
lines[i] = lines[i].replace(old, new)

result = added_space_top.search(lines[i])
if result:
old = '\\added_space_top ' + result.group(1)
new = '\\added_space_top ' + str(float(result.group(1))) + 'cm'
lines[i] = string.replace(lines[i], old, new)
lines[i] = lines[i].replace(old, new)


def remove_cursor(file):
lines = file.body
def remove_cursor(document):
" Remove cursor, it is not saved on the file anymore."
lines = document.body
i = 0
cursor_re = re.compile(r'.*(\\cursor \d*)')
while 1:
@@ -149,15 +168,16 @@ def remove_cursor(file):
if i == -1:
break
cursor = cursor_re.search(lines[i]).group(1)
lines[i]= string.replace(lines[i], cursor, '')
lines[i] = lines[i].replace(cursor, '')
i = i + 1


def remove_empty_insets(file):
lines = file.body
def remove_empty_insets(document):
" Remove empty insets."
lines = document.body
i = 0
while 1:
i = find_token(lines, '\\begin_inset ',i)
i = find_token(lines, '\\begin_inset ', i)
if i == -1:
break
if lines[i] == '\\begin_inset ' and lines[i+1] == '\\end_inset ':
@@ -166,8 +186,9 @@ def remove_empty_insets(file):
i = i + 1


def remove_formula_latex(file):
lines = file.body
def remove_formula_latex(document):
" Remove formula latex."
lines = document.body
i = 0
while 1:
i = find_token(lines, '\\latex formula_latex ', i)
@@ -181,15 +202,17 @@ def remove_formula_latex(file):
del lines[i]


def add_end_document(file):
lines = file.body
def add_end_document(document):
" Add \\the_end to the end of the document."
lines = document.body
i = find_token(lines, '\\the_end', 0)
if i == -1:
lines.append('\\the_end')


def header_update(file):
lines = file.header
def header_update(document):
" Update document header."
lines = document.header
i = 0
l = len(lines)
while i < l:
@@ -197,12 +220,12 @@ def header_update(file):
lines[i] = lines[i][:-1]

if check_token(lines[i], '\\epsfig'):
lines[i] = string.replace(lines[i], '\\epsfig', '\\graphics')
lines[i] = lines[i].replace('\\epsfig', '\\graphics')
i = i + 1
continue

if check_token(lines[i], '\\papersize'):
size = string.split(lines[i])[1]
size = lines[i].split()[1]
new_size = size
paperpackage = ""

@@ -225,7 +248,7 @@ def header_update(file):


if check_token(lines[i], '\\baselinestretch'):
size = string.split(lines[i])[1]
size = lines[i].split()[1]
if size == '1.00':
name = 'single'
elif size == '1.50':
@@ -241,17 +264,18 @@ def header_update(file):
i = i + 1


def update_latexaccents(file):
body = file.body
def update_latexaccents(document):
" Update latex accent insets."
body = document.body
i = 1
while 1:
i = find_token(body, '\\i ', i)
if i == -1:
return

contents = string.strip(body[i][2:])
contents = body[i][2:].strip()

if string.find(contents, '{') != -1 and string.find(contents, '}') != -1:
if contents.find('{') != -1 and contents.find('}') != -1:
i = i + 1
continue

@@ -269,26 +293,47 @@ def update_latexaccents(file):
i = i + 1


def obsolete_latex_title(file):
body = file.body
def obsolete_latex_title(document):
" Replace layout Latex_Title with Title."
body = document.body
i = 0
while 1:
i = find_token(body, '\\layout', i)
if i == -1:
return

if string.find(string.lower(body[i]),'latex_title') != -1:
if body[i].lower().find('latex_title') != -1:
body[i] = '\\layout Title'

i = i + 1


def remove_inset_latex(document):
"Replace inset latex with layout LaTeX"
body = document.body

i = 0
while 1:
i = find_token(body, '\\begin_inset Latex', i)
if i == -1:
return

body[i] = body[i].replace('\\begin_inset Latex', '\\layout LaTeX')
i = find_token(body, '\\end_inset', i)
if i == -1:
#this should not happen
return
del body[i]


supported_versions = ["0.12.0","0.12.1","0.12"]
convert = [[215, [header_update, add_end_document, remove_cursor,
final_dot, update_inset_label, update_latexdel,
update_space_units, space_before_layout,
formula_inset_space_eat, update_tabular,
update_vfill, remove_empty_insets,
remove_formula_latex, update_latexaccents, obsolete_latex_title]]]
remove_formula_latex, update_latexaccents,
obsolete_latex_title, remove_inset_latex]]]
revert = []

@@ -1,6 +1,6 @@
# This file is part of lyx2lyx
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
# -*- coding: utf-8 -*-
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
@@ -16,29 +16,33 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

""" Convert files to the file format generated by lyx 1.0"""

import re
import string
from parser_tools import find_token, find_re

def obsolete_latex_title(file):
body = file.body
def obsolete_latex_title(document):
" Replace LatexTitle layout with Title. "

body = document.body
i = 0
while 1:
i = find_token(body, '\\layout', i)
if i == -1:
return

if string.find(string.lower(body[i]),'latex title') != -1:
if body[i].lower().find('latex title') != -1:
body[i] = '\\layout Title'

i = i + 1


# Update from tabular format 3 to 4 if necessary
def update_tabular(file):
lines = file.body
def update_tabular(document):
" Update from tabular format 3 to 4 if necessary."

lines = document.body
lyxtable_re = re.compile(r".*\\LyXTable$")
i=0
i = 0
while 1:
i = find_re(lines, lyxtable_re, i)
if i == -1:
@@ -49,10 +53,10 @@ def update_tabular(file):
if format != '3':
continue

lines[i]='multicol4'
lines[i] = 'multicol4'
i = i + 1
rows = int(string.split(lines[i])[0])
columns = int(string.split(lines[i])[1])
rows = int(lines[i].split()[0])
columns = int(lines[i].split()[1])

lines[i] = lines[i] + ' 0 0 -1 -1 -1 -1'
i = i + 1
@@ -65,13 +69,14 @@ def update_tabular(file):
lines[i] = lines[i] + ' '
i = i + 1

while string.strip(lines[i]):
while lines[i].strip():
lines[i] = lines[i] + ' 0 0 0'
i = i + 1

lines[i] = string.strip(lines[i])
lines[i] = lines[i].strip()


supported_versions = ["1.0.%d" % i for i in range(5)] + ["1.0"]
convert = [[215, [obsolete_latex_title, update_tabular]]]
revert = []
@@ -1,6 +1,6 @@
# This file is part of lyx2lyx
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
# -*- coding: utf-8 -*-
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
@@ -16,6 +16,9 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

""" Convert files to the file format generated by lyx 1.1 series, until 1.1.4"""

supported_versions = ["1.1.%d" % i for i in range(5)] + ["1.1"]
convert = [[215, []]]
revert = []
@@ -1,6 +1,6 @@
# This file is part of lyx2lyx
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
# This document is part of lyx2lyx
# -*- coding: utf-8 -*-
# Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
@@ -16,15 +16,29 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

import re
import string
from parser_tools import find_token, find_token_backwards, find_re, get_layout
""" Convert files to the file format generated by lyx 1.1.5"""

import re
from parser_tools import find_token, find_token_backwards, find_re

####################################################################
# Private helper functions

def get_layout(line, default_layout):
" Get the line layout, beware of the empty layout."
tokens = line.split()
if len(tokens) > 1:
return tokens[1]
return default_layout


####################################################################

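A quick illustration of the new get_layout helper (the function is copied from the hunk above; the calls are examples only):

def get_layout(line, default_layout):
    " Get the line layout, beware of the empty layout."
    tokens = line.split()
    if len(tokens) > 1:
        return tokens[1]
    return default_layout

assert get_layout("\\layout LyX-Code", "Standard") == "LyX-Code"
assert get_layout("\\layout", "Standard") == "Standard"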
math_env = ["\\[","\\begin{eqnarray*}","\\begin{eqnarray}","\\begin{equation}"]

def replace_protected_separator(file):
lines = file.body
def replace_protected_separator(document):
" Replace protected separator. "
lines = document.body
i=0
while 1:
i = find_token(lines, "\\protected_separator", i)
@@ -32,7 +46,7 @@ def replace_protected_separator(file):
break
j = find_token_backwards(lines, "\\layout", i)
#if j == -1: print error
layout = get_layout(lines[j], file.default_layout)
layout = get_layout(lines[j], document.default_layout)

if layout == "LyX-Code":
result = ""
@@ -47,8 +61,9 @@ def replace_protected_separator(file):
del lines[i]


def merge_formula_inset(file):
lines = file.body
def merge_formula_inset(document):
" Merge formula insets. "
lines = document.body
i=0
while 1:
i = find_token(lines, "\\begin_inset Formula", i)
@@ -59,9 +74,9 @@ def merge_formula_inset(file):
i = i + 1


# Update from tabular format 4 to 5 if necessary
def update_tabular(file):
lines = file.body
def update_tabular(document):
" Update from tabular format 4 to 5 if necessary. "
lines = document.body
lyxtable_re = re.compile(r".*\\LyXTable$")
i=0
while 1:
@@ -75,16 +90,16 @@ def update_tabular(file):

lines[i]='multicol5'
i = i + 1
rows = int(string.split(lines[i])[0])
columns = int(string.split(lines[i])[1])
rows = int(lines[i].split()[0])
columns = int(lines[i].split()[1])

i = i + rows + 1
for j in range(columns):
col_info = string.split(lines[i])
col_info = lines[i].split()
if len(col_info) == 3:
lines[i] = lines[i] + '"" ""'
else:
lines[i] = string.join(col_info[:3]) + ' "%s" ""' % col_info[3]
lines[i] = " ".join(col_info[:3]) + ' "%s" ""' % col_info[3]
i = i + 1

while lines[i]:
@@ -92,26 +107,30 @@ def update_tabular(file):
i = i + 1


def update_toc(file):
lines = file.body
def update_toc(document):
" Update table of contents. "
lines = document.body
i = 0
while 1:
i = find_token(lines, '\\begin_inset LatexCommand \\tableofcontents', i)
i = find_token(lines,
'\\begin_inset LatexCommand \\tableofcontents', i)
if i == -1:
break
lines[i] = lines[i] + '{}'
i = i + 1


def remove_cursor(file):
lines = file.body
def remove_cursor(document):
" Remove cursor. "
lines = document.body
i = find_token(lines, '\\cursor', 0)
if i != -1:
del lines[i]


def remove_vcid(file):
lines = file.header
def remove_vcid(document):
" Remove \\lyxvcid and \\lyxrcsid. "
lines = document.header
i = find_token(lines, '\\lyxvcid', 0)
if i != -1:
del lines[i]
@@ -120,16 +139,18 @@ def remove_vcid(file):
del lines[i]


def first_layout(file):
lines = file.body
def first_layout(document):
" Fix first layout, if empty use the default layout."
lines = document.body
while (lines[0] == ""):
del lines[0]
if lines[0][:7] != "\\layout":
lines[:0] = ['\\layout %s' % file.default_layout, '']
lines[:0] = ['\\layout %s' % document.default_layout, '']


def remove_space_in_units(file):
lines = file.header
def remove_space_in_units(document):
" Remove space in units. "
lines = document.header
margins = ["\\topmargin","\\rightmargin",
"\\leftmargin","\\bottommargin"]

@@ -148,8 +169,9 @@ def remove_space_in_units(file):
i = i + 1


def latexdel_getargs(file, i):
lines = file.body
def latexdel_getargs(document, i):
" Get arguments from latexdel insets. "
lines = document.body

# play safe, clean empty lines
while 1:
@@ -162,10 +184,10 @@ def latexdel_getargs(file, i):
if i == j:
del lines[i]
else:
file.warning("Unexpected end of inset.")
document.warning("Unexpected end of inset.")
j = find_token(lines, '\\begin_inset LatexDel }{', i)

ref = string.join(lines[i:j])
ref = " ".join(lines[i:j])
del lines[i:j + 1]

# play safe, clean empty lines
@@ -178,57 +200,64 @@ def latexdel_getargs(file, i):
if i == j:
del lines[i]
else:
file.warning("Unexpected end of inset.")
document.warning("Unexpected end of inset.")
j = find_token(lines, '\\begin_inset LatexDel }', i)
label = string.join(lines[i:j])
label = " ".join(lines[i:j])
del lines[i:j + 1]

return ref, label


def update_ref(file):
lines = file.body
def update_ref(document):
" Update reference inset. "
lines = document.body
i = 0
while 1:
i = find_token(lines, '\\begin_inset LatexCommand', i)
if i == -1:
return

if string.split(lines[i])[-1] == "\\ref{":
if lines[i].split()[-1] == "\\ref{":
i = i + 1
ref, label = latexdel_getargs(file, i)
ref, label = latexdel_getargs(document, i)
lines[i - 1] = "%s[%s]{%s}" % (lines[i - 1][:-1], ref, label)

i = i + 1


def update_latexdel(file):
lines = file.body
def update_latexdel(document):
" Remove latexdel insets. "
lines = document.body
i = 0
latexdel_re = re.compile(r".*\\begin_inset LatexDel")
while 1:
i = find_re(lines, latexdel_re, i)
if i == -1:
return
lines[i] = string.replace(lines[i],'\\begin_inset LatexDel', '\\begin_inset LatexCommand')
lines[i] = lines[i].replace('\\begin_inset LatexDel',
'\\begin_inset LatexCommand')

j = string.find(lines[i],'\\begin_inset')
j = lines[i].find('\\begin_inset')
lines.insert(i+1, lines[i][j:])
lines[i] = string.strip(lines[i][:j])
lines[i] = lines[i][:j].strip()
i = i + 1

if string.split(lines[i])[-1] in ("\\url{", "\\htmlurl{"):
if lines[i].split()[-1] in ("\\url{", "\\htmlurl{"):
i = i + 1

ref, label = latexdel_getargs(file, i)
ref, label = latexdel_getargs(document, i)
lines[i -1] = "%s[%s]{%s}" % (lines[i-1][:-1], label, ref)

i = i + 1


convert = [[216, [first_layout, remove_vcid, remove_cursor, update_toc,
replace_protected_separator, merge_formula_inset,
update_tabular, remove_space_in_units, update_ref, update_latexdel]]]
supported_versions = ["1.1.5","1.1.5fix1","1.1.5fix2","1.1"]
convert = [[216, [first_layout, remove_vcid, remove_cursor,
update_toc, replace_protected_separator,
merge_formula_inset, update_tabular,
remove_space_in_units, update_ref,
update_latexdel]]]

revert = []

if __name__ == "__main__":
@ -1,6 +1,6 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: iso-8859-1 -*-
|
||||
# Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
@ -16,14 +16,15 @@
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
|
||||
""" Convert files to the file format generated by lyx 1.1.6, until fix2"""
|
||||
|
||||
import re
|
||||
import string
|
||||
from parser_tools import find_re, find_tokens, find_token, check_token
|
||||
|
||||
|
||||
lyxtable_re = re.compile(r".*\\LyXTable$")
|
||||
def update_tabular(file):
|
||||
lines = file.body
|
||||
def update_tabular(document):
|
||||
" Update tabular to version 1 (xml like syntax). "
|
||||
lines = document.body
|
||||
i=0
|
||||
while 1:
|
||||
i = find_re(lines, lyxtable_re, i)
|
||||
@ -41,7 +42,7 @@ def update_tabular(file):
|
||||
i = i + 1
|
||||
lines[i] = "\\begin_inset Tabular"
|
||||
i = i + 1
|
||||
head = string.split(lines[i])
|
||||
head = lines[i].split()
|
||||
rows = int(head[0])
|
||||
columns = int(head[1])
|
||||
|
||||
@ -54,8 +55,8 @@ def update_tabular(file):
|
||||
row_info = []
|
||||
cont_row = []
|
||||
for j in range(rows):
|
||||
row_info.append(string.split(lines[i]))
|
||||
if string.split(lines[i])[2] == '1':
|
||||
row_info.append(lines[i].split())
|
||||
if lines[i].split()[2] == '1':
|
||||
cont_row.append(j)
|
||||
del lines[i]
|
||||
|
||||
@ -100,13 +101,13 @@ def update_tabular(file):
|
||||
continue
|
||||
|
||||
if l == ncells -1:
|
||||
# the end variable refers to cell end, not to file end.
|
||||
# the end variable refers to cell end, not to document end.
|
||||
end = find_tokens(lines, ['\\layout','\\the_end','\\end_deeper','\\end_float'], i)
|
||||
else:
|
||||
end = find_token(lines, '\\newline', i)
|
||||
|
||||
if end == -1:
|
||||
file.error("Malformed LyX file.")
|
||||
document.error("Malformed LyX file.")
|
||||
|
||||
end = end - i
|
||||
while end > 0:
|
||||
@ -114,7 +115,7 @@ def update_tabular(file):
|
||||
del lines[i]
|
||||
end = end -1
|
||||
|
||||
if string.find(lines[i],'\\newline') != -1:
|
||||
if lines[i].find('\\newline') != -1:
|
||||
del lines[i]
|
||||
l = l + 1
|
||||
|
||||
@ -146,7 +147,7 @@ def update_tabular(file):
|
||||
tmp.append('<Cell multicolumn="%s" alignment="%s" valignment="0" topline="%s" bottomline="%s" leftline="%d" rightline="%d" rotate="%s" usebox="%s" width=%s special=%s>' % (cell_info[m][0],cell_info[m][1],cell_info[m][2],cell_info[m][3],leftline,rightline,cell_info[m][5],cell_info[m][6],cell_info[m][7],cell_info[m][8]))
|
||||
tmp.append('\\begin_inset Text')
|
||||
tmp.append('')
|
||||
tmp.append('\\layout %s' % file.default_layout)
|
||||
tmp.append('\\layout %s' % document.default_layout)
|
||||
tmp.append('')
|
||||
|
||||
if cell_info[m][0] != '2':
|
||||
@ -179,8 +180,8 @@ def update_tabular(file):
|
||||
|
||||
|
||||
prop_exp = re.compile(r"\\(\S*)\s*(\S*)")
|
||||
|
||||
def set_paragraph_properties(lines, prop_dict):
|
||||
" Set paragraph properties."
|
||||
# we need to preserve the order of options
|
||||
properties = ["family","series","shape","size",
|
||||
"emph","bar","noun","latex","color"]
|
||||
@ -263,19 +264,22 @@ def set_paragraph_properties(lines, prop_dict):
|
||||
return result[:]
|
||||
|
||||
|
||||
def update_language(file):
|
||||
header = file.header
|
||||
def update_language(document):
|
||||
""" Update document language, if language is default convert it to
|
||||
english."""
|
||||
header = document.header
|
||||
i = find_token(header, "\\language", 0)
|
||||
if i == -1:
|
||||
# no language, should emit a warning
|
||||
header.append('\\language english')
|
||||
return
|
||||
# This is the lyx behaviour: defaults to english
|
||||
if string.split(header[i])[1] == 'default':
|
||||
if header[i].split()[1] == 'default':
|
||||
header[i] = '\\language english'
|
||||
return
|
||||
|
||||
|
||||
supported_versions = ["1.1.6","1.1.6fix1","1.1.6fix2","1.1"]
|
||||
convert = [[217, [update_tabular, update_language]]]
|
||||
revert = []
|
||||
|
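The convert table above pairs a target file format number (217) with the list of conversion functions that produce it. As a rough illustration only, here is a minimal sketch of how such a table can be driven; the SimpleDocument class and run_conversions function are invented stand-ins, not the real lyx2lyx driver.

# A minimal sketch (not the real lyx2lyx driver) of applying a convert table
# such as [[217, [update_tabular, update_language]]]. SimpleDocument is an
# invented stand-in that only mimics the attributes used by the steps above.
class SimpleDocument:
    def __init__(self, header, body, fmt):
        self.header = header
        self.body = body
        self.format = fmt
        self.default_layout = "Standard"

def run_conversions(document, convert_table):
    # Run every step registered for each target format, then record the new format number.
    for target_format, steps in convert_table:
        for step in steps:
            step(document)
        document.format = target_format
    return document

# Shape of a call, with a do-nothing step:
doc = run_conversions(SimpleDocument([], [], 215), [[217, [lambda d: None]]])
print(doc.format)  # -> 217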
@ -1,6 +1,6 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: iso-8859-1 -*-
|
||||
# Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
@ -16,11 +16,13 @@
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
|
||||
""" Convert files to the file format generated by lyx 1.1.6, fix3 and fix4"""
|
||||
|
||||
import re
|
||||
import string
|
||||
from parser_tools import find_token, find_re
|
||||
|
||||
def bool_table(item):
|
||||
" Convert 0, 1 to false, true."
|
||||
if item == "0":
|
||||
return "false"
|
||||
# should emit a warning if item != "1"
|
||||
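The hunk above cuts off before bool_table's final return; judging from the comment, any value other than "0" is meant to map to "true". A standalone sketch under that assumption:

def bool_table(item):
    # Convert a LyX "0"/"1" flag to "false"/"true".
    # Returning "true" for anything other than "0" is an assumption; the
    # closing line of the function is not visible in the hunk above.
    if item == "0":
        return "false"
    return "true"

print(bool_table("0"))  # -> false
print(bool_table("1"))  # -> true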
@ -32,9 +34,10 @@ align_table = {"0": "top", "2": "left", "4": "right", "8": "center"}
|
||||
use_table = {"0": "none", "1": "parbox"}
|
||||
table_meta_re = re.compile(r'<LyXTabular version="?1"? rows="?(\d*)"? columns="?(\d*)"?>')
|
||||
|
||||
def update_tabular(file):
|
||||
def update_tabular(document):
|
||||
" Update tabular format to version 2 (xml like syntax)."
|
||||
regexp = re.compile(r'^\\begin_inset\s+Tabular')
|
||||
lines = file.body
|
||||
lines = document.body
|
||||
i=0
|
||||
while 1:
|
||||
i = find_re(lines, regexp, i)
|
||||
@ -51,7 +54,7 @@ def update_tabular(file):
|
||||
|
||||
j = find_token(lines, '</LyXTabular>', i) + 1
|
||||
if j == 0:
|
||||
file.warning( "Error: Bad lyx format i=%d j=%d" % (i,j))
|
||||
document.warning( "Error: Bad lyx format i=%d j=%d" % (i,j))
|
||||
break
|
||||
|
||||
new_table = table_update(lines[i:j])
|
||||
@ -65,7 +68,8 @@ features_re = re.compile(r'<features rotate="?(\d)"? islongtable="?(\d)"? endhea
|
||||
row_re = re.compile(r'<row topline="?(\d)"? bottomline="?(\d)"? newpage="?(\d)"?>')
|
||||
|
||||
def table_update(lines):
|
||||
lines[1] = string.replace(lines[1], '<Features', '<features')
|
||||
" Update table's internal content to format 2."
|
||||
lines[1] = lines[1].replace('<Features', '<features')
|
||||
res = features_re.match( lines[1] )
|
||||
if res:
|
||||
val = res.groups()
|
||||
@ -76,14 +80,14 @@ def table_update(lines):
|
||||
i = 2
|
||||
col_info = []
|
||||
while i < len(lines):
|
||||
lines[i] = string.replace(lines[i], '<Cell', '<cell')
|
||||
lines[i] = string.replace(lines[i], '</Cell', '</cell')
|
||||
lines[i] = string.replace(lines[i], '<Row', '<row')
|
||||
lines[i] = string.replace(lines[i], '</Row', '</row')
|
||||
lines[i] = string.replace(lines[i], '<Column', '<column')
|
||||
lines[i] = string.replace(lines[i], '</Column', '</column')
|
||||
lines[i] = string.replace(lines[i], '</LyXTabular', '</lyxtabular')
|
||||
k = string.find (lines[i], '<column ')
|
||||
lines[i] = lines[i].replace('<Cell', '<cell')
|
||||
lines[i] = lines[i].replace('</Cell', '</cell')
|
||||
lines[i] = lines[i].replace('<Row', '<row')
|
||||
lines[i] = lines[i].replace('</Row', '</row')
|
||||
lines[i] = lines[i].replace('<Column', '<column')
|
||||
lines[i] = lines[i].replace('</Column', '</column')
|
||||
lines[i] = lines[i].replace('</LyXTabular', '</lyxtabular')
|
||||
k = lines[i].find ('<column ')
|
||||
if k != -1:
|
||||
col_info.append(lines[i])
|
||||
del lines[i]
|
||||
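The chain of replace calls above only lowercases the old mixed-case tabular tags; the behaviour is unchanged from the string.replace form it supersedes. A small standalone run on an invented sample line:

# The sample line is invented for illustration; the replacement pairs mirror table_update above.
line = '<Cell multicolumn="0" topline="1" bottomline="0">'
for old, new in (('<Cell', '<cell'), ('</Cell', '</cell'),
                 ('<Row', '<row'), ('</Row', '</row'),
                 ('<Column', '<column'), ('</Column', '</column'),
                 ('</LyXTabular', '</lyxtabular')):
    line = line.replace(old, new)
print(line)  # -> <cell multicolumn="0" topline="1" bottomline="0">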
@ -116,6 +120,7 @@ def table_update(lines):
|
||||
return lines[:2] + col_info + lines[2:]
|
||||
|
||||
|
||||
supported_versions = ["1.1.6fix3","1.1.6fix4","1.1"]
|
||||
convert = [[218, [update_tabular]]]
|
||||
revert = []
|
||||
|
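The table_meta_re pattern defined earlier in this file tolerates both quoted and unquoted attribute values when it picks up the row and column counts. A quick standalone check; the sample lines are invented:

import re

table_meta_re = re.compile(r'<LyXTabular version="?1"? rows="?(\d*)"? columns="?(\d*)"?>')

for sample in ('<LyXTabular version=1 rows=2 columns=3>',
               '<LyXTabular version="1" rows="2" columns="3">'):
    m = table_meta_re.match(sample)
    if m:
        # Both spellings yield rows=2, columns=3.
        print(int(m.group(1)), int(m.group(2)))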
@ -1,7 +1,7 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: iso-8859-1 -*-
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2002 Dekel Tsur <dekel@lyx.org>
|
||||
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
|
||||
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
@ -17,13 +17,86 @@
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
|
||||
import string
|
||||
""" Convert files to the file format generated by lyx 1.2"""
|
||||
|
||||
import re
|
||||
|
||||
from parser_tools import find_token, find_token_backwards, get_next_paragraph,\
|
||||
find_tokens, find_end_of_inset, find_re, \
|
||||
is_nonempty_line, get_paragraph, find_nonempty_line, \
|
||||
get_value, get_tabular_lines, check_token, get_layout
|
||||
from parser_tools import find_token, find_token_backwards, \
|
||||
find_tokens, find_tokens_backwards, \
|
||||
find_beginning_of, find_end_of, find_re, \
|
||||
is_nonempty_line, find_nonempty_line, \
|
||||
get_value, check_token
|
||||
|
||||
####################################################################
|
||||
# Private helper functions
|
||||
|
||||
def get_layout(line, default_layout):
|
||||
" Get layout, if empty return the default layout."
|
||||
tokens = line.split()
|
||||
if len(tokens) > 1:
|
||||
return tokens[1]
|
||||
return default_layout
|
||||
|
||||
|
||||
def get_paragraph(lines, i, format):
|
||||
" Finds the paragraph that contains line i."
|
||||
begin_layout = "\\layout"
|
||||
|
||||
while i != -1:
|
||||
i = find_tokens_backwards(lines, ["\\end_inset", begin_layout], i)
|
||||
if i == -1: return -1
|
||||
if check_token(lines[i], begin_layout):
|
||||
return i
|
||||
i = find_beginning_of_inset(lines, i)
|
||||
return -1
|
||||
|
||||
|
||||
def get_next_paragraph(lines, i, format):
|
||||
" Finds the paragraph after the paragraph that contains line i."
|
||||
tokens = ["\\begin_inset", "\\layout", "\\end_float", "\\the_end"]
|
||||
|
||||
while i != -1:
|
||||
i = find_tokens(lines, tokens, i)
|
||||
if not check_token(lines[i], "\\begin_inset"):
|
||||
return i
|
||||
i = find_end_of_inset(lines, i)
|
||||
return -1
|
||||
|
||||
|
||||
def find_beginning_of_inset(lines, i):
|
||||
" Find beginning of inset, where lines[i] is included."
|
||||
return find_beginning_of(lines, i, "\\begin_inset", "\\end_inset")
|
||||
|
||||
|
||||
def find_end_of_inset(lines, i):
|
||||
" Finds the matching \end_inset"
|
||||
return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
|
||||
|
||||
|
||||
def find_end_of_tabular(lines, i):
|
||||
" Finds the matching end of tabular."
|
||||
return find_end_of(lines, i, "<lyxtabular", "</lyxtabular")
|
||||
|
||||
|
||||
def get_tabular_lines(lines, i):
|
||||
" Returns a lists of tabular lines."
|
||||
result = []
|
||||
i = i+1
|
||||
j = find_end_of_tabular(lines, i)
|
||||
if j == -1:
|
||||
return []
|
||||
|
||||
while i <= j:
|
||||
if check_token(lines[i], "\\begin_inset"):
|
||||
i = find_end_of_inset(lines, i)+1
|
||||
else:
|
||||
result.append(i)
|
||||
i = i+1
|
||||
return result
|
||||
|
||||
# End of helper functions
|
||||
####################################################################
|
||||
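The new get_layout helper above simply falls back to the document's default layout when the \layout line carries no argument. Restated as a standalone snippet (same logic as the hunk, with print calls added only for illustration):

def get_layout(line, default_layout):
    # Return the layout name on a "\layout <name>" line, or the default if none is given.
    tokens = line.split()
    if len(tokens) > 1:
        return tokens[1]
    return default_layout

print(get_layout("\\layout Quote", "Standard"))  # -> Quote
print(get_layout("\\layout", "Standard"))        # -> Standard (fallback)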
|
||||
|
||||
floats = {
|
||||
"footnote": ["\\begin_inset Foot",
|
||||
@ -59,6 +132,7 @@ pextra_rexp = re.compile(r"\\pextra_type\s+(\S+)"+\
|
||||
|
||||
|
||||
def get_width(mo):
|
||||
" Get width from a regular expression. "
|
||||
if mo.group(10):
|
||||
if mo.group(9) == "\\pextra_widthp":
|
||||
return mo.group(10)+"col%"
|
||||
@ -68,11 +142,9 @@ def get_width(mo):
|
||||
return "100col%"
|
||||
|
||||
|
||||
#
|
||||
# Change \begin_float .. \end_float into \begin_inset Float .. \end_inset
|
||||
#
|
||||
def remove_oldfloat(file):
|
||||
lines = file.body
|
||||
def remove_oldfloat(document):
|
||||
" Change \begin_float .. \end_float into \begin_inset Float .. \end_inset"
|
||||
lines = document.body
|
||||
i = 0
|
||||
while 1:
|
||||
i = find_token(lines, "\\begin_float", i)
|
||||
@ -81,9 +153,9 @@ def remove_oldfloat(file):
|
||||
# There are no nested floats, so finding the end of the float is simple
|
||||
j = find_token(lines, "\\end_float", i+1)
|
||||
|
||||
floattype = string.split(lines[i])[1]
|
||||
floattype = lines[i].split()[1]
|
||||
if not floats.has_key(floattype):
|
||||
file.warning("Error! Unknown float type " + floattype)
|
||||
document.warning("Error! Unknown float type " + floattype)
|
||||
floattype = "fig"
|
||||
|
||||
# skip \end_deeper tokens
|
||||
@ -91,7 +163,7 @@ def remove_oldfloat(file):
|
||||
while check_token(lines[i2], "\\end_deeper"):
|
||||
i2 = i2+1
|
||||
if i2 > i+1:
|
||||
j2 = get_next_paragraph(lines, j + 1, file.format + 1)
|
||||
j2 = get_next_paragraph(lines, j + 1, document.format + 1)
|
||||
lines[j2:j2] = ["\\end_deeper "]*(i2-(i+1))
|
||||
|
||||
new = floats[floattype]+[""]
|
||||
@ -116,7 +188,7 @@ def remove_oldfloat(file):
|
||||
# as extra '\foo default' commands are ignored.
|
||||
# In fact, it might be safer to output '\foo default' for all
|
||||
# font attributes.
|
||||
k = get_paragraph(lines, i, file.format + 1)
|
||||
k = get_paragraph(lines, i, document.format + 1)
|
||||
flag = 0
|
||||
for token in font_tokens:
|
||||
if find_token(lines, token, k, i) != -1:
|
||||
@ -126,7 +198,7 @@ def remove_oldfloat(file):
|
||||
flag = 1
|
||||
new.append("")
|
||||
if token == "\\lang":
|
||||
new.append(token+" "+ file.language)
|
||||
new.append(token+" "+ document.language)
|
||||
else:
|
||||
new.append(token+" default ")
|
||||
|
||||
@ -138,8 +210,9 @@ pextra_type2_rexp = re.compile(r".*\\pextra_type\s+[12]")
|
||||
pextra_type2_rexp2 = re.compile(r".*(\\layout|\\pextra_type\s+2)")
|
||||
pextra_widthp = re.compile(r"\\pextra_widthp")
|
||||
|
||||
def remove_pextra(file):
|
||||
lines = file.body
|
||||
def remove_pextra(document):
|
||||
" Remove pextra token."
|
||||
lines = document.body
|
||||
i = 0
|
||||
flag = 0
|
||||
while 1:
|
||||
@ -179,10 +252,10 @@ def remove_pextra(file):
|
||||
if hfill:
|
||||
start = ["","\hfill",""]+start
|
||||
else:
|
||||
start = ['\\layout %s' % file.default_layout,''] + start
|
||||
start = ['\\layout %s' % document.default_layout,''] + start
|
||||
|
||||
j0 = find_token_backwards(lines,"\\layout", i-1)
|
||||
j = get_next_paragraph(lines, i, file.format + 1)
|
||||
j = get_next_paragraph(lines, i, document.format + 1)
|
||||
|
||||
count = 0
|
||||
while 1:
|
||||
@ -210,6 +283,7 @@ def remove_pextra(file):
|
||||
|
||||
|
||||
def is_empty(lines):
|
||||
" Are all the lines empty?"
|
||||
return filter(is_nonempty_line, lines) == []
|
||||
|
||||
|
||||
@ -218,13 +292,14 @@ ert_rexp = re.compile(r"\\begin_inset|\\hfill|.*\\SpecialChar")
|
||||
spchar_rexp = re.compile(r"(.*)(\\SpecialChar.*)")
|
||||
|
||||
|
||||
def remove_oldert(file):
|
||||
def remove_oldert(document):
|
||||
" Remove old ERT inset."
|
||||
ert_begin = ["\\begin_inset ERT",
|
||||
"status Collapsed",
|
||||
"",
|
||||
'\\layout %s' % file.default_layout,
|
||||
'\\layout %s' % document.default_layout,
|
||||
""]
|
||||
lines = file.body
|
||||
lines = document.body
|
||||
i = 0
|
||||
while 1:
|
||||
i = find_tokens(lines, ["\\latex latex", "\\layout LaTeX"], i)
|
||||
@ -249,7 +324,7 @@ def remove_oldert(file):
|
||||
new = []
|
||||
new2 = []
|
||||
if check_token(lines[i], "\\layout LaTeX"):
|
||||
new = ['\layout %s' % file.default_layout, "", ""]
|
||||
new = ['\layout %s' % document.default_layout, "", ""]
|
||||
|
||||
k = i+1
|
||||
while 1:
|
||||
@ -331,9 +406,9 @@ def remove_oldert(file):
|
||||
del lines[i]
|
||||
|
||||
|
||||
# ERT insert are hidden feature of lyx 1.1.6. This might be removed in the future.
|
||||
def remove_oldertinset(file):
|
||||
lines = file.body
|
||||
def remove_oldertinset(document):
|
||||
" ERT insert are hidden feature of lyx 1.1.6. This might be removed in the future."
|
||||
lines = document.body
|
||||
i = 0
|
||||
while 1:
|
||||
i = find_token(lines, "\\begin_inset ERT", i)
|
||||
@ -341,7 +416,7 @@ def remove_oldertinset(file):
|
||||
break
|
||||
j = find_end_of_inset(lines, i)
|
||||
k = find_token(lines, "\\layout", i+1)
|
||||
l = get_paragraph(lines, i, file.format + 1)
|
||||
l = get_paragraph(lines, i, document.format + 1)
|
||||
if lines[k] == lines[l]: # same layout
|
||||
k = k+1
|
||||
new = lines[k:j]
|
||||
@ -349,11 +424,12 @@ def remove_oldertinset(file):
|
||||
i = i+1
|
||||
|
||||
|
||||
def is_ert_paragraph(file, i):
|
||||
lines = file.body
|
||||
def is_ert_paragraph(document, i):
|
||||
" Is this a ert paragraph? "
|
||||
lines = document.body
|
||||
if not check_token(lines[i], "\\layout"):
|
||||
return 0
|
||||
if not file.is_default_layout(get_layout(lines[i], file.default_layout)):
|
||||
if not document.is_default_layout(get_layout(lines[i], document.default_layout)):
|
||||
return 0
|
||||
|
||||
i = find_nonempty_line(lines, i+1)
|
||||
@ -365,17 +441,18 @@ def is_ert_paragraph(file, i):
|
||||
return check_token(lines[k], "\\layout")
|
||||
|
||||
|
||||
def combine_ert(file):
|
||||
lines = file.body
|
||||
def combine_ert(document):
|
||||
" Combine ERT paragraphs."
|
||||
lines = document.body
|
||||
i = 0
|
||||
while 1:
|
||||
i = find_token(lines, "\\begin_inset ERT", i)
|
||||
if i == -1:
|
||||
break
|
||||
j = get_paragraph(lines, i, file.format + 1)
|
||||
j = get_paragraph(lines, i, document.format + 1)
|
||||
count = 0
|
||||
text = []
|
||||
while is_ert_paragraph(file, j):
|
||||
while is_ert_paragraph(document, j):
|
||||
|
||||
count = count+1
|
||||
i2 = find_token(lines, "\\layout", j+1)
|
||||
@ -395,20 +472,23 @@ def combine_ert(file):
|
||||
oldunits = ["pt", "cm", "in", "text%", "col%"]
|
||||
|
||||
def get_length(lines, name, start, end):
|
||||
" Get lenght."
|
||||
i = find_token(lines, name, start, end)
|
||||
if i == -1:
|
||||
return ""
|
||||
x = string.split(lines[i])
|
||||
x = lines[i].split()
|
||||
return x[2]+oldunits[int(x[1])]
|
||||
|
||||
|
||||
def write_attribute(x, token, value):
|
||||
" Write attribute."
|
||||
if value != "":
|
||||
x.append("\t"+token+" "+value)
|
||||
|
||||
|
||||
def remove_figinset(file):
|
||||
lines = file.body
|
||||
def remove_figinset(document):
|
||||
" Remove figinset."
|
||||
lines = document.body
|
||||
i = 0
|
||||
while 1:
|
||||
i = find_token(lines, "\\begin_inset Figure", i)
|
||||
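get_length, shown earlier in this hunk, decodes the old numeric unit encoding: the token's second field indexes into oldunits and the third field is the magnitude. A standalone sketch of just that decoding step; the sample lines are invented:

oldunits = ["pt", "cm", "in", "text%", "col%"]

def decode_length(line):
    # "\<name> <unit-index> <value>"  ->  "<value><unit>", e.g. "\width 2 50" -> "50in"
    x = line.split()
    return x[2] + oldunits[int(x[1])]

print(decode_length("\\width 2 50"))   # -> 50in
print(decode_length("\\height 0 12"))  # -> 12pt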
@ -416,9 +496,9 @@ def remove_figinset(file):
|
||||
break
|
||||
j = find_end_of_inset(lines, i)
|
||||
|
||||
if ( len(string.split(lines[i])) > 2 ):
|
||||
lyxwidth = string.split(lines[i])[3]+"pt"
|
||||
lyxheight = string.split(lines[i])[4]+"pt"
|
||||
if ( len(lines[i].split()) > 2 ):
|
||||
lyxwidth = lines[i].split()[3]+"pt"
|
||||
lyxheight = lines[i].split()[4]+"pt"
|
||||
else:
|
||||
lyxwidth = ""
|
||||
lyxheight = ""
|
||||
@ -476,15 +556,13 @@ def remove_figinset(file):
|
||||
lines[i:j+1] = new
|
||||
|
||||
|
||||
##
|
||||
# Convert tabular format 2 to 3
|
||||
#
|
||||
attr_re = re.compile(r' \w*="(false|0|)"')
|
||||
line_re = re.compile(r'<(features|column|row|cell)')
|
||||
|
||||
def update_tabular(file):
|
||||
def update_tabular(document):
|
||||
" Convert tabular format 2 to 3."
|
||||
regexp = re.compile(r'^\\begin_inset\s+Tabular')
|
||||
lines = file.body
|
||||
lines = document.body
|
||||
i = 0
|
||||
while 1:
|
||||
i = find_re(lines, regexp, i)
|
||||
@ -493,9 +571,9 @@ def update_tabular(file):
|
||||
|
||||
for k in get_tabular_lines(lines, i):
|
||||
if check_token(lines[k], "<lyxtabular"):
|
||||
lines[k] = string.replace(lines[k], 'version="2"', 'version="3"')
|
||||
lines[k] = lines[k].replace('version="2"', 'version="3"')
|
||||
elif check_token(lines[k], "<column"):
|
||||
lines[k] = string.replace(lines[k], 'width=""', 'width="0pt"')
|
||||
lines[k] = lines[k].replace('width=""', 'width="0pt"')
|
||||
|
||||
if line_re.match(lines[k]):
|
||||
lines[k] = re.sub(attr_re, "", lines[k])
|
||||
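attr_re, defined a few hunks above, strips attributes whose value is "false", "0", or empty, which is how the format-3 tabular lines drop redundant defaults. A standalone check on an invented cell line:

import re

attr_re = re.compile(r' \w*="(false|0|)"')

line = '<cell multicolumn="0" alignment="left" topline="true" bottomline="false" width="">'
print(attr_re.sub("", line))
# -> <cell alignment="left" topline="true">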
@ -520,8 +598,8 @@ def update_tabular(file):
|
||||
false = 0
|
||||
true = 1
|
||||
|
||||
# simple data structure to deal with long table info
|
||||
class row:
|
||||
" Simple data structure to deal with long table info."
|
||||
def __init__(self):
|
||||
self.endhead = false # header row
|
||||
self.endfirsthead = false # first header row
|
||||
@ -530,6 +608,7 @@ class row:
|
||||
|
||||
|
||||
def haveLTFoot(row_info):
|
||||
" Does row has LTFoot?"
|
||||
for row_ in row_info:
|
||||
if row_.endfoot:
|
||||
return true
|
||||
@ -537,6 +616,7 @@ def haveLTFoot(row_info):
|
||||
|
||||
|
||||
def setHeaderFooterRows(hr, fhr, fr, lfr, rows_, row_info):
|
||||
" Set Header/Footer rows."
|
||||
endfirsthead_empty = false
|
||||
endlastfoot_empty = false
|
||||
# set header info
|
||||
@ -603,7 +683,8 @@ def setHeaderFooterRows(hr, fhr, fr, lfr, rows_, row_info):
|
||||
|
||||
|
||||
def insert_attribute(lines, i, attribute):
|
||||
last = string.find(lines[i],'>')
|
||||
" Insert attribute in lines[i]."
|
||||
last = lines[i].find('>')
|
||||
lines[i] = lines[i][:last] + ' ' + attribute + lines[i][last:]
|
||||
|
||||
|
||||
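insert_attribute splices a new attribute just before the closing '>' of a tag line; the longtable code below uses it to add the endhead/endfoot flags to <row> tags. A standalone run on an invented row line:

def insert_attribute(lines, i, attribute):
    # Insert the attribute text just before the first '>' on the line.
    last = lines[i].find('>')
    lines[i] = lines[i][:last] + ' ' + attribute + lines[i][last:]

rows = ['<row topline="true">']
insert_attribute(rows, 0, 'endhead="true"')
print(rows[0])  # -> <row topline="true" endhead="true">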
@ -611,9 +692,10 @@ rows_re = re.compile(r'rows="(\d*)"')
|
||||
longtable_re = re.compile(r'islongtable="(\w)"')
|
||||
ltvalues_re = re.compile(r'endhead="(-?\d*)" endfirsthead="(-?\d*)" endfoot="(-?\d*)" endlastfoot="(-?\d*)"')
|
||||
lt_features_re = re.compile(r'(endhead="-?\d*" endfirsthead="-?\d*" endfoot="-?\d*" endlastfoot="-?\d*")')
|
||||
def update_longtables(file):
|
||||
def update_longtables(document):
|
||||
" Update longtables to new format."
|
||||
regexp = re.compile(r'^\\begin_inset\s+Tabular')
|
||||
body = file.body
|
||||
body = document.body
|
||||
i = 0
|
||||
while 1:
|
||||
i = find_re(body, regexp, i)
|
||||
@ -643,7 +725,7 @@ def update_longtables(file):
|
||||
# remove longtable elements from features
|
||||
features = lt_features_re.search(body[i])
|
||||
if features:
|
||||
body[i] = string.replace(body[i], features.group(1), "")
|
||||
body[i] = body[i].replace(features.group(1), "")
|
||||
continue
|
||||
|
||||
row_info = row() * rows
|
||||
@ -680,9 +762,9 @@ def update_longtables(file):
|
||||
i = i + 1
|
||||
|
||||
|
||||
# Figure insert are hidden feature of lyx 1.1.6. This might be removed in the future.
|
||||
def fix_oldfloatinset(file):
|
||||
lines = file.body
|
||||
def fix_oldfloatinset(document):
|
||||
" Figure insert are hidden feature of lyx 1.1.6. This might be removed in the future."
|
||||
lines = document.body
|
||||
i = 0
|
||||
while 1:
|
||||
i = find_token(lines, "\\begin_inset Float ", i)
|
||||
@ -694,8 +776,9 @@ def fix_oldfloatinset(file):
|
||||
i = i+1
|
||||
|
||||
|
||||
def change_listof(file):
|
||||
lines = file.body
|
||||
def change_listof(document):
|
||||
" Change listof insets."
|
||||
lines = document.body
|
||||
i = 0
|
||||
while 1:
|
||||
i = find_token(lines, "\\begin_inset LatexCommand \\listof", i)
|
||||
@ -706,14 +789,15 @@ def change_listof(file):
|
||||
i = i+1
|
||||
|
||||
|
||||
def change_infoinset(file):
|
||||
lines = file.body
|
||||
def change_infoinset(document):
|
||||
" Change info inset."
|
||||
lines = document.body
|
||||
i = 0
|
||||
while 1:
|
||||
i = find_token(lines, "\\begin_inset Info", i)
|
||||
if i == -1:
|
||||
break
|
||||
txt = string.lstrip(lines[i][18:])
|
||||
txt = lines[i][18:].lstrip()
|
||||
new = ["\\begin_inset Note", "collapsed true", ""]
|
||||
j = find_token(lines, "\\end_inset", i)
|
||||
if j == -1:
|
||||
@ -724,8 +808,8 @@ def change_infoinset(file):
|
||||
note_lines = [txt]+note_lines
|
||||
|
||||
for line in note_lines:
|
||||
new = new + ['\layout %s' % file.default_layout, ""]
|
||||
tmp = string.split(line, '\\')
|
||||
new = new + ['\layout %s' % document.default_layout, ""]
|
||||
tmp = line.split('\\')
|
||||
new = new + [tmp[0]]
|
||||
for x in tmp[1:]:
|
||||
new = new + ["\\backslash ", x]
|
||||
@ -733,8 +817,9 @@ def change_infoinset(file):
|
||||
i = i+5
|
||||
|
||||
|
||||
def change_header(file):
|
||||
lines = file.header
|
||||
def change_header(document):
|
||||
" Update header."
|
||||
lines = document.header
|
||||
i = find_token(lines, "\\use_amsmath", 0)
|
||||
if i == -1:
|
||||
return
|
||||
@ -742,6 +827,7 @@ def change_header(file):
|
||||
"\use_numerical_citations 0"]
|
||||
|
||||
|
||||
supported_versions = ["1.2.%d" % i for i in range(5)] + ["1.2"]
|
||||
convert = [[220, [change_header, change_listof, fix_oldfloatinset,
|
||||
update_tabular, update_longtables, remove_pextra,
|
||||
remove_oldfloat, remove_figinset, remove_oldertinset,
|
||||
|
@ -1,7 +1,7 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: iso-8859-1 -*-
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2002 Dekel Tsur <dekel@lyx.org>
|
||||
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
|
||||
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
@ -17,13 +17,26 @@
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
|
||||
import string
|
||||
""" Convert files to the file format generated by lyx 1.3"""
|
||||
|
||||
import re
|
||||
from parser_tools import find_token, find_end_of_inset, get_value,\
|
||||
from parser_tools import find_token, find_end_of, get_value,\
|
||||
find_token_exact, del_token
|
||||
|
||||
def change_insetgraphics(file):
|
||||
lines = file.body
|
||||
####################################################################
|
||||
# Private helper functions
|
||||
|
||||
def find_end_of_inset(lines, i):
|
||||
"Finds the matching \end_inset"
|
||||
return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
|
||||
|
||||
# End of helper functions
|
||||
####################################################################
|
||||
|
||||
|
||||
def change_insetgraphics(document):
|
||||
" Change inset Graphics."
|
||||
lines = document.body
|
||||
i = 0
|
||||
while 1:
|
||||
i = find_token(lines, "\\begin_inset Graphics", i)
|
||||
@ -49,7 +62,7 @@ def change_insetgraphics(file):
|
||||
if k == -1:
|
||||
k = find_token_exact(lines, "size_kind", i, j)
|
||||
if k != -1:
|
||||
size_type = string.split(lines[k])[1]
|
||||
size_type = lines[k].split()[1]
|
||||
del lines[k]
|
||||
j = j-1
|
||||
if size_type in ["0", "original"]:
|
||||
@ -68,7 +81,7 @@ def change_insetgraphics(file):
|
||||
if k == -1:
|
||||
k = find_token_exact(lines, "lyxsize_kind", i, j)
|
||||
if k != -1:
|
||||
lyxsize_type = string.split(lines[k])[1]
|
||||
lyxsize_type = lines[k].split()[1]
|
||||
del lines[k]
|
||||
j = j-1
|
||||
j = del_token(lines, "lyxwidth", i, j)
|
||||
@ -80,8 +93,9 @@ def change_insetgraphics(file):
|
||||
i = i+1
|
||||
|
||||
|
||||
def change_tabular(file):
|
||||
lines = file.body
|
||||
def change_tabular(document):
|
||||
" Change tabular."
|
||||
lines = document.body
|
||||
i = 0
|
||||
while 1:
|
||||
i = find_token(lines, "<column", i)
|
||||
@ -92,6 +106,7 @@ def change_tabular(file):
|
||||
i = i+1
|
||||
|
||||
|
||||
supported_versions = ["1.3.%d" % i for i in range(8)] + ["1.3"]
|
||||
convert = [[221, [change_insetgraphics, change_tabular]]]
|
||||
revert = []
|
||||
|
||||
|
lib/lyx2lyx/lyx_1_5.py (new file, 2080 lines)
File diff suppressed because it is too large
@ -1,6 +1,6 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: iso-8859-1 -*-
|
||||
# Copyright (C) 2002-2004 Dekel Tsur <dekel@lyx.org>, José Matos <jamatos@lyx.org>
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2002-2004 Dekel Tsur <dekel@lyx.org>, José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
@ -16,65 +16,93 @@
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
|
||||
import string
|
||||
import re
|
||||
" This modules offer several free functions to help parse lines."
|
||||
|
||||
# Utilities for one line
|
||||
def check_token(line, token):
|
||||
if line[:len(token)] == token:
|
||||
return 1
|
||||
return 0
|
||||
""" check_token(line, token) -> bool
|
||||
|
||||
Return True if token is present in line and is the first element
|
||||
else returns False."""
|
||||
|
||||
return line[:len(token)] == token
|
||||
|
||||
|
||||
# We need to check that the char after the token is space, but I think
|
||||
# we can ignore this
|
||||
def find_token(lines, token, start, end = 0):
|
||||
def is_nonempty_line(line):
|
||||
""" is_nonempty_line(line) -> bool
|
||||
|
||||
Return False if line is either empty or it has only whitespaces,
|
||||
else return True."""
|
||||
return line != " "*len(line)
|
||||
|
||||
|
||||
# Utilities for a list of lines
|
||||
def find_token(lines, token, start, end = 0, exact = False):
|
||||
""" find_token(lines, token, start[[, end], exact]) -> int
|
||||
|
||||
Return the lowest line where token is found, and is the first
|
||||
element, in lines[start, end].
|
||||
|
||||
Return -1 on failure."""
|
||||
|
||||
if end == 0:
|
||||
end = len(lines)
|
||||
m = len(token)
|
||||
for i in xrange(start, end):
|
||||
if lines[i][:m] == token:
|
||||
return i
|
||||
return -1
|
||||
|
||||
|
||||
def find_token_exact(lines, token, start, end = 0):
|
||||
if end == 0:
|
||||
end = len(lines)
|
||||
for i in xrange(start, end):
|
||||
x = string.split(lines[i])
|
||||
y = string.split(token)
|
||||
if len(x) < len(y):
|
||||
continue
|
||||
if x[:len(y)] == y:
|
||||
return i
|
||||
return -1
|
||||
|
||||
|
||||
def find_tokens(lines, tokens, start, end = 0):
|
||||
if end == 0:
|
||||
end = len(lines)
|
||||
for i in xrange(start, end):
|
||||
for token in tokens:
|
||||
if lines[i][:len(token)] == token:
|
||||
return i
|
||||
return -1
|
||||
|
||||
|
||||
def find_tokens_exact(lines, tokens, start, end = 0):
|
||||
if end == 0:
|
||||
end = len(lines)
|
||||
for i in xrange(start, end):
|
||||
for token in tokens:
|
||||
x = string.split(lines[i])
|
||||
y = string.split(token)
|
||||
if exact:
|
||||
x = lines[i].split()
|
||||
y = token.split()
|
||||
if len(x) < len(y):
|
||||
continue
|
||||
if x[:len(y)] == y:
|
||||
return i
|
||||
else:
|
||||
if lines[i][:m] == token:
|
||||
return i
|
||||
return -1
|
||||
|
||||
|
||||
def find_token_exact(lines, token, start, end = 0):
|
||||
return find_token(lines, token, start, end, True)
|
||||
|
||||
|
||||
def find_tokens(lines, tokens, start, end = 0, exact = False):
|
||||
""" find_tokens(lines, tokens, start[[, end], exact]) -> int
|
||||
|
||||
Return the lowest line where one token in tokens is found, and is
|
||||
the first element, in lines[start, end].
|
||||
|
||||
Return -1 on failure."""
|
||||
if end == 0:
|
||||
end = len(lines)
|
||||
|
||||
for i in xrange(start, end):
|
||||
for token in tokens:
|
||||
if exact:
|
||||
x = lines[i].split()
|
||||
y = token.split()
|
||||
if len(x) < len(y):
|
||||
continue
|
||||
if x[:len(y)] == y:
|
||||
return i
|
||||
else:
|
||||
if lines[i][:len(token)] == token:
|
||||
return i
|
||||
return -1
|
||||
|
||||
|
||||
def find_tokens_exact(lines, tokens, start, end = 0):
|
||||
return find_tokens(lines, tokens, start, end, True)
|
||||
|
||||
|
||||
def find_re(lines, rexp, start, end = 0):
|
||||
""" find_token_re(lines, rexp, start[, end]) -> int
|
||||
|
||||
Return the lowest line where rexp, a regular expression, is found
|
||||
in lines[start, end].
|
||||
|
||||
Return -1 on failure."""
|
||||
|
||||
if end == 0:
|
||||
end = len(lines)
|
||||
for i in xrange(start, end):
|
||||
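The reworked find_token above folds find_token_exact into a single function: without exact it matches by raw prefix, with exact it compares whitespace-separated tokens. A simplified Python 3 rendering to make the difference concrete; the sample body lines are invented:

def find_token(lines, token, start=0, end=0, exact=False):
    # Simplified sketch of the merged find_token/find_token_exact above (range instead of xrange).
    if end == 0:
        end = len(lines)
    for i in range(start, end):
        if exact:
            x, y = lines[i].split(), token.split()
            if len(x) >= len(y) and x[:len(y)] == y:
                return i
        elif lines[i].startswith(token):
            return i
    return -1

body = ["\\layout Standard", "\\layout_options foo", "\\layout Quote"]
print(find_token(body, "\\layout", 1))              # -> 1 (prefix also matches \layout_options)
print(find_token(body, "\\layout", 1, exact=True))  # -> 2 (token comparison skips it)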
@ -84,6 +112,12 @@ def find_re(lines, rexp, start, end = 0):
|
||||
|
||||
|
||||
def find_token_backwards(lines, token, start):
|
||||
""" find_token_backwards(lines, token, start) -> int
|
||||
|
||||
Return the highest line where token is found, and is the first
|
||||
element, in lines[start, end].
|
||||
|
||||
Return -1 on failure."""
|
||||
m = len(token)
|
||||
for i in xrange(start, -1, -1):
|
||||
line = lines[i]
|
||||
@ -93,6 +127,12 @@ def find_token_backwards(lines, token, start):
|
||||
|
||||
|
||||
def find_tokens_backwards(lines, tokens, start):
|
||||
""" find_tokens_backwards(lines, token, start) -> int
|
||||
|
||||
Return the highest line where token is found, and is the first
|
||||
element, in lines[end, start].
|
||||
|
||||
Return -1 on failure."""
|
||||
for i in xrange(start, -1, -1):
|
||||
line = lines[i]
|
||||
for token in tokens:
|
||||
@ -101,82 +141,43 @@ def find_tokens_backwards(lines, tokens, start):
|
||||
return -1
|
||||
|
||||
|
||||
def get_value(lines, token, start, end = 0):
|
||||
def get_value(lines, token, start, end = 0, default = ""):
|
||||
""" get_value(lines, token, start[[, end], default]) -> list of strings
|
||||
|
||||
Return tokens after token for the first line, in lines, where
|
||||
token is the first element."""
|
||||
|
||||
i = find_token_exact(lines, token, start, end)
|
||||
if i == -1:
|
||||
return ""
|
||||
if len(string.split(lines[i])) > 1:
|
||||
return string.split(lines[i])[1]
|
||||
if len(lines[i].split()) > 1:
|
||||
return lines[i].split()[1]
|
||||
else:
|
||||
return ""
|
||||
return default
|
||||
|
||||
|
||||
def get_layout(line, default_layout):
|
||||
tokens = string.split(line)
|
||||
if len(tokens) > 1:
|
||||
return tokens[1]
|
||||
return default_layout
|
||||
def del_token(lines, token, start, end):
|
||||
""" del_token(lines, token, start, end) -> int
|
||||
|
||||
Find the lower line in lines where token is the first element and
|
||||
delete that line.
|
||||
|
||||
def del_token(lines, token, i, j):
|
||||
k = find_token_exact(lines, token, i, j)
|
||||
Returns the number of lines remaining."""
|
||||
|
||||
k = find_token_exact(lines, token, start, end)
|
||||
if k == -1:
|
||||
return j
|
||||
return end
|
||||
else:
|
||||
del lines[k]
|
||||
return j-1
|
||||
return end - 1
|
||||
|
||||
|
||||
# Finds the paragraph that contains line i.
|
||||
def get_paragraph(lines, i, format):
|
||||
if format < 225:
|
||||
begin_layout = "\\layout"
|
||||
else:
|
||||
begin_layout = "\\begin_layout"
|
||||
while i != -1:
|
||||
i = find_tokens_backwards(lines, ["\\end_inset", begin_layout], i)
|
||||
if i == -1: return -1
|
||||
if check_token(lines[i], begin_layout):
|
||||
return i
|
||||
i = find_beginning_of_inset(lines, i)
|
||||
return -1
|
||||
|
||||
|
||||
# Finds the paragraph after the paragraph that contains line i.
|
||||
def get_next_paragraph(lines, i, format):
|
||||
if format < 225:
|
||||
tokens = ["\\begin_inset", "\\layout", "\\end_float", "\\the_end"]
|
||||
elif format < 236:
|
||||
tokens = ["\\begin_inset", "\\begin_layout", "\\end_float", "\\end_document"]
|
||||
else:
|
||||
tokens = ["\\begin_inset", "\\begin_layout", "\\end_float", "\\end_body", "\\end_document"]
|
||||
while i != -1:
|
||||
i = find_tokens(lines, tokens, i)
|
||||
if not check_token(lines[i], "\\begin_inset"):
|
||||
return i
|
||||
i = find_end_of_inset(lines, i)
|
||||
return -1
|
||||
|
||||
|
||||
def find_end_of(lines, i, start_token, end_token):
|
||||
count = 1
|
||||
n = len(lines)
|
||||
while i < n:
|
||||
i = find_tokens(lines, [end_token, start_token], i+1)
|
||||
if check_token(lines[i], start_token):
|
||||
count = count+1
|
||||
else:
|
||||
count = count-1
|
||||
if count == 0:
|
||||
return i
|
||||
return -1
|
||||
|
||||
|
||||
# Finds the matching \end_inset
|
||||
def find_beginning_of(lines, i, start_token, end_token):
|
||||
count = 1
|
||||
while i > 0:
|
||||
i = find_tokens_backwards(lines, [start_token, end_token], i-1)
|
||||
if i == -1:
|
||||
return -1
|
||||
if check_token(lines[i], end_token):
|
||||
count = count+1
|
||||
else:
|
||||
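get_value now takes a default that is returned when the token is present but carries no value, and del_token reports the shrunken end index. A simplified, self-contained sketch of the get_value behaviour; the header lines are invented and the real helper searches with find_token_exact:

def get_value(lines, token, default=""):
    # Simplified: scan for the first line whose first word is `token`.
    for line in lines:
        words = line.split()
        if words[:1] == [token]:
            return words[1] if len(words) > 1 else default
    return ""

header = ["\\lyxformat 245", "\\use_amsmath"]
print(get_value(header, "\\lyxformat"))                 # -> 245
print(get_value(header, "\\use_amsmath", default="0"))  # -> 0 (token present, no value)
print(get_value(header, "\\papersize"))                 # -> "" (token absent)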
@ -186,38 +187,20 @@ def find_beginning_of(lines, i, start_token, end_token):
|
||||
return -1
|
||||
|
||||
|
||||
# Finds the matching \end_inset
|
||||
def find_end_of_inset(lines, i):
|
||||
return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
|
||||
|
||||
|
||||
# Finds the matching \end_inset
|
||||
def find_beginning_of_inset(lines, i):
|
||||
return find_beginning_of(lines, i, "\\begin_inset", "\\end_inset")
|
||||
|
||||
|
||||
def find_end_of_tabular(lines, i):
|
||||
return find_end_of(lines, i, "<lyxtabular", "</lyxtabular")
|
||||
|
||||
|
||||
def get_tabular_lines(lines, i):
|
||||
result = []
|
||||
i = i+1
|
||||
j = find_end_of_tabular(lines, i)
|
||||
if j == -1:
|
||||
return []
|
||||
|
||||
while i <= j:
|
||||
if check_token(lines[i], "\\begin_inset"):
|
||||
i = find_end_of_inset(lines, i)+1
|
||||
def find_end_of(lines, i, start_token, end_token):
|
||||
count = 1
|
||||
n = len(lines)
|
||||
while i < n:
|
||||
i = find_tokens(lines, [end_token, start_token], i+1)
|
||||
if i == -1:
|
||||
return -1
|
||||
if check_token(lines[i], start_token):
|
||||
count = count+1
|
||||
else:
|
||||
result.append(i)
|
||||
i = i+1
|
||||
return result
|
||||
|
||||
|
||||
def is_nonempty_line(line):
|
||||
return line != " "*len(line)
|
||||
count = count-1
|
||||
if count == 0:
|
||||
return i
|
||||
return -1
|
||||
|
||||
|
||||
def find_nonempty_line(lines, start, end = 0):
|
||||
|
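find_end_of is the generic balanced-delimiter scan that find_end_of_inset and find_end_of_tabular are built on; the rewritten version above also guards against a missing end token (i == -1). A simplified standalone sketch with an invented nested-inset body:

def find_end_of(lines, i, start_token, end_token):
    # Simplified sketch: walk forward from the opening line, keeping a nesting count.
    count = 1
    while i + 1 < len(lines):
        i += 1
        if lines[i].startswith(start_token):
            count += 1
        elif lines[i].startswith(end_token):
            count -= 1
            if count == 0:
                return i
    return -1

body = [
    "\\begin_inset Note",      # 0: outer inset opens
    "\\begin_inset Graphics",  # 1: nested inset
    "\\end_inset",             # 2: closes the nested inset
    "some text",               # 3
    "\\end_inset",             # 4: closes the outer inset
]
print(find_end_of(body, 0, "\\begin_inset", "\\end_inset"))  # -> 4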
@ -1,6 +1,6 @@
|
||||
#! /usr/bin/env python
|
||||
# -*- coding: iso-8859-1 -*-
|
||||
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
|
lib/unicodesymbols (new file, 1399 lines)
File diff suppressed because it is too large