lyx_mirror/lib/lyx2lyx/lyx_2_1.py
Uwe Stöhr fe8c185a6e support for Australian and Newzealand as document language; fileformat change
git-svn-id: svn://svn.lyx.org/lyx/lyx-devel/trunk@40452 a592a061-630c-0410-9148-cb99ea01b6c8
2011-12-08 23:58:30 +00:00

251 lines
8.2 KiB
Python

# -*- coding: utf-8 -*-
# This file is part of lyx2lyx
# Copyright (C) 2011 The LyX team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
""" Convert files to the file format generated by lyx 2.1"""
import re, string
import unicodedata
import sys, os
# Uncomment only what you need to import, please.
from parser_tools import find_token, find_end_of_inset, get_value, \
del_token
#from parser_tools import find_token, find_end_of, find_tokens, \
#find_token_exact, find_end_of_inset, find_end_of_layout, \
#find_token_backwards, is_in_inset, get_value, get_quoted_value, \
#del_token, check_token, get_option_value
from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert
#from lyx2lyx_tools import add_to_preamble, insert_to_preamble, \
# put_cmd_in_ert, lyx2latex, latex_length, revert_flex_inset, \
# revert_font_attrs, hex2ratio, str2bool
####################################################################
# Private helper functions
#def remove_option(lines, m, option):
#''' removes option from line m. returns whether we did anything '''
#l = lines[m].find(option)
#if l == -1:
#return False
#val = lines[m][l:].split('"')[1]
#lines[m] = lines[m][:l - 1] + lines[m][l+len(option + '="' + val + '"'):]
#return True
###############################################################################
###
### Conversion and reversion routines
###
###############################################################################
def revert_visible_space(document):
    "Revert InsetSpace visible into its ERT counterpart"
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset space \\textvisiblespace{}", pos)
        if pos == -1:
            break
        # Replace the whole space inset (through its \end_inset line)
        # with an ERT inset holding the raw LaTeX command.
        inset_end = find_end_of_inset(document.body, pos)
        document.body[pos:inset_end + 1] = put_cmd_in_ert("\\textvisiblespace{}")
def convert_undertilde(document):
    " Load undertilde automatically "
    # Anchor the new header line right after \use_mathdots, which every
    # document of the previous format carries.
    anchor = find_token(document.header, "\\use_mathdots", 0)
    if anchor == -1:
        return
    document.header.insert(anchor + 1, "\\use_undertilde 1")
def revert_undertilde(document):
    """Load undertilde in the preamble if the document needs it.

    Reads and removes the \\use_undertilde header line:
      0 = never load, 2 = always load, 1 (auto) = load only when some
      Formula inset actually uses \\utilde.
    """
    # Default to auto (1).  BUG FIX: previously `usetilde` was left unbound
    # when the header line was missing, raising NameError at the first use
    # below despite the "Assuming auto." warning.
    usetilde = 1
    i = find_token(document.header, "\\use_undertilde", 0)
    if i == -1:
        document.warning("No \\use_undertilde line. Assuming auto.")
    else:
        val = get_value(document.header, "\\use_undertilde", i)
        del document.header[i]
        try:
            usetilde = int(val)
        except (TypeError, ValueError):
            # Narrowed from a bare except: only conversion failures are expected.
            document.warning("Invalid \\use_undertilde value: " + val + ". Assuming auto.")
            usetilde = 1
    if usetilde == 0:
        # do not load case
        return
    if usetilde == 2:
        # force load case
        add_to_preamble(document, ["\\usepackage{undertilde}"])
        return
    # Auto case: load undertilde only if \utilde occurs inside a formula.
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Formula', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
            i += 1
            continue
        code = "\n".join(document.body[i:j])
        if code.find("\\utilde") != -1:
            add_to_preamble(document, ["\\@ifundefined{utilde}{\\usepackage{undertilde}}"])
            return
        i = j
def revert_negative_space(document):
    """Revert InsetSpace negmedspace and negthickspace into TeX-code.

    BUG FIXES vs. the previous version: the interleaved search reverted a
    \\negthickspace inset only when a \\negmedspace inset was found in the
    same pass (documents containing only one of the two kept unreverted
    insets), and the amsmath preamble guard was added only when both kinds
    had been reverted.  Now each command is swept independently and the
    preamble guard is added whenever anything was reverted.
    """
    reverted = False
    for command in ("\\negmedspace{}", "\\negthickspace{}"):
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset space " + command, i)
            if i == -1:
                break
            end = find_end_of_inset(document.body, i)
            document.body[i:end + 1] = put_cmd_in_ert(command)
            reverted = True
    # Load amsmath in the preamble unless the document already forces it
    # (\use_amsmath 2); \negmedspace/\negthickspace are amsmath commands.
    if reverted:
        if find_token(document.header, "\\use_amsmath 2", 0) == -1:
            add_to_preamble(document, ["\\@ifundefined{negthickspace}{\\usepackage{amsmath}}"])
def revert_math_spaces(document):
    "Revert formulas with protected custom space and protected hfills to TeX-code"
    k = 0
    while True:
        k = find_token(document.body, "\\begin_inset Formula", k)
        if k == -1:
            return
        # Only single-line formulas containing \hspace* are converted.
        if document.body[k].find("\\hspace*") != -1:
            last = find_end_of_inset(document.body, k)
            # Drop the 21-character "\begin_inset Formula " prefix and wrap
            # the remaining LaTeX in an ERT inset.
            document.body[k:last + 1] = put_cmd_in_ert(document.body[k][21:])
        k += 1
def convert_japanese_encodings(document):
    " Rename the japanese encodings to names understood by platex "
    renames = {
        "EUC-JP-pLaTeX": "euc",
        "JIS-pLaTeX": "jis",
        "SJIS-pLaTeX": "sjis",
    }
    pos = find_token(document.header, "\\inputencoding", 0)
    if pos == -1:
        return
    enc = get_value(document.header, "\\inputencoding", pos)
    if enc in renames:
        document.header[pos] = "\\inputencoding %s" % renames[enc]
def revert_japanese_encodings(document):
    " Revert the japanese encodings name changes "
    # Inverse mapping of convert_japanese_encodings.
    renames = {
        "euc": "EUC-JP-pLaTeX",
        "jis": "JIS-pLaTeX",
        "sjis": "SJIS-pLaTeX",
    }
    pos = find_token(document.header, "\\inputencoding", 0)
    if pos == -1:
        return
    enc = get_value(document.header, "\\inputencoding", pos)
    if enc in renames:
        document.header[pos] = "\\inputencoding %s" % renames[enc]
def revert_justification(document):
    " Revert the \\justification buffer param"
    # del_token returns False when the token is absent; warn in that case.
    removed = del_token(document.header, '\\justification', 0)
    if not removed:
        document.warning("Malformed LyX document: Missing \\justification.")
def revert_australian(document):
    "Set English language variants Australian and Newzealand to English"
    # Fix up the document language and its header line first.
    if document.language in ("australian", "newzealand"):
        document.language = "english"
        pos = find_token(document.header, "\\language", 0)
        if pos != -1:
            document.header[pos] = "\\language english"
    # Then rewrite every inline \lang switch for either variant.
    for variant in ("\\lang australian", "\\lang newzealand"):
        pos = 0
        while True:
            pos = find_token(document.body, variant, pos)
            if pos == -1:
                break
            document.body[pos] = document.body[pos].replace(variant, "\\lang english")
            pos += 1
##
# Conversion hub
#
# LyX versions this module can produce files for.
supported_versions = ["2.1.0","2.1"]
# Each entry: [target file format number, [functions applied when
# converting UP to that format]].  An empty list means a pure format bump.
convert = [
    [414, []],
    [415, [convert_undertilde]],
    [416, []],
    [417, [convert_japanese_encodings]],
    [418, []],
    [419, []]
]
# Each entry: [resulting (older) file format number, [functions applied
# when converting DOWN from the format above it]].
revert = [
    [418, [revert_australian]],
    [417, [revert_justification]],
    [416, [revert_japanese_encodings]],
    [415, [revert_negative_space,revert_math_spaces]],
    [414, [revert_undertilde]],
    [413, [revert_visible_space]]
]
# This module is only ever driven by lyx2lyx itself.
if __name__ == "__main__":
    pass