# -*- coding: utf-8 -*-
# This file is part of lyx2lyx
# Copyright (C) 2011 The LyX team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

""" Convert files to the file format generated by lyx 2.0"""

import re, string
import unicodedata
import sys, os

from parser_tools import find_token, find_end_of, find_tokens, \
  find_token_exact, find_end_of_inset, find_end_of_layout, \
  find_token_backwards, is_in_inset, get_value, get_quoted_value, \
  del_token, check_token, get_option_value

from lyx2lyx_tools import add_to_preamble, insert_to_preamble, \
  put_cmd_in_ert, lyx2latex, latex_length, revert_flex_inset, \
  revert_font_attrs, hex2ratio, str2bool


####################################################################
# Private helper functions


def remove_option(lines, m, option):
    ''' removes option from line m. returns whether we did anything '''
    l = lines[m].find(option)
    if l == -1:
        return False
    val = lines[m][l:].split('"')[1]
    lines[m] = lines[m][:l - 1] + lines[m][l + len(option + '="' + val + '"'):]
    return True
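
# Example for remove_option (hypothetical input): if lines[m] is
# '<features rotate="0" islongtable="true">', then
# remove_option(lines, m, "islongtable") rewrites it to '<features rotate="0">'.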


###############################################################################
###
### Conversion and reversion routines
###
###############################################################################


def revert_swiss(document):
    " Set language german-ch to ngerman "
    i = 0
    if document.language == "german-ch":
        document.language = "ngerman"
        i = find_token(document.header, "\\language", 0)
        if i != -1:
            document.header[i] = "\\language ngerman"
    j = 0
    while True:
        j = find_token(document.body, "\\lang german-ch", j)
        if j == -1:
            return
        document.body[j] = document.body[j].replace("\\lang german-ch", "\\lang ngerman")
        j = j + 1


def revert_tabularvalign(document):
    " Revert the tabular valign option "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i)
        if i == -1:
            return
        end = find_end_of_inset(document.body, i)
        if end == -1:
            document.warning("Can't find end of inset at line " + str(i))
            i += 1
            continue
        fline = find_token(document.body, "<features", i, end)
        if fline == -1:
            document.warning("Can't find features for inset at line " + str(i))
            i += 1
            continue
        p = document.body[fline].find("islongtable")
        if p != -1:
            q = document.body[fline].find("tabularvalignment")
            if q != -1:
                # FIXME
                # This seems wrong: It removes everything after
                # tabularvalignment, too.
                document.body[fline] = document.body[fline][:q - 1] + '>'
            i += 1
            continue

        # no longtable
        tabularvalignment = 'c'
        # which valignment is specified?
        m = document.body[fline].find('tabularvalignment="top"')
        if m != -1:
            tabularvalignment = 't'
        m = document.body[fline].find('tabularvalignment="bottom"')
        if m != -1:
            tabularvalignment = 'b'
        # delete tabularvalignment
        q = document.body[fline].find("tabularvalignment")
        if q != -1:
            # FIXME
            # This seems wrong: It removes everything after
            # tabularvalignment, too.
            document.body[fline] = document.body[fline][:q - 1] + '>'

        # don't add a box when centered
        if tabularvalignment == 'c':
            i = end
            continue
        subst = ['\\end_layout', '\\end_inset']
        document.body[end:end] = subst # just inserts those lines
        subst = ['\\begin_inset Box Frameless',
                 'position "' + tabularvalignment + '"',
                 'hor_pos "c"',
                 'has_inner_box 1',
                 'inner_pos "c"',
                 'use_parbox 0',
                 # we don't know the width, assume 50%
                 'width "50col%"',
                 'special "none"',
                 'height "1in"',
                 'height_special "totalheight"',
                 'status open',
                 '',
                 '\\begin_layout Plain Layout']
        document.body[i:i] = subst # this just inserts the array at i
        # since there could be a tabular inside a tabular, we cannot
        # jump to end
        i += len(subst)


def revert_phantom_types(document, ptype, cmd):
    " Reverts phantom to ERT "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Phantom " + ptype, i)
        if i == -1:
            return
        end = find_end_of_inset(document.body, i)
        if end == -1:
            document.warning("Can't find end of inset at line " + str(i))
            i += 1
            continue
        blay = find_token(document.body, "\\begin_layout Plain Layout", i, end)
        if blay == -1:
            document.warning("Can't find layout for inset at line " + str(i))
            i = end
            continue
        bend = find_end_of_layout(document.body, blay)
        if bend == -1:
            document.warning("Malformed LyX document: Could not find end of Phantom inset's layout.")
            i = end
            continue
        substi = ["\\begin_inset ERT", "status collapsed", "",
                  "\\begin_layout Plain Layout", "", "", "\\backslash",
                  cmd + "{", "\\end_layout", "", "\\end_inset"]
        substj = ["\\size default", "", "\\begin_inset ERT", "status collapsed", "",
                  "\\begin_layout Plain Layout", "", "}", "\\end_layout", "", "\\end_inset"]
        # do the later one first so as not to mess up the numbering
        document.body[bend:end + 1] = substj
        document.body[i:blay + 1] = substi
        i = end + len(substi) + len(substj) - (end - bend) - (blay - i) - 2


def revert_phantom(document):
    revert_phantom_types(document, "Phantom", "phantom")

def revert_hphantom(document):
    revert_phantom_types(document, "HPhantom", "hphantom")

def revert_vphantom(document):
    revert_phantom_types(document, "VPhantom", "vphantom")


def revert_xetex(document):
    " Reverts documents that use XeTeX "

    i = find_token(document.header, '\\use_xetex', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\use_xetex.")
        return
    if not str2bool(get_value(document.header, "\\use_xetex", i)):
        del document.header[i]
        return
    del document.header[i]

    # 1.) set doc encoding to utf8-plain
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\inputencoding.")
    else:
        document.header[i] = "\\inputencoding utf8-plain"

    # 2.) check font settings
    # defaults
    roman = sans = typew = "default"
    osf = False
    sf_scale = tt_scale = 100.0

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        roman = get_value(document.header, "\\font_roman", i)
        document.header[i] = "\\font_roman default"

    i = find_token(document.header, "\\font_sans", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        sans = get_value(document.header, "\\font_sans", i)
        document.header[i] = "\\font_sans default"

    i = find_token(document.header, "\\font_typewriter", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        typew = get_value(document.header, "\\font_typewriter", i)
        document.header[i] = "\\font_typewriter default"

    i = find_token(document.header, "\\font_osf", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_osf.")
    else:
        osf = str2bool(get_value(document.header, "\\font_osf", i))
        document.header[i] = "\\font_osf false"

    i = find_token(document.header, "\\font_sc", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sc.")
    else:
        # we do not need this value.
        document.header[i] = "\\font_sc false"

    i = find_token(document.header, "\\font_sf_scale", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sf_scale.")
    else:
        val = get_value(document.header, '\\font_sf_scale', i)
        try:
            # float() can throw
            sf_scale = float(val)
        except:
            document.warning("Invalid font_sf_scale value: " + val)
        document.header[i] = "\\font_sf_scale 100"

    i = find_token(document.header, "\\font_tt_scale", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_tt_scale.")
    else:
        val = get_value(document.header, '\\font_tt_scale', i)
        try:
            # float() can throw
            tt_scale = float(val)
        except:
            document.warning("Invalid font_tt_scale value: " + val)
        document.header[i] = "\\font_tt_scale 100"

    # 3.) set preamble stuff
    pretext = ['%% This document must be processed with xelatex!']
    pretext.append('\\usepackage{fontspec}')
    if roman != "default":
        pretext.append('\\setmainfont[Mapping=tex-text]{' + roman + '}')
    if sans != "default":
        sf = '\\setsansfont['
        if sf_scale != 100.0:
            sf += 'Scale=' + str(sf_scale / 100.0) + ','
        sf += 'Mapping=tex-text]{' + sans + '}'
        pretext.append(sf)
    if typew != "default":
        tw = '\\setmonofont'
        if tt_scale != 100.0:
            tw += '[Scale=' + str(tt_scale / 100.0) + ']'
        tw += '{' + typew + '}'
        pretext.append(tw)
    if osf:
        pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
    pretext.append('\\usepackage{xunicode}')
    pretext.append('\\usepackage{xltxtra}')
    insert_to_preamble(document, pretext)


def revert_outputformat(document):
    " Remove default output format param "

    if not del_token(document.header, '\\default_output_format', 0):
        document.warning("Malformed LyX document: Missing \\default_output_format.")


def revert_backgroundcolor(document):
    " Reverts background color to preamble code "
    i = find_token(document.header, "\\backgroundcolor", 0)
    if i == -1:
        return
    colorcode = get_value(document.header, '\\backgroundcolor', i)
    del document.header[i]
    # don't clutter the preamble if backgroundcolor is not set
    if colorcode == "#ffffff":
        return
    red = hex2ratio(colorcode[1:3])
    green = hex2ratio(colorcode[3:5])
    blue = hex2ratio(colorcode[5:7])
    insert_to_preamble(document, \
        ['% To set the background color',
         '\\@ifundefined{definecolor}{\\usepackage{color}}{}',
         '\\definecolor{page_backgroundcolor}{rgb}{' + red + ',' + green + ',' + blue + '}',
         '\\pagecolor{page_backgroundcolor}'])


def revert_splitindex(document):
    " Reverts splitindex-aware documents "
    i = find_token(document.header, '\\use_indices', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\use_indices.")
        return
    useindices = str2bool(get_value(document.header, "\\use_indices", i))
    del document.header[i]
    preamble = []
    if useindices:
        preamble.append("\\usepackage{splitidx}")

    # deal with index declarations in the preamble
    i = 0
    while True:
        i = find_token(document.header, "\\index", i)
        if i == -1:
            break
        k = find_token(document.header, "\\end_index", i)
        if k == -1:
            document.warning("Malformed LyX document: Missing \\end_index.")
            return
        if useindices:
            line = document.header[i]
            l = re.compile(r'\\index (.*)$')
            m = l.match(line)
            iname = m.group(1)
            ishortcut = get_value(document.header, '\\shortcut', i, k)
            if ishortcut != "":
                preamble.append("\\newindex[" + iname + "]{" + ishortcut + "}")
        del document.header[i:k + 1]
    if preamble:
        insert_to_preamble(document, preamble)

    # deal with index insets
    # these need to have the argument removed
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Index", i)
        if i == -1:
            break
        line = document.body[i]
        l = re.compile(r'\\begin_inset Index (.*)$')
        m = l.match(line)
        itype = m.group(1)
        if itype == "idx" or not useindices:
            document.body[i] = "\\begin_inset Index"
        else:
            k = find_end_of_inset(document.body, i)
            if k == -1:
                document.warning("Can't find end of index inset!")
                i += 1
                continue
            content = lyx2latex(document, document.body[i:k])
            # escape quotes
            content = content.replace('"', r'\"')
            subst = put_cmd_in_ert("\\sindex[" + itype + "]{" + content + "}")
            document.body[i:k + 1] = subst
        i = i + 1

    # deal with index_print insets
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset index_print", i)
        if i == -1:
            return
        k = find_end_of_inset(document.body, i)
        ptype = get_quoted_value(document.body, 'type', i, k)
        if ptype == "idx":
            j = find_token(document.body, "type", i, k)
            del document.body[j]
        elif not useindices:
            del document.body[i:k + 1]
        else:
            subst = put_cmd_in_ert("\\printindex[" + ptype + "]{}")
            document.body[i:k + 1] = subst
        i = i + 1


def convert_splitindex(document):
    " Converts index and printindex insets to splitindex-aware format "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Index", i)
        if i == -1:
            break
        document.body[i] = document.body[i].replace("\\begin_inset Index",
            "\\begin_inset Index idx")
        i = i + 1
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset index_print", i)
        if i == -1:
            return
        if document.body[i + 1].find('LatexCommand printindex') == -1:
            document.warning("Malformed LyX document: Incomplete printindex inset.")
            return
        subst = ["LatexCommand printindex",
                 "type \"idx\""]
        document.body[i + 1:i + 2] = subst
        i = i + 1


def revert_subindex(document):
    " Reverts \\printsubindex CommandInset types "
    i = find_token(document.header, '\\use_indices', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\use_indices.")
        return
    useindices = str2bool(get_value(document.header, "\\use_indices", i))
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset index_print", i)
        if i == -1:
            return
        k = find_end_of_inset(document.body, i)
        ctype = get_value(document.body, 'LatexCommand', i, k)
        if ctype != "printsubindex":
            i = k + 1
            continue
        ptype = get_quoted_value(document.body, 'type', i, k)
        if not useindices:
            del document.body[i:k + 1]
        else:
            subst = put_cmd_in_ert("\\printsubindex[" + ptype + "]{}")
            document.body[i:k + 1] = subst
        i = i + 1


def revert_printindexall(document):
    " Reverts \\print[sub]index* CommandInset types "
    i = find_token(document.header, '\\use_indices', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\use_indices.")
        return
    useindices = str2bool(get_value(document.header, "\\use_indices", i))
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset index_print", i)
        if i == -1:
            return
        k = find_end_of_inset(document.body, i)
        ctype = get_value(document.body, 'LatexCommand', i, k)
        if ctype != "printindex*" and ctype != "printsubindex*":
            i = k
            continue
        if not useindices:
            del document.body[i:k + 1]
        else:
            subst = put_cmd_in_ert("\\" + ctype + "{}")
            document.body[i:k + 1] = subst
        i = i + 1


def revert_strikeout(document):
    " Reverts \\strikeout font attribute "
    changed = revert_font_attrs(document.body, "\\uuline", "\\uuline")
    changed = revert_font_attrs(document.body, "\\uwave", "\\uwave") or changed
    changed = revert_font_attrs(document.body, "\\strikeout", "\\sout") or changed
    if changed:
        insert_to_preamble(document, \
            ['% for proper underlining',
             '\\PassOptionsToPackage{normalem}{ulem}',
             '\\usepackage{ulem}'])


def revert_ulinelatex(document):
    " Reverts \\uline font attribute "
    i = find_token(document.body, '\\bar under', 0)
    if i == -1:
        return
    insert_to_preamble(document,\
        ['% for proper underlining',
         '\\PassOptionsToPackage{normalem}{ulem}',
         '\\usepackage{ulem}',
         '\\let\\cite@rig\\cite',
         '\\newcommand{\\b@xcite}[2][\\%]{\\def\\def@pt{\\%}\\def\\pas@pt{#1}',
         '  \\mbox{\\ifx\\def@pt\\pas@pt\\cite@rig{#2}\\else\\cite@rig[#1]{#2}\\fi}}',
         '\\renewcommand{\\underbar}[1]{{\\let\\cite\\b@xcite\\uline{#1}}}'])


def revert_custom_processors(document):
    " Remove bibtex_command and index_command params "

    if not del_token(document.header, '\\bibtex_command', 0):
        document.warning("Malformed LyX document: Missing \\bibtex_command.")

    if not del_token(document.header, '\\index_command', 0):
        document.warning("Malformed LyX document: Missing \\index_command.")


def convert_nomencl_width(document):
    " Add set_width param to nomencl_print "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset nomencl_print", i)
        if i == -1:
            break
        document.body.insert(i + 2, "set_width \"none\"")
        i = i + 1


def revert_nomencl_width(document):
    " Remove set_width param from nomencl_print "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset nomencl_print", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if not del_token(document.body, "set_width", i, j):
            document.warning("Can't find set_width option for nomencl_print!")
        i = j


def revert_nomencl_cwidth(document):
    " Remove width param from nomencl_print "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset nomencl_print", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        l = find_token(document.body, "width", i, j)
        if l == -1:
            document.warning("Can't find width option for nomencl_print!")
            i = j
            continue
        width = get_quoted_value(document.body, "width", i, j)
        del document.body[l]
        insert_to_preamble(document, ["\\setlength{\\nomlabelwidth}{" + width + "}"])
        i = j - 1


def revert_applemac(document):
    " Revert applemac encoding to auto "
    if document.encoding != "applemac":
        return
    document.encoding = "auto"
    i = find_token(document.header, "\\encoding", 0)
    if i != -1:
        document.header[i] = "\\encoding auto"


def revert_longtable_align(document):
    " Remove longtable alignment setting "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i)
        if i == -1:
            break
        end = find_end_of_inset(document.body, i)
        if end == -1:
            document.warning("Can't find end of inset at line " + str(i))
            i += 1
            continue
        fline = find_token(document.body, "<features", i, end)
        if fline == -1:
            document.warning("Can't find features for inset at line " + str(i))
            i += 1
            continue
        j = document.body[fline].find("longtabularalignment")
        if j == -1:
            i += 1
            continue
        # FIXME Is this correct? It wipes out everything after the
        # one we found.
        document.body[fline] = document.body[fline][:j - 1] + '>'
        # since there could be a tabular inside this one, we
        # cannot jump to end.
        i += 1


def revert_branch_filename(document):
    " Remove \\filename_suffix parameter from branches "
    i = 0
    while True:
        i = find_token(document.header, "\\filename_suffix", i)
        if i == -1:
            return
        del document.header[i]


def revert_paragraph_indentation(document):
    " Revert custom paragraph indentation to preamble code "
    i = find_token(document.header, "\\paragraph_indentation", 0)
    if i == -1:
        return
    length = get_value(document.header, "\\paragraph_indentation", i)
    # we need only remove the line if indentation is default
    if length != "default":
        # handle percent lengths
        length = latex_length(length)[1]
        insert_to_preamble(document, ["\\setlength{\\parindent}{" + length + "}"])
    del document.header[i]


def revert_percent_skip_lengths(document):
    " Revert relative lengths for paragraph skip separation to preamble code "
    i = find_token(document.header, "\\defskip", 0)
    if i == -1:
        return
    length = get_value(document.header, "\\defskip", i)
    # only revert when a custom length was set and when
    # it used a percent length
    if length in ('smallskip', 'medskip', 'bigskip'):
        return
    # handle percent lengths
    percent, length = latex_length(length)
    if percent:
        insert_to_preamble(document, ["\\setlength{\\parskip}{" + length + "}"])
        # set defskip to medskip as default
        document.header[i] = "\\defskip medskip"


def revert_percent_vspace_lengths(document):
    " Revert relative VSpace lengths to ERT "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset VSpace", i)
        if i == -1:
            break
        # only revert if a custom length was set and if
        # it used a percent length
        r = re.compile(r'\\begin_inset VSpace (.*)$')
        m = r.match(document.body[i])
        length = m.group(1)
        if length in ('defskip', 'smallskip', 'medskip', 'bigskip', 'vfill'):
            i += 1
            continue
        # check if the space has a star (protected space)
        protected = (document.body[i].rfind("*") != -1)
        if protected:
            length = length.rstrip('*')
        # handle percent lengths
        percent, length = latex_length(length)
        # revert the VSpace inset to ERT
        if percent:
            if protected:
                subst = put_cmd_in_ert("\\vspace*{" + length + "}")
            else:
                subst = put_cmd_in_ert("\\vspace{" + length + "}")
            document.body[i:i + 2] = subst
        i += 1


def revert_percent_hspace_lengths(document):
    " Revert relative HSpace lengths to ERT "
    i = 0
    while True:
        i = find_token_exact(document.body, "\\begin_inset space \\hspace", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of inset at line " + str(i))
            i += 1
            continue
        # only revert if a custom length was set...
        length = get_value(document.body, '\\length', i + 1, j)
        if length == '':
            document.warning("Malformed lyx document: Missing '\\length' in Space inset.")
            i = j
            continue
        protected = ""
        if document.body[i].find("\\hspace*{}") != -1:
            protected = "*"
        # ...and if it used a percent length
        percent, length = latex_length(length)
        # revert the HSpace inset to ERT
        if percent:
            subst = put_cmd_in_ert("\\hspace" + protected + "{" + length + "}")
            document.body[i:j + 1] = subst
        # if we did a substitution, this will still be ok
        i = j


def revert_hspace_glue_lengths(document):
    " Revert HSpace glue lengths to ERT "
    i = 0
    while True:
        i = find_token_exact(document.body, "\\begin_inset space \\hspace", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of inset at line " + str(i))
            i += 1
            continue
        length = get_value(document.body, '\\length', i + 1, j)
        if length == '':
            document.warning("Malformed lyx document: Missing '\\length' in Space inset.")
            i = j
            continue
        protected = ""
        if document.body[i].find("\\hspace*{}") != -1:
            protected = "*"
        # only revert if the length contains a plus or minus at pos != 0
        if length.find('-', 1) != -1 or length.find('+', 1) != -1:
            # handle percent lengths
            length = latex_length(length)[1]
            # revert the HSpace inset to ERT
            subst = put_cmd_in_ert("\\hspace" + protected + "{" + length + "}")
            document.body[i:j + 1] = subst
        i = j


def convert_author_id(document):
    " Add the author_id to the \\author definition and make sure 0 is not used"
    i = 0
    anum = 1
    re_author = re.compile(r'(\\author) (\".*\")\s*(.*)$')

    while True:
        i = find_token(document.header, "\\author", i)
        if i == -1:
            break
        m = re_author.match(document.header[i])
        if m:
            name = m.group(2)
            email = m.group(3)
            document.header[i] = "\\author %i %s %s" % (anum, name, email)
            anum += 1
        i += 1

    i = 0
    while True:
        i = find_token(document.body, "\\change_", i)
        if i == -1:
            break
        change = document.body[i].split(' ')
        if len(change) == 3:
            type = change[0]
            author_id = int(change[1])
            time = change[2]
            document.body[i] = "%s %i %s" % (type, author_id + 1, time)
        i += 1
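
# Example for convert_author_id (hypothetical values): a tracked-change line such as
# "\change_inserted 0 1281991106" becomes "\change_inserted 1 1281991106", keeping the
# body's author ids in step with the renumbered \author header lines.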


def revert_author_id(document):
    " Remove the author_id from the \\author definition "
    i = 0
    anum = 0
    rx = re.compile(r'(\\author)\s+(-?\d+)\s+(\".*\")\s*(.*)$')
    idmap = dict()

    while True:
        i = find_token(document.header, "\\author", i)
        if i == -1:
            break
        m = rx.match(document.header[i])
        if m:
            author_id = int(m.group(2))
            idmap[author_id] = anum
            name = m.group(3)
            email = m.group(4)
            document.header[i] = "\\author %s %s" % (name, email)
        i += 1
        # FIXME Should this be incremented if we didn't match?
        anum += 1

    i = 0
    while True:
        i = find_token(document.body, "\\change_", i)
        if i == -1:
            break
        change = document.body[i].split(' ')
        if len(change) == 3:
            type = change[0]
            author_id = int(change[1])
            time = change[2]
            document.body[i] = "%s %i %s" % (type, idmap[author_id], time)
        i += 1


def revert_suppress_date(document):
    " Revert suppressing of default document date to preamble code "
    i = find_token(document.header, "\\suppress_date", 0)
    if i == -1:
        return
    # remove the preamble line and write to the preamble
    # when suppress_date was true
    date = str2bool(get_value(document.header, "\\suppress_date", i))
    if date:
        add_to_preamble(document, ["\\date{}"])
    del document.header[i]


def convert_mhchem(document):
    "Set mhchem to off for versions older than 1.6.x"
    if document.start < 277:
        # LyX 1.5.x and older did never load mhchem.
        # Therefore we must switch it off: Documents that use mhchem have
        # a manual \usepackage anyway, and documents not using mhchem but
        # custom macros with the same names as mhchem commands might get
        # corrupted if mhchem is automatically loaded.
        mhchem = 0 # off
    else:
        # LyX 1.6.x did always load mhchem automatically.
        mhchem = 1 # auto
    i = find_token(document.header, "\\use_esint", 0)
    if i == -1:
        # pre-1.5.x document
        i = find_token(document.header, "\\use_amsmath", 0)
        if i == -1:
            document.warning("Malformed LyX document: Could not find amsmath or esint setting.")
            return
    document.header.insert(i + 1, "\\use_mhchem %d" % mhchem)


def revert_mhchem(document):
    "Revert mhchem loading to preamble code"

    mhchem = "off"
    i = find_token(document.header, "\\use_mhchem", 0)
    if i == -1:
        document.warning("Malformed LyX document: Could not find mhchem setting.")
        mhchem = "auto"
    else:
        val = get_value(document.header, "\\use_mhchem", i)
        if val == "1":
            mhchem = "auto"
        elif val == "2":
            mhchem = "on"
        del document.header[i]

    if mhchem == "off":
        # don't load case
        return

    if mhchem == "auto":
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset Formula", i)
            if i == -1:
                break
            line = document.body[i]
            if line.find("\\ce{") != -1 or line.find("\\cf{") != -1:
                mhchem = "on"
                break
            i += 1

    if mhchem == "on":
        pre = ["\\PassOptionsToPackage{version=3}{mhchem}",
               "\\usepackage{mhchem}"]
        insert_to_preamble(document, pre)


def revert_fontenc(document):
    " Remove fontencoding param "
    if not del_token(document.header, '\\fontencoding', 0):
        document.warning("Malformed LyX document: Missing \\fontencoding.")


def merge_gbrief(document):
    " Merge g-brief-en and g-brief-de to one class "

    if document.textclass != "g-brief-de":
        if document.textclass == "g-brief-en":
            document.textclass = "g-brief"
            document.set_textclass()
        return

    obsoletedby = { "Brieftext": "Letter",
                    "Unterschrift": "Signature",
                    "Strasse": "Street",
                    "Zusatz": "Addition",
                    "Ort": "Town",
                    "Land": "State",
                    "RetourAdresse": "ReturnAddress",
                    "MeinZeichen": "MyRef",
                    "IhrZeichen": "YourRef",
                    "IhrSchreiben": "YourMail",
                    "Telefon": "Phone",
                    "BLZ": "BankCode",
                    "Konto": "BankAccount",
                    "Postvermerk": "PostalComment",
                    "Adresse": "Address",
                    "Datum": "Date",
                    "Betreff": "Reference",
                    "Anrede": "Opening",
                    "Anlagen": "Encl.",
                    "Verteiler": "cc",
                    "Gruss": "Closing"}
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout", i)
        if i == -1:
            break

        layout = document.body[i][14:]
        if layout in obsoletedby:
            document.body[i] = "\\begin_layout " + obsoletedby[layout]

        i += 1

    document.textclass = "g-brief"
    document.set_textclass()


def revert_gbrief(document):
    " Revert g-brief to g-brief-en "
    if document.textclass == "g-brief":
        document.textclass = "g-brief-en"
        document.set_textclass()


def revert_html_options(document):
    " Remove html options "
    del_token(document.header, '\\html_use_mathml', 0)
    del_token(document.header, '\\html_be_strict', 0)


def revert_includeonly(document):
    i = 0
    while True:
        i = find_token(document.header, "\\begin_includeonly", i)
        if i == -1:
            return
        j = find_end_of(document.header, i, "\\begin_includeonly", "\\end_includeonly")
        if j == -1:
            document.warning("Unable to find end of includeonly section!!")
            break
        document.header[i : j + 1] = []


def revert_includeall(document):
    " Remove maintain_unincluded_children param "
    del_token(document.header, '\\maintain_unincluded_children', 0)


def revert_multirow(document):
    " Revert multirow cells in tables to TeX-code"

    # first, let's find out if we need to do anything
    # cell type 3 is multirow begin cell
    i = find_token(document.body, '<cell multirow="3"', 0)
    if i == -1:
        return

    add_to_preamble(document, ["\\usepackage{multirow}"])

    begin_table = 0
    while True:
        # find begin/end of table
        begin_table = find_token(document.body, '<lyxtabular version=', begin_table)
        if begin_table == -1:
            break
        end_table = find_end_of(document.body, begin_table, '<lyxtabular', '</lyxtabular>')
        if end_table == -1:
            document.warning("Malformed LyX document: Could not find end of table.")
            begin_table += 1
            continue
        # does this table have multirow?
        i = find_token(document.body, '<cell multirow="3"', begin_table, end_table)
        if i == -1:
            begin_table = end_table
            continue

        # store the number of rows and columns
        numrows = get_option_value(document.body[begin_table], "rows")
        numcols = get_option_value(document.body[begin_table], "columns")
        try:
            numrows = int(numrows)
            numcols = int(numcols)
        except:
            document.warning(numrows)
            document.warning("Unable to determine rows and columns!")
            begin_table = end_table
            continue

        mrstarts = []
        multirows = []
        # collect info on rows and columns of this table.
        begin_row = begin_table
        for row in range(numrows):
            begin_row = find_token(document.body, '<row>', begin_row, end_table)
            if begin_row == -1:
                document.warning("Can't find row " + str(row + 1))
                break
            end_row = find_end_of(document.body, begin_row, '<row>', '</row>')
            if end_row == -1:
                document.warning("Can't find end of row " + str(row + 1))
                break
            begin_cell = begin_row
            multirows.append([])
            for column in range(numcols):
                begin_cell = find_token(document.body, '<cell ', begin_cell, end_row)
                if begin_cell == -1:
                    document.warning("Can't find column " + str(column + 1) + \
                        " in row " + str(row + 1))
                    break
                # NOTE
                # this will fail if someone puts "</cell>" in a cell, but
                # that seems fairly unlikely.
                end_cell = find_end_of(document.body, begin_cell, '<cell', '</cell>')
                if end_cell == -1:
                    document.warning("Can't find end of column " + str(column + 1) + \
                        " in row " + str(row + 1))
                    break
                multirows[row].append([begin_cell, end_cell, 0])
                if document.body[begin_cell].find('multirow="3"') != -1:
                    multirows[row][column][2] = 3 # begin multirow
                    mrstarts.append([row, column])
                elif document.body[begin_cell].find('multirow="4"') != -1:
                    multirows[row][column][2] = 4 # in multirow
                begin_cell = end_cell
            begin_row = end_row
        # end of table info collection

        # work from the back to avoid messing up numbering
        mrstarts.reverse()
        for m in mrstarts:
            row = m[0]
            col = m[1]
            # get column width
            col_width = get_option_value(document.body[begin_table + 2 + col], "width")
            # "0pt" means that no width is specified
            if not col_width or col_width == "0pt":
                col_width = "*"
            # determine the number of cells that are part of the multirow
            nummrs = 1
            for r in range(row + 1, numrows):
                if multirows[r][col][2] != 4:
                    break
                nummrs += 1
                # take the opportunity to revert this line
                lineno = multirows[r][col][0]
                document.body[lineno] = document.body[lineno].\
                    replace(' multirow="4" ', ' ').\
                    replace('valignment="middle"', 'valignment="top"').\
                    replace(' topline="true" ', ' ')
                # remove bottom line of previous multirow-part cell
                lineno = multirows[r - 1][col][0]
                document.body[lineno] = document.body[lineno].replace(' bottomline="true" ', ' ')
            # revert beginning cell
            bcell = multirows[row][col][0]
            ecell = multirows[row][col][1]
            document.body[bcell] = document.body[bcell].\
                replace(' multirow="3" ', ' ').\
                replace('valignment="middle"', 'valignment="top"')
            blay = find_token(document.body, "\\begin_layout", bcell, ecell)
            if blay == -1:
                document.warning("Can't find layout for cell!")
                continue
            bend = find_end_of_layout(document.body, blay)
            if bend == -1:
                document.warning("Can't find end of layout for cell!")
                continue
            # do the later one first, so as not to mess up the numbering
            # we are wrapping the whole cell in this ert
            # so before the end of the layout...
            document.body[bend:bend] = put_cmd_in_ert("}")
            # ...and after the beginning
            document.body[blay + 1:blay + 1] = \
                put_cmd_in_ert("\\multirow{" + str(nummrs) + "}{" + col_width + "}{")

        begin_table = end_table


def convert_math_output(document):
    " Convert \\html_use_mathml to \\html_math_output "
    i = find_token(document.header, "\\html_use_mathml", 0)
    if i == -1:
        return
    rgx = re.compile(r'\\html_use_mathml\s+(\w+)')
    m = rgx.match(document.header[i])
    newval = "0" # MathML
    if m:
        val = str2bool(m.group(1))
        if not val:
            newval = "2" # Images
    else:
        document.warning("Can't match " + document.header[i])
    document.header[i] = "\\html_math_output " + newval


def revert_math_output(document):
    " Revert \\html_math_output to \\html_use_mathml "
    i = find_token(document.header, "\\html_math_output", 0)
    if i == -1:
        return
    rgx = re.compile(r'\\html_math_output\s+(\d)')
    m = rgx.match(document.header[i])
    newval = "true"
    if m:
        val = m.group(1)
        if val == "1" or val == "2":
            newval = "false"
    else:
        document.warning("Unable to match " + document.header[i])
    document.header[i] = "\\html_use_mathml " + newval


def revert_inset_preview(document):
    " Dissolves the preview inset "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Preview", i)
        if i == -1:
            return
        iend = find_end_of_inset(document.body, i)
        if iend == -1:
            document.warning("Malformed LyX document: Could not find end of Preview inset.")
            i += 1
            continue

        # This has several issues.
        # We need to do something about the layouts inside InsetPreview.
        # If we just leave the first one, then we have something like:
        # \begin_layout Standard
        # ...
        # \begin_layout Standard
        # and we get a "no \end_layout" error. So something has to be done.
        # Ideally, we would check if it is the same as the layout we are in.
        # If so, we just remove it; if not, we end the active one. But it is
        # not easy to know what layout we are in, due to depth changes, etc,
        # and it is not clear to me how much work it is worth doing. In most
        # cases, the layout will probably be the same.
        #
        # For the same reason, we have to remove the \end_layout tag at the
        # end of the last layout in the inset. Again, that will sometimes be
        # wrong, but it will usually be right. To know what to do, we would
        # again have to know what layout the inset is in.

        blay = find_token(document.body, "\\begin_layout", i, iend)
        if blay == -1:
            document.warning("Can't find layout for preview inset!")
            # always do the later one first...
            del document.body[iend]
            del document.body[i]
            # deletions mean we do not need to reset i
            continue

        # This is where we would check what layout we are in.
        # The check for Standard is definitely wrong.
        #
        # lay = document.body[blay].split(None, 1)[1]
        # if lay != oldlayout:
        #     # record a boolean to tell us what to do later....
        #     # better to do it later, since (a) it won't mess up
        #     # the numbering and (b) we only modify at the end.

        # we want to delete the last \\end_layout in this inset, too.
        # note that this may not be the \\end_layout that goes with blay!!
        bend = find_end_of_layout(document.body, blay)
        while True:
            tmp = find_token(document.body, "\\end_layout", bend + 1, iend)
            if tmp == -1:
                break
            bend = tmp
        if bend == blay:
            document.warning("Unable to find last layout in preview inset!")
            del document.body[iend]
            del document.body[i]
            # deletions mean we do not need to reset i
            continue
        # always do the later one first...
        del document.body[iend]
        del document.body[bend]
        del document.body[i:blay + 1]
        # we do not need to reset i


def revert_equalspacing_xymatrix(document):
    " Revert a Formula with xymatrix@! to an ERT inset "
    i = 0
    has_preamble = False
    has_equal_spacing = False

    while True:
        i = find_token(document.body, "\\begin_inset Formula", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Formula inset.")
            i += 1
            continue

        for curline in range(i, j):
            found = document.body[curline].find("\\xymatrix@!")
            if found != -1:
                break

        if found != -1:
            has_equal_spacing = True
            content = [document.body[i][21:]]
            content += document.body[i + 1:j]
            subst = put_cmd_in_ert(content)
            document.body[i:j + 1] = subst
            i += len(subst) - (j - i) + 1
        else:
            for curline in range(i, j):
                l = document.body[curline].find("\\xymatrix")
                if l != -1:
                    has_preamble = True
                    break
            i = j + 1

    if has_equal_spacing and not has_preamble:
        add_to_preamble(document, ['\\usepackage[all]{xy}'])


def revert_notefontcolor(document):
    " Reverts greyed-out note font color to preamble code "

    i = find_token(document.header, "\\notefontcolor", 0)
    if i == -1:
        return

    colorcode = get_value(document.header, '\\notefontcolor', i)
    del document.header[i]

    # are there any grey notes?
    if find_token(document.body, "\\begin_inset Note Greyedout", 0) == -1:
        # no need to do anything else, and \renewcommand will throw
        # an error since lyxgreyedout will not exist.
        return

    # the color code is in the form #rrggbb where every character denotes a hex number
    red = hex2ratio(colorcode[1:3])
    green = hex2ratio(colorcode[3:5])
    blue = hex2ratio(colorcode[5:7])
    # write the preamble
    insert_to_preamble(document,
        [ '% for greyed-out notes',
          '\\@ifundefined{definecolor}{\\usepackage{color}}{}',
          '\\definecolor{note_fontcolor}{rgb}{%s,%s,%s}' % (red, green, blue),
          '\\renewenvironment{lyxgreyedout}',
          ' {\\textcolor{note_fontcolor}\\bgroup}{\\egroup}'])
|
2010-03-31 00:46:50 +00:00
|
|
|
|
|
|
|
|
2010-04-01 00:40:19 +00:00
|
|
|
def revert_turkmen(document):
    "Set language Turkmen to English"

    if document.language == "turkmen":
        document.language = "english"
        i = find_token(document.header, "\\language", 0)
        if i != -1:
            document.header[i] = "\\language english"

    j = 0
    while True:
        j = find_token(document.body, "\\lang turkmen", j)
        if j == -1:
            return
        document.body[j] = document.body[j].replace("\\lang turkmen", "\\lang english")
        j += 1


def revert_fontcolor(document):
    " Reverts font color to preamble code "
    i = find_token(document.header, "\\fontcolor", 0)
    if i == -1:
        return
    colorcode = get_value(document.header, '\\fontcolor', i)
    del document.header[i]
    # don't clutter the preamble if font color is not set
    if colorcode == "#000000":
        return
    # the color code is in the form #rrggbb where every character denotes a hex number
    red = hex2ratio(colorcode[1:3])
    green = hex2ratio(colorcode[3:5])
    blue = hex2ratio(colorcode[5:7])
    # write the preamble
    insert_to_preamble(document,
                       ['% Set the font color',
                        '\\@ifundefined{definecolor}{\\usepackage{color}}{}',
                        '\\definecolor{document_fontcolor}{rgb}{%s,%s,%s}' % (red, green, blue),
                        '\\color{document_fontcolor}'])


def revert_shadedboxcolor(document):
    " Reverts shaded box color to preamble code "
    i = find_token(document.header, "\\boxbgcolor", 0)
    if i == -1:
        return
    colorcode = get_value(document.header, '\\boxbgcolor', i)
    del document.header[i]
    # the color code is in the form #rrggbb
    red = hex2ratio(colorcode[1:3])
    green = hex2ratio(colorcode[3:5])
    blue = hex2ratio(colorcode[5:7])
    # write the preamble
    insert_to_preamble(document,
                       ['% Set the color of boxes with shaded background',
                        '\\@ifundefined{definecolor}{\\usepackage{color}}{}',
                        "\\definecolor{shadecolor}{rgb}{%s,%s,%s}" % (red, green, blue)])


def revert_lyx_version(document):
    " Reverts LyX Version information from Inset Info "
    version = "LyX version"
    try:
        import lyx2lyx_version
        version = lyx2lyx_version.version
    except:
        pass

    i = 0
    while 1:
        i = find_token(document.body, '\\begin_inset Info', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            i += 1
            continue

        # We expect:
        # \begin_inset Info
        # type "lyxinfo"
        # arg "version"
        # \end_inset
        typ = get_quoted_value(document.body, "type", i, j)
        arg = get_quoted_value(document.body, "arg", i, j)
        if arg != "version" or typ != "lyxinfo":
            i = j + 1
            continue

        # We do not actually know the version of LyX used to produce the document.
        # But we can use our version, since we are reverting.
        s = [version]
        # Now we want to check if the line after "\end_inset" is empty. It normally
        # is, so we want to remove it, too.
        lastline = j + 1
        if document.body[j + 1].strip() == "":
            lastline = j + 2
        document.body[i:lastline] = s
        i += 1


def revert_math_scale(document):
    " Remove math scaling and LaTeX options "
    del_token(document.header, '\\html_math_img_scale', 0)
    del_token(document.header, '\\html_latex_start', 0)
    del_token(document.header, '\\html_latex_end', 0)


def revert_pagesizes(document):
    " Revert page sizes to default "
    i = find_token(document.header, '\\papersize', 0)
    if i != -1:
        size = document.header[i][11:]
        if size in ("a0paper", "a1paper", "a2paper", "a6paper",
                    "b0paper", "b1paper", "b2paper", "b6paper",
                    "b0j", "b1j", "b2j", "b3j", "b4j", "b5j", "b6j"):
            del document.header[i]


def revert_DIN_C_pagesizes(document):
    " Revert DIN C page sizes to default "
    i = find_token(document.header, '\\papersize', 0)
    if i != -1:
        size = document.header[i][11:]
        if size in ("c0paper", "c1paper", "c2paper", "c3paper",
                    "c4paper", "c5paper", "c6paper"):
            del document.header[i]


def convert_html_quotes(document):
    " Remove quotes around html_latex_start and html_latex_end "

    i = find_token(document.header, '\\html_latex_start', 0)
    if i != -1:
        line = document.header[i]
        l = re.compile(r'\\html_latex_start\s+"(.*)"')
        m = l.match(line)
        if m:
            document.header[i] = "\\html_latex_start " + m.group(1)

    i = find_token(document.header, '\\html_latex_end', 0)
    if i != -1:
        line = document.header[i]
        l = re.compile(r'\\html_latex_end\s+"(.*)"')
        m = l.match(line)
        if m:
            document.header[i] = "\\html_latex_end " + m.group(1)


def revert_html_quotes(document):
    " Add quotes back around html_latex_start and html_latex_end "

    i = find_token(document.header, '\\html_latex_start', 0)
    if i != -1:
        line = document.header[i]
        l = re.compile(r'\\html_latex_start\s+(.*)')
        m = l.match(line)
        if not m:
            document.warning("Weird html_latex_start line: " + line)
            del document.header[i]
        else:
            document.header[i] = "\\html_latex_start \"" + m.group(1) + "\""

    i = find_token(document.header, '\\html_latex_end', 0)
    if i != -1:
        line = document.header[i]
        l = re.compile(r'\\html_latex_end\s+(.*)')
        m = l.match(line)
        if not m:
            document.warning("Weird html_latex_end line: " + line)
            del document.header[i]
        else:
            document.header[i] = "\\html_latex_end \"" + m.group(1) + "\""


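# Illustration (the value shown is made up): convert_html_quotes turns a header
# line such as
#   \html_latex_start "\begingroup"
# into
#   \html_latex_start \begingroup
# and revert_html_quotes puts the quotes back, so for well-formed lines the two
# routines are inverses of each other.

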
def revert_output_sync(document):
    " Remove forward search options "
    del_token(document.header, '\\output_sync_macro', 0)
    del_token(document.header, '\\output_sync', 0)


def revert_align_decimal(document):
    " Revert decimal-aligned table columns to centered alignment "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Unable to find end of Tabular inset at line " + str(i))
            i += 1
            continue
        cell = find_token(document.body, "<cell", i, j)
        if cell == -1:
            document.warning("Can't find any cells in Tabular inset at line " + str(i))
            i = j
            continue
        k = i + 1
        while True:
            k = find_token(document.body, "<column", k, cell)
            if k == -1:
                return
            if document.body[k].find('alignment="decimal"') == -1:
                k += 1
                continue
            remove_option(document.body, k, 'decimal_point')
            document.body[k] = \
                document.body[k].replace('alignment="decimal"', 'alignment="center"')
            k += 1


def convert_optarg(document):
    " Convert \\begin_inset OptArg to \\begin_inset Argument "
    i = 0
    while 1:
        i = find_token(document.body, '\\begin_inset OptArg', i)
        if i == -1:
            return
        document.body[i] = "\\begin_inset Argument"
        i += 1


def revert_argument(document):
    " Convert \\begin_inset Argument to \\begin_inset OptArg "
    i = 0
    while 1:
        i = find_token(document.body, '\\begin_inset Argument', i)
        if i == -1:
            return
        document.body[i] = "\\begin_inset OptArg"
        i += 1


def revert_makebox(document):
    " Convert \\makebox to TeX code "
    i = 0
    while 1:
        i = find_token(document.body, '\\begin_inset Box', i)
        if i == -1:
            break
        z = find_end_of_inset(document.body, i)
        if z == -1:
            document.warning("Malformed LyX document: Can't find end of box inset.")
            i += 1
            continue
        blay = find_token(document.body, "\\begin_layout", i, z)
        if blay == -1:
            document.warning("Malformed LyX document: Can't find layout in box.")
            i = z
            continue
        # by looking before the layout we make sure we're actually finding
        # an option, not text.
        j = find_token(document.body, 'use_makebox', i, blay)
        if j == -1:
            i = z
            continue

        # get_value returns a string, so compare against "1"
        if not check_token(document.body[i], "\\begin_inset Box Frameless") \
          or get_value(document.body, 'use_makebox', j) != "1":
            del document.body[j]
            i = z
            continue
        bend = find_end_of_layout(document.body, blay)
        if bend == -1 or bend > z:
            document.warning("Malformed LyX document: Can't find end of layout in box.")
            i = z
            continue
        # determine the alignment
        align = get_quoted_value(document.body, 'hor_pos', i, blay, "c")
        # determine the width
        length = get_quoted_value(document.body, 'width', i, blay, "50col%")
        length = latex_length(length)[1]
        # remove the \end_layout \end_inset pair
        document.body[bend:z + 1] = put_cmd_in_ert("}")
        subst = "\\makebox[" + length + "][" + align + "]{"
        document.body[i:blay + 1] = put_cmd_in_ert(subst)
        i += 1


def convert_use_makebox(document):
    " Adds use_makebox option for boxes "
    i = 0
    while 1:
        i = find_token(document.body, '\\begin_inset Box', i)
        if i == -1:
            return
        # all of this is to make sure we actually find the use_parbox
        # that is an option for this box, not some text elsewhere.
        z = find_end_of_inset(document.body, i)
        if z == -1:
            document.warning("Can't find end of box inset!!")
            i += 1
            continue
        blay = find_token(document.body, "\\begin_layout", i, z)
        if blay == -1:
            document.warning("Can't find layout in box inset!!")
            i = z
            continue
        # so now we are looking for use_parbox before the box's layout
        k = find_token(document.body, 'use_parbox', i, blay)
        if k == -1:
            document.warning("Malformed LyX document: Can't find use_parbox statement in box.")
            i = z
            continue
        document.body.insert(k + 1, "use_makebox 0")
        i = blay + 1  # not z + 1 (box insets may be nested)


def revert_IEEEtran(document):
    " Convert IEEEtran layouts and styles to TeX code "
    if document.textclass != "IEEEtran":
        return
    revert_flex_inset(document.body, "IEEE membership", "\\IEEEmembership")
    revert_flex_inset(document.body, "Lowercase", "\\MakeLowercase")
    layouts = ("Special Paper Notice", "After Title Text", "Publication ID",
               "Page headings", "Biography without photo")
    latexcmd = {"Special Paper Notice": "\\IEEEspecialpapernotice",
                "After Title Text": "\\IEEEaftertitletext",
                "Publication ID": "\\IEEEpubid"}
    obsoletedby = {"Page headings": "MarkBoth",
                   "Biography without photo": "BiographyNoPhoto"}
    for layout in layouts:
        i = 0
        while True:
            i = find_token(document.body, '\\begin_layout ' + layout, i)
            if i == -1:
                break
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " + layout + " layout.")
                i += 1
                continue
            if layout in obsoletedby:
                document.body[i] = "\\begin_layout " + obsoletedby[layout]
                i = j
                continue
            content = lyx2latex(document, document.body[i:j + 1])
            add_to_preamble(document, [latexcmd[layout] + "{" + content + "}"])
            del document.body[i:j + 1]
            # no need to reset i


def convert_prettyref(document):
    " Converts prettyref references to neutral formatted refs "
    re_ref = re.compile(r'^\s*reference\s+"(\w+):(\S+)"')
    nm_ref = re.compile(r'^\s*name\s+"(\w+):(\S+)"')

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: No end of InsetRef!")
            i += 1
            continue
        k = find_token(document.body, "LatexCommand prettyref", i, j)
        if k != -1:
            document.body[k] = "LatexCommand formatted"
        i = j + 1
    document.header.insert(-1, "\\use_refstyle 0")


def revert_refstyle(document):
    " Reverts neutral formatted refs to prettyref "
    re_ref = re.compile(r'^reference\s+"(\w+):(\S+)"')
    nm_ref = re.compile(r'^\s*name\s+"(\w+):(\S+)"')

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: No end of InsetRef")
            i += 1
            continue
        k = find_token(document.body, "LatexCommand formatted", i, j)
        if k != -1:
            document.body[k] = "LatexCommand prettyref"
        i = j + 1
    i = find_token(document.header, "\\use_refstyle", 0)
    if i != -1:
        document.header.pop(i)


def revert_nameref(document):
    " Convert namerefs to regular references "
    cmds = ["Nameref", "nameref"]
    foundone = False
    rx = re.compile(r'reference "(.*)"')
    for cmd in cmds:
        i = 0
        oldcmd = "LatexCommand " + cmd
        while 1:
            # It seems better to look for this, as most of the reference
            # insets won't be ones we care about.
            i = find_token(document.body, oldcmd, i)
            if i == -1:
                break
            cmdloc = i
            i += 1
            # Make sure it is actually in an inset!
            # A normal line could begin with "LatexCommand nameref"!
            val = is_in_inset(document.body, cmdloc, \
                "\\begin_inset CommandInset ref")
            if not val:
                continue
            stins, endins = val

            # ok, so it is in an InsetRef
            refline = find_token(document.body, "reference", stins, endins)
            if refline == -1:
                document.warning("Can't find reference for inset at line " + str(stins) + "!!")
                continue
            m = rx.match(document.body[refline])
            if not m:
                document.warning("Can't match reference line: " + document.body[refline])
                continue
            foundone = True
            ref = m.group(1)
            newcontent = put_cmd_in_ert('\\' + cmd + '{' + ref + '}')
            document.body[stins:endins + 1] = newcontent

    if foundone:
        add_to_preamble(document, ["\\usepackage{nameref}"])


def remove_Nameref(document):
    " Convert Nameref commands to nameref commands "
    i = 0
    while 1:
        # It seems better to look for this, as most of the reference
        # insets won't be ones we care about.
        i = find_token(document.body, "LatexCommand Nameref", i)
        if i == -1:
            break
        cmdloc = i
        i += 1

        # Make sure it is actually in an inset!
        val = is_in_inset(document.body, cmdloc, \
            "\\begin_inset CommandInset ref")
        if not val:
            continue
        document.body[cmdloc] = "LatexCommand nameref"


def revert_mathrsfs(document):
    " Load mathrsfs if \\mathscr is used in the document "
    for line in document.body:
        if line.find("\\mathscr{") != -1:
            add_to_preamble(document, ["\\usepackage{mathrsfs}"])
            return


def convert_flexnames(document):
    "Convert \\begin_inset Flex Custom:Style to \\begin_inset Flex Style and similarly for CharStyle and Element."

    i = 0
    rx = re.compile(r'^\\begin_inset Flex (?:Custom|CharStyle|Element):(.+)$')
    while True:
        i = find_token(document.body, "\\begin_inset Flex", i)
        if i == -1:
            return
        m = rx.match(document.body[i])
        if m:
            document.body[i] = "\\begin_inset Flex " + m.group(1)
        i += 1


flex_insets = {
    "Alert": "CharStyle:Alert",
    "Code": "CharStyle:Code",
    "Concepts": "CharStyle:Concepts",
    "E-Mail": "CharStyle:E-Mail",
    "Emph": "CharStyle:Emph",
    "Expression": "CharStyle:Expression",
    "Initial": "CharStyle:Initial",
    "Institute": "CharStyle:Institute",
    "Meaning": "CharStyle:Meaning",
    "Noun": "CharStyle:Noun",
    "Strong": "CharStyle:Strong",
    "Structure": "CharStyle:Structure",
    "ArticleMode": "Custom:ArticleMode",
    "Endnote": "Custom:Endnote",
    "Glosse": "Custom:Glosse",
    "PresentationMode": "Custom:PresentationMode",
    "Tri-Glosse": "Custom:Tri-Glosse"
}

flex_elements = {
    "Abbrev": "Element:Abbrev",
    "CCC-Code": "Element:CCC-Code",
    "Citation-number": "Element:Citation-number",
    "City": "Element:City",
    "Code": "Element:Code",
    "CODEN": "Element:CODEN",
    "Country": "Element:Country",
    "Day": "Element:Day",
    "Directory": "Element:Directory",
    "Dscr": "Element:Dscr",
    "Email": "Element:Email",
    "Emph": "Element:Emph",
    "Filename": "Element:Filename",
    "Firstname": "Element:Firstname",
    "Fname": "Element:Fname",
    "GuiButton": "Element:GuiButton",
    "GuiMenu": "Element:GuiMenu",
    "GuiMenuItem": "Element:GuiMenuItem",
    "ISSN": "Element:ISSN",
    "Issue-day": "Element:Issue-day",
    "Issue-months": "Element:Issue-months",
    "Issue-number": "Element:Issue-number",
    "KeyCap": "Element:KeyCap",
    "KeyCombo": "Element:KeyCombo",
    "Keyword": "Element:Keyword",
    "Literal": "Element:Literal",
    "MenuChoice": "Element:MenuChoice",
    "Month": "Element:Month",
    "Orgdiv": "Element:Orgdiv",
    "Orgname": "Element:Orgname",
    "Postcode": "Element:Postcode",
    "SS-Code": "Element:SS-Code",
    "SS-Title": "Element:SS-Title",
    "State": "Element:State",
    "Street": "Element:Street",
    "Surname": "Element:Surname",
    "Volume": "Element:Volume",
    "Year": "Element:Year"
}


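# The two tables above map the plain Flex inset names written from format 403
# onwards back to the prefixed names ("CharStyle:", "Custom:", "Element:")
# that older formats expect; revert_flexnames below picks one of the tables
# depending on whether the document backend is "latex" or a DocBook-style one.

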
def revert_flexnames(document):
    " Revert Flex inset names to the old prefixed forms (CharStyle:, Custom:, Element:) "
    if document.backend == "latex":
        flexlist = flex_insets
    else:
        flexlist = flex_elements

    rx = re.compile(r'^\\begin_inset Flex\s+(.+)$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Flex", i)
        if i == -1:
            return
        m = rx.match(document.body[i])
        if not m:
            document.warning("Illegal flex inset: " + document.body[i])
            i += 1
            continue
        style = m.group(1)
        if style in flexlist:
            document.body[i] = "\\begin_inset Flex " + flexlist[style]
        i += 1


def convert_mathdots(document):
    " Load mathdots automatically "
    i = find_token(document.header, "\\use_mhchem", 0)
    if i == -1:
        i = find_token(document.header, "\\use_esint", 0)
    if i != -1:
        document.header.insert(i + 1, "\\use_mathdots 1")


def revert_mathdots(document):
    " Load mathdots if used in the document "

    mathdots = find_token(document.header, "\\use_mathdots", 0)
    if mathdots == -1:
        document.warning("No \\use_mathdots line. Assuming auto.")
        usedots = 1
    else:
        val = get_value(document.header, "\\use_mathdots", mathdots)
        del document.header[mathdots]
        try:
            usedots = int(val)
        except:
            document.warning("Invalid \\use_mathdots value: " + val + ". Assuming auto.")
            # probably usedots has not been changed, but be safe.
            usedots = 1

    if usedots == 0:
        # do not load case
        return
    if usedots == 2:
        # force load case
        add_to_preamble(document, ["\\usepackage{mathdots}"])
        return

    # so we are in the auto case. we want to load mathdots if \iddots is used.
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Formula', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
            i += 1
            continue
        code = "\n".join(document.body[i:j])
        if code.find("\\iddots") != -1:
            add_to_preamble(document, ["\\@ifundefined{iddots}{\\usepackage{mathdots}}"])
            return
        i = j


def convert_rule(document):
    " Convert \\lyxline to CommandInset line. "
    i = 0

    inset = ['\\begin_inset CommandInset line',
             'LatexCommand rule',
             'offset "0.5ex"',
             'width "100line%"',
             'height "1pt"', '',
             '\\end_inset', '', '']

    # if paragraphs are indented, we may have to unindent to get the
    # line to be full-width.
    indent = get_value(document.header, "\\paragraph_separation", 0)
    have_indent = (indent == "indent")

    while True:
        i = find_token(document.body, "\\lyxline", i)
        if i == -1:
            return

        # we need to find out if this line follows other content
        # in its paragraph. find its layout....
        lastlay = find_token_backwards(document.body, "\\begin_layout", i)
        if lastlay == -1:
            document.warning("Can't find layout for line at " + str(i))
            # do the best we can.
            document.body[i:i+1] = inset
            i += len(inset)
            continue

        # ...and look for other content before it.
        lineisfirst = True
        for line in document.body[lastlay + 1:i]:
            # is it empty or a paragraph option?
            if not line or line[0] == '\\':
                continue
            lineisfirst = False
            break

        if lineisfirst:
            document.body[i:i+1] = inset
            if have_indent:
                # we need to unindent, lest the line be too long
                document.body.insert(lastlay + 1, "\\noindent")
            i += len(inset)
        else:
            # so our line is in the middle of a paragraph
            # we need to add a new line, lest this line follow the
            # other content on that line and run off the side of the page
            document.body[i:i+1] = inset
            document.body[i:i] = ["\\begin_inset Newline newline", "\\end_inset", ""]
            i += len(inset)


def revert_rule(document):
    " Revert line insets to TeX code "
    i = 0
    while 1:
        i = find_token(document.body, "\\begin_inset CommandInset line", i)
        if i == -1:
            return
        # find end of inset
        j = find_token(document.body, "\\end_inset", i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of line inset.")
            return
        # determine the optional offset
        offset = get_quoted_value(document.body, 'offset', i, j)
        if offset:
            offset = '[' + offset + ']'
        # determine the width
        width = get_quoted_value(document.body, 'width', i, j, "100col%")
        width = latex_length(width)[1]
        # determine the height
        height = get_quoted_value(document.body, 'height', i, j, "1pt")
        height = latex_length(height)[1]
        # output the \rule command; the offset brackets are already part of "offset"
        subst = "\\rule" + offset + "{" + width + "}{" + height + "}"
        document.body[i:j + 1] = put_cmd_in_ert(subst)
        i += len(subst) - (j - i)


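# Sketch of the reversion above (values illustrative): a line inset with
# offset "0.5ex", width "100col%" and height "1pt" ends up as ERT of the form
#   \rule[<offset>]{<width>}{<height>}
# where width and height have first been run through latex_length(), which
# turns LyX percentage units such as "col%" into \columnwidth-based lengths.

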
def revert_diagram(document):
    " Add the feyn package if \\Diagram is used in math "
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Formula', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Formula inset.")
            return
        lines = "\n".join(document.body[i:j])
        if lines.find("\\Diagram") == -1:
            i = j
            continue
        add_to_preamble(document, ["\\usepackage{feyn}"])
        # only need to do it once!
        return


chapters = ("amsbook", "book", "docbook-book", "elsart", "extbook", "extreport",
|
|
|
|
"jbook", "jreport", "jsbook", "literate-book", "literate-report", "memoir",
|
|
|
|
"mwbk", "mwrep", "recipebook", "report", "scrbook", "scrreprt", "svmono",
|
|
|
|
"svmult", "tbook", "treport", "tufte-book")
|
2010-09-19 22:12:06 +00:00
|
|
|
|
2010-11-02 15:24:49 +00:00
|
|
|
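# "chapters" lists the text classes whose top sectioning level is \chapter;
# convert_bibtex_clearpage below returns immediately for any class not in this
# tuple, since the extra clearpage only matters for chapter-level bibliographies.

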
def convert_bibtex_clearpage(document):
    " Insert a clear(double)page before a bibliography if the bibtotoc option is used "

    if document.textclass not in chapters:
        return

    i = find_token(document.header, '\\papersides', 0)
    sides = 0
    if i == -1:
        document.warning("Malformed LyX document: Can't find papersides definition.")
        document.warning("Assuming single sided.")
        sides = 1
    else:
        val = get_value(document.header, "\\papersides", i)
        try:
            sides = int(val)
        except:
            pass
        if sides != 1 and sides != 2:
            document.warning("Invalid papersides value: " + val)
            document.warning("Assuming single sided.")
            sides = 1

    j = 0
    while True:
        j = find_token(document.body, "\\begin_inset CommandInset bibtex", j)
        if j == -1:
            return

        k = find_end_of_inset(document.body, j)
        if k == -1:
            document.warning("Can't find end of Bibliography inset at line " + str(j))
            j += 1
            continue

        # only act if there is the option "bibtotoc"
        val = get_value(document.body, 'options', j, k)
        if not val:
            document.warning("Can't find options for bibliography inset at line " + str(j))
            j = k
            continue

        if val.find("bibtotoc") == -1:
            j = k
            continue

        # so we want to insert a new page right before the paragraph that
        # this bibliography thing is in.
        lay = find_token_backwards(document.body, "\\begin_layout", j)
        if lay == -1:
            document.warning("Can't find layout containing bibliography inset at line " + str(j))
            j = k
            continue

        if sides == 1:
            cmd = "clearpage"
        else:
            cmd = "cleardoublepage"
        subst = ['\\begin_layout Standard',
                 '\\begin_inset Newpage ' + cmd,
                 '\\end_inset', '', '',
                 '\\end_layout', '']
        document.body[lay:lay] = subst
        j = k + len(subst)


def check_passthru(document):
    " Return True if the textclass or a loaded module uses pass-thru (Chunk/Scrap) layouts "
    tc = document.textclass
    ok = (tc == "literate-article" or tc == "literate-book" or tc == "literate-report")
    if not ok:
        mods = document.get_module_list()
        for mod in mods:
            if mod == "sweave" or mod == "noweb":
                ok = True
                break
    return ok


def convert_passthru(document):
    " http://www.mail-archive.com/lyx-devel@lists.lyx.org/msg161298.html "
    if not check_passthru(document):
        return

    rx = re.compile(r"\\begin_layout \s*(\w+)")
    beg = 0
    for lay in ["Chunk", "Scrap"]:
        while True:
            beg = find_token(document.body, "\\begin_layout " + lay, beg)
            if beg == -1:
                break
            end = find_end_of_layout(document.body, beg)
            if end == -1:
                document.warning("Can't find end of layout at line " + str(beg))
                beg += 1
                continue

            # we are now going to replace newline insets within this layout
            # by new instances of this layout. so we have repeated layouts
            # instead of newlines.

            # if the paragraph has any customization, however, we do not want to
            # do the replacement.
            if document.body[beg + 1].startswith("\\"):
                beg = end + 1
                continue

            ns = beg
            while True:
                ns = find_token(document.body, "\\begin_inset Newline newline", ns, end)
                if ns == -1:
                    break
                ne = find_end_of_inset(document.body, ns)
                if ne == -1 or ne > end:
                    document.warning("Can't find end of inset at line " + str(ns))
                    ns += 1
                    continue
                if document.body[ne + 1] == "":
                    ne += 1
                subst = ["\\end_layout", "", "\\begin_layout " + lay]
                document.body[ns:ne + 1] = subst
                # now we need to adjust end, in particular, but might as well
                # do ns properly, too
                newlines = (ne - ns) - len(subst)
                ns += newlines + 2
                end += newlines + 2

            # ok, we now want to find out if the next layout is the
            # same as this one. if so, we will insert an extra copy of it
            didit = False
            next = find_token(document.body, "\\begin_layout", end)
            if next != -1:
                m = rx.match(document.body[next])
                if m:
                    nextlay = m.group(1)
                    if nextlay == lay:
                        subst = ["\\begin_layout " + lay, "", "\\end_layout", ""]
                        document.body[next:next] = subst
                        didit = True
            beg = end + 1
            if didit:
                beg += 4  # for the extra layout


def revert_passthru(document):
    " http://www.mail-archive.com/lyx-devel@lists.lyx.org/msg161298.html "
    if not check_passthru(document):
        return
    rx = re.compile(r"\\begin_layout \s*(\w+)")
    beg = 0
    for lay in ["Chunk", "Scrap"]:
        while True:
            beg = find_token(document.body, "\\begin_layout " + lay, beg)
            if beg == -1:
                break
            end = find_end_of_layout(document.body, beg)
            if end == -1:
                document.warning("Can't find end of layout at line " + str(beg))
                beg += 1
                continue

            # we now want to find out if the next layout is the
            # same as this one. but we will need to do this over and
            # over again.
            while True:
                next = find_token(document.body, "\\begin_layout", end)
                if next == -1:
                    break
                m = rx.match(document.body[next])
                if not m:
                    break
                nextlay = m.group(1)
                if nextlay != lay:
                    break
                # so it is the same layout again. we now want to know if it is empty.
                # but first let's check and make sure there is no content between the
                # two layouts. i'm not sure if that can happen or not.
                for l in range(end + 1, next):
                    document.warning("c'" + document.body[l] + "'")
                    if document.body[l] != "":
                        document.warning("Found content between adjacent " + lay + " layouts!")
                        break
                nextend = find_end_of_layout(document.body, next)
                if nextend == -1:
                    document.warning("Can't find end of layout at line " + str(next))
                    break
                empty = True
                for l in range(next + 1, nextend):
                    document.warning("e'" + document.body[l] + "'")
                    if document.body[l] != "":
                        empty = False
                        break
                if empty:
                    # empty layouts just get removed
                    # should we check if it's before yet another such layout?
                    del document.body[next : nextend + 1]
                    # and we do not want to check again. we know the next layout
                    # should be another Chunk and should be left as is.
                    break
                else:
                    # if it's not empty, then we want to insert a newline in place
                    # of the layout switch
                    subst = ["\\begin_inset Newline newline", "\\end_inset", ""]
                    document.body[end : next + 1] = subst
                    # and now we have to find the end of the new, larger layout
                    newend = find_end_of_layout(document.body, beg)
                    if newend == -1:
                        document.warning("Can't find end of new layout at line " + str(beg))
                        break
                    end = newend
            beg = end + 1


def revert_multirowOffset(document):
    " Revert multirow cells with offset in tables to TeX code "
    # this routine is the same as the revert_multirow routine except that
    # it checks additionally for the offset

    # first, let's find out if we need to do anything
    i = find_token(document.body, '<cell multirow="3" mroffset=', 0)
    if i == -1:
        return

    add_to_preamble(document, ["\\usepackage{multirow}"])

    rgx = re.compile(r'mroffset="[^"]+?"')
    begin_table = 0

    while True:
        # find begin/end of table
        begin_table = find_token(document.body, '<lyxtabular version=', begin_table)
        if begin_table == -1:
            break
        end_table = find_end_of(document.body, begin_table, '<lyxtabular', '</lyxtabular>')
        if end_table == -1:
            document.warning("Malformed LyX document: Could not find end of table.")
            begin_table += 1
            continue
        # does this table have multirow?
        i = find_token(document.body, '<cell multirow="3"', begin_table, end_table)
        if i == -1:
            begin_table = end_table
            continue

        # store the number of rows and columns
        numrows = get_option_value(document.body[begin_table], "rows")
        numcols = get_option_value(document.body[begin_table], "columns")
        try:
            numrows = int(numrows)
            numcols = int(numcols)
        except:
            document.warning(numrows)
            document.warning("Unable to determine rows and columns!")
            begin_table = end_table
            continue

        mrstarts = []
        multirows = []
        # collect info on rows and columns of this table.
        begin_row = begin_table
        for row in range(numrows):
            begin_row = find_token(document.body, '<row>', begin_row, end_table)
            if begin_row == -1:
                document.warning("Can't find row " + str(row + 1))
                break
            end_row = find_end_of(document.body, begin_row, '<row>', '</row>')
            if end_row == -1:
                document.warning("Can't find end of row " + str(row + 1))
                break
            begin_cell = begin_row
            multirows.append([])
            for column in range(numcols):
                begin_cell = find_token(document.body, '<cell ', begin_cell, end_row)
                if begin_cell == -1:
                    document.warning("Can't find column " + str(column + 1) +
                                     " in row " + str(row + 1))
                    break
                # NOTE
                # this will fail if someone puts "</cell>" in a cell, but
                # that seems fairly unlikely.
                end_cell = find_end_of(document.body, begin_cell, '<cell', '</cell>')
                if end_cell == -1:
                    document.warning("Can't find end of column " + str(column + 1) +
                                     " in row " + str(row + 1))
                    break
                multirows[row].append([begin_cell, end_cell, 0])
                if document.body[begin_cell].find('multirow="3" mroffset=') != -1:
                    multirows[row][column][2] = 3  # begin multirow
                    mrstarts.append([row, column])
                elif document.body[begin_cell].find('multirow="4"') != -1:
                    multirows[row][column][2] = 4  # in multirow
                begin_cell = end_cell
            begin_row = end_row
        # end of table info collection

        # work from the back to avoid messing up numbering
        mrstarts.reverse()
        for m in mrstarts:
            row = m[0]
            col = m[1]
            # get column width
            col_width = get_option_value(document.body[begin_table + 2 + col], "width")
            # "0pt" means that no width is specified
            if not col_width or col_width == "0pt":
                col_width = "*"
            # determine the number of cells that are part of the multirow
            nummrs = 1
            for r in range(row + 1, numrows):
                if multirows[r][col][2] != 4:
                    break
                nummrs += 1
                # take the opportunity to revert this line
                lineno = multirows[r][col][0]
                document.body[lineno] = document.body[lineno].\
                    replace(' multirow="4" ', ' ').\
                    replace('valignment="middle"', 'valignment="top"').\
                    replace(' topline="true" ', ' ')
                # remove bottom line of previous multirow-part cell
                lineno = multirows[r-1][col][0]
                document.body[lineno] = document.body[lineno].replace(' bottomline="true" ', ' ')
            # revert beginning cell
            bcell = multirows[row][col][0]
            ecell = multirows[row][col][1]
            offset = get_option_value(document.body[bcell], "mroffset")
            document.body[bcell] = document.body[bcell].\
                replace(' multirow="3" ', ' ').\
                replace('valignment="middle"', 'valignment="top"')
            # remove mroffset option
            document.body[bcell] = rgx.sub('', document.body[bcell])

            blay = find_token(document.body, "\\begin_layout", bcell, ecell)
            if blay == -1:
                document.warning("Can't find layout for cell!")
                continue
            bend = find_end_of_layout(document.body, blay)
            if bend == -1:
                document.warning("Can't find end of layout for cell!")
                continue
            # do the later one first, so as not to mess up the numbering
            # we are wrapping the whole cell in this ert
            # so before the end of the layout...
            document.body[bend:bend] = put_cmd_in_ert("}")
            # ...and after the beginning
            document.body[blay + 1:blay + 1] = \
                put_cmd_in_ert("\\multirow{" + str(nummrs) + "}{" + col_width + "}[" + offset + "]{")

        # on to the next table
        begin_table = end_table


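# The net effect of revert_multirowOffset: the first cell of each multirow is
# wrapped in ERT of the form
#   \multirow{<number of spanned rows>}{<column width or *>}[<mroffset>]{ ... }
# while the continuation cells below it are merely stripped of their multirow
# attributes.

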
def revert_script(document):
    " Convert subscript/superscript inset to TeX code "
    i = 0
    foundsubscript = False
    while 1:
        i = find_token(document.body, '\\begin_inset script', i)
        if i == -1:
            break
        z = find_end_of_inset(document.body, i)
        if z == -1:
            document.warning("Malformed LyX document: Can't find end of script inset.")
            i += 1
            continue
        blay = find_token(document.body, "\\begin_layout", i, z)
        if blay == -1:
            document.warning("Malformed LyX document: Can't find layout in script inset.")
            i = z
            continue

        if check_token(document.body[i], "\\begin_inset script subscript"):
            subst = '\\textsubscript{'
            foundsubscript = True
        elif check_token(document.body[i], "\\begin_inset script superscript"):
            subst = '\\textsuperscript{'
        else:
            document.warning("Malformed LyX document: Unknown type of script inset.")
            i = z
            continue
        bend = find_end_of_layout(document.body, blay)
        if bend == -1 or bend > z:
            document.warning("Malformed LyX document: Can't find end of layout in script inset.")
            i = z
            continue
        # remove the \end_layout \end_inset pair
        document.body[bend:z + 1] = put_cmd_in_ert("}")
        document.body[i:blay + 1] = put_cmd_in_ert(subst)
        i += 1
    # these classes provide a \textsubscript command:
    # FIXME: Would be nice if we could use the information of the .layout file here
    classes = ["memoir", "scrartcl", "scrbook", "scrlttr2", "scrreprt"]
    if foundsubscript and find_token_exact(classes, document.textclass, 0) == -1:
        add_to_preamble(document, ['\\usepackage{subscript}'])


def convert_use_xetex(document):
    " convert \\use_xetex to \\use_non_tex_fonts "
    i = find_token(document.header, "\\use_xetex", 0)
    if i == -1:
        return

    val = get_value(document.header, "\\use_xetex", 0)
    document.header[i] = "\\use_non_tex_fonts " + val


def revert_use_xetex(document):
    " revert \\use_non_tex_fonts to \\use_xetex "
    i = find_token(document.header, "\\use_non_tex_fonts", 0)
    if i == -1:
        document.warning("Malformed document. No \\use_non_tex_fonts param!")
        return

    val = get_value(document.header, "\\use_non_tex_fonts", 0)
    document.header[i] = "\\use_xetex " + val


def revert_labeling(document):
    " Revert the Labeling layout to List for non-KOMA classes "
    koma = ("scrartcl", "scrarticle-beamer", "scrbook", "scrlettr",
            "scrlttr2", "scrreprt")
    if document.textclass in koma:
        return
    i = 0
    while True:
        i = find_token_exact(document.body, "\\begin_layout Labeling", i)
        if i == -1:
            return
        document.body[i] = "\\begin_layout List"


def revert_langpack(document):
    " revert \\language_package parameter "
    i = find_token(document.header, "\\language_package", 0)
    if i == -1:
        document.warning("Malformed document. No \\language_package param!")
        return

    del document.header[i]


def convert_langpack(document):
    " Add \\language_package parameter "
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        document.warning("Malformed document. No \\language defined!")
        return

    document.header.insert(i + 1, "\\language_package default")


def revert_tabularwidth(document):
    " Remove the tabularwidth setting from Tabular insets "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Unable to find end of Tabular inset at line " + str(i))
            i += 1
            continue
        i += 1
        features = find_token(document.body, "<features", i, j)
        if features == -1:
            document.warning("Can't find any features in Tabular inset at line " + str(i))
            i = j
            continue
        if document.body[features].find('alignment="tabularwidth"') != -1:
            remove_option(document.body, features, 'tabularwidth')


def revert_html_css_as_file(document):
    " Remove the html_css_as_file header parameter "
    if not del_token(document.header, '\\html_css_as_file', 0):
        document.warning("Malformed LyX document: Missing \\html_css_as_file.")


##
# Conversion hub
#

supported_versions = ["2.0.0", "2.0"]
convert = [[346, []],
           [347, []],
           [348, []],
           [349, []],
           [350, []],
           [351, []],
           [352, [convert_splitindex]],
           [353, []],
           [354, []],
           [355, []],
           [356, []],
           [357, []],
           [358, []],
           [359, [convert_nomencl_width]],
           [360, []],
           [361, []],
           [362, []],
           [363, []],
           [364, []],
           [365, []],
           [366, []],
           [367, []],
           [368, []],
           [369, [convert_author_id]],
           [370, []],
           [371, [convert_mhchem]],
           [372, []],
           [373, [merge_gbrief]],
           [374, []],
           [375, []],
           [376, []],
           [377, []],
           [378, []],
           [379, [convert_math_output]],
           [380, []],
           [381, []],
           [382, []],
           [383, []],
           [384, []],
           [385, []],
           [386, []],
           [387, []],
           [388, []],
           [389, [convert_html_quotes]],
           [390, []],
           [391, []],
           [392, []],
           [393, [convert_optarg]],
           [394, [convert_use_makebox]],
           [395, []],
           [396, []],
           [397, [remove_Nameref]],
           [398, []],
           [399, [convert_mathdots]],
           [400, [convert_rule]],
           [401, []],
           [402, [convert_bibtex_clearpage]],
           [403, [convert_flexnames]],
           [404, [convert_prettyref]],
           [405, []],
           [406, [convert_passthru]],
           [407, []],
           [408, []],
           [409, [convert_use_xetex]],
           [410, []],
           [411, [convert_langpack]],
           [412, []],
           [413, []],
          ]

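# Each entry in the revert list below has the form [target_format, [functions]]:
# running the listed functions takes a document one step back down to that
# format number, so the chain walks from 412 back to 345 in the reverse order
# of the convert list above.
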
revert =  [[412, [revert_html_css_as_file]],
           [411, [revert_tabularwidth]],
           [410, [revert_langpack]],
           [409, [revert_labeling]],
           [408, [revert_use_xetex]],
           [407, [revert_script]],
           [406, [revert_multirowOffset]],
           [405, [revert_passthru]],
           [404, []],
           [403, [revert_refstyle]],
           [402, [revert_flexnames]],
           [401, []],
           [400, [revert_diagram]],
           [399, [revert_rule]],
           [398, [revert_mathdots]],
           [397, [revert_mathrsfs]],
           [396, []],
           [395, [revert_nameref]],
           [394, [revert_DIN_C_pagesizes]],
           [393, [revert_makebox]],
           [392, [revert_argument]],
           [391, []],
           [390, [revert_align_decimal, revert_IEEEtran]],
           [389, [revert_output_sync]],
           [388, [revert_html_quotes]],
           [387, [revert_pagesizes]],
           [386, [revert_math_scale]],
           [385, [revert_lyx_version]],
           [384, [revert_shadedboxcolor]],
           [383, [revert_fontcolor]],
           [382, [revert_turkmen]],
           [381, [revert_notefontcolor]],
           [380, [revert_equalspacing_xymatrix]],
           [379, [revert_inset_preview]],
           [378, [revert_math_output]],
           [377, []],
           [376, [revert_multirow]],
           [375, [revert_includeall]],
           [374, [revert_includeonly]],
           [373, [revert_html_options]],
           [372, [revert_gbrief]],
           [371, [revert_fontenc]],
           [370, [revert_mhchem]],
           [369, [revert_suppress_date]],
           [368, [revert_author_id]],
           [367, [revert_hspace_glue_lengths]],
           [366, [revert_percent_vspace_lengths, revert_percent_hspace_lengths]],
           [365, [revert_percent_skip_lengths]],
           [364, [revert_paragraph_indentation]],
           [363, [revert_branch_filename]],
           [362, [revert_longtable_align]],
           [361, [revert_applemac]],
           [360, []],
           [359, [revert_nomencl_cwidth]],
           [358, [revert_nomencl_width]],
           [357, [revert_custom_processors]],
           [356, [revert_ulinelatex]],
           [355, []],
           [354, [revert_strikeout]],
           [353, [revert_printindexall]],
           [352, [revert_subindex]],
           [351, [revert_splitindex]],
           [350, [revert_backgroundcolor]],
           [349, [revert_outputformat]],
           [348, [revert_xetex]],
           [347, [revert_phantom, revert_hphantom, revert_vphantom]],
           [346, [revert_tabularvalign]],
           [345, [revert_swiss]]
          ]


if __name__ == "__main__":
    pass