Remove calls to deprecated module string.
The calls are now made to string methods, the advantage being that they work with unicode strings.

git-svn-id: svn://svn.lyx.org/lyx/lyx-devel/trunk@14537 a592a061-630c-0410-9148-cb99ea01b6c8
parent 552a471c99
commit 26c0f379c7
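
For context, the pattern applied throughout the diff below is the usual Python 2 migration from the deprecated string module's free functions to the equivalent str/unicode methods. A minimal sketch of the idea (the sample line and variable names are illustrative, not taken from the LyX sources):

    import string  # deprecated module, only needed for the old-style calls

    line = '\\layout Standard   '  # hypothetical example line

    # Old style: module-level functions from the string module.
    old_tokens   = string.split(line)
    old_stripped = string.strip(line)
    old_replaced = string.replace(line, '\\layout', '\\begin_layout')

    # New style: the same operations as methods on the string object itself,
    # which also work on unicode objects (the advantage noted in the message above).
    new_tokens   = line.split()
    new_stripped = line.strip()
    new_replaced = line.replace('\\layout', '\\begin_layout')

    assert old_tokens == new_tokens
    assert old_stripped == new_stripped
    assert old_replaced == new_replaced
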
@@ -22,7 +22,6 @@ import os.path
 import gzip
 import sys
 import re
-import string
 import time
 
 import lyx2lyx_version
@@ -171,7 +170,7 @@ class LyX_Base:
 if check_token(line, '\\end_preamble'):
 break
 
-if string.split(line)[:0] in ("\\layout", "\\begin_layout", "\\begin_body"):
+if line.split()[:0] in ("\\layout", "\\begin_layout", "\\begin_body"):
 self.warning("Malformed LyX file: Missing '\\end_preamble'.")
 self.warning("Adding it now and hoping for the best.")
 
@@ -180,11 +179,11 @@ class LyX_Base:
 if check_token(line, '\\end_preamble'):
 continue
 
-line = string.strip(line)
+line = line.strip()
 if not line:
 continue
 
-if string.split(line)[0] in ("\\layout", "\\begin_layout", "\\begin_body"):
+if line.split()[0] in ("\\layout", "\\begin_layout", "\\begin_body"):
 self.body.append(line)
 break
 
@@ -442,7 +441,7 @@ class LyX_Base:
 self.warning('Incomplete file.', 0)
 break
 
-section = string.split(self.body[i])[1]
+section = self.body[i].split()[1]
 if section[-1] == '*':
 section = section[:-1]
 
@@ -450,12 +449,12 @@ class LyX_Base:
 
 k = i + 1
 # skip paragraph parameters
-while not string.strip(self.body[k]) or string.split(self.body[k])[0] in allowed_parameters:
+while not self.body[k].strip() or self.body[k].split()[0] in allowed_parameters:
 k = k +1
 
 while k < j:
 if check_token(self.body[k], '\\begin_inset'):
-inset = string.split(self.body[k])[1]
+inset = self.body[k].split()[1]
 end = find_end_of_inset(self.body, k)
 if end == -1 or end > j:
 self.warning('Malformed file.', 0)
@@ -468,7 +467,7 @@ class LyX_Base:
 k = k + 1
 
 # trim empty lines in the end.
-while string.strip(par[-1]) == '' and par:
+while par[-1].strip() == '' and par:
 par.pop()
 
 toc_par.append(Paragraph(section, par))
@@ -19,7 +19,6 @@
 """ Convert files to the file format generated by lyx 0.12"""
 
 import re
-import string
 from parser_tools import find_token, find_re, check_token
 
 
@@ -32,7 +31,7 @@ def space_before_layout(document):
 if i == -1:
 break
 
-prot_space = string.find(lines[i-2],'\\protected_separator')
+prot_space = lines[i-2].find('\\protected_separator')
 if lines[i - 1] == '' and prot_space == -1:
 del lines[i-1]
 i = i + 1
@@ -66,8 +65,8 @@ def update_tabular(document):
 
 lines[i] = 'multicol4'
 i = i + 1
-rows = int(string.split(lines[i])[0])
-columns = int(string.split(lines[i])[1])
+rows = int(lines[i].split()[0])
+columns = int(lines[i].split()[1])
 
 lines[i] = lines[i] + ' 0 0 -1 -1 -1 -1'
 i = i + 1
@@ -80,13 +79,13 @@ def update_tabular(document):
 lines[i] = lines[i] + ' '
 i = i + 1
 
-while string.strip(lines[i]):
+while lines[i].strip():
 if not format:
 lines[i] = lines[i] + ' 1 1'
 lines[i] = lines[i] + ' 0 0 0'
 i = i + 1
 
-lines[i] = string.strip(lines[i])
+lines[i] = lines[i].strip()
 
 
 def final_dot(document):
@@ -125,9 +124,8 @@ def update_latexdel(document):
 i = find_token(lines, '\\begin_inset LatexDel', i)
 if i == -1:
 return
-lines[i] = string.replace(lines[i],
-'\\begin_inset LatexDel',
-'\\begin_inset LatexCommand')
+lines[i] = lines[i].replace('\\begin_inset LatexDel',
+'\\begin_inset LatexCommand')
 i = i + 1
 
 
@@ -135,12 +133,10 @@ def update_vfill(document):
 " Update fill_top and fill_bottom."
 lines = document.body
 for i in range(len(lines)):
-lines[i] = string.replace(lines[i],
-'\\fill_top',
-'\\added_space_top vfill')
-lines[i] = string.replace(lines[i],
-'\\fill_bottom',
-'\\added_space_bottom vfill')
+lines[i] = lines[i].replace('\\fill_top',
+'\\added_space_top vfill')
+lines[i] = lines[i].replace('\\fill_bottom',
+'\\added_space_bottom vfill')
 
 
 def update_space_units(document):
@@ -153,13 +149,13 @@ def update_space_units(document):
 if result:
 old = '\\added_space_bottom ' + result.group(1)
 new = '\\added_space_bottom ' + str(float(result.group(1))) + 'cm'
-lines[i] = string.replace(lines[i], old, new)
+lines[i] = lines[i].replace(old, new)
 
 result = added_space_top.search(lines[i])
 if result:
 old = '\\added_space_top ' + result.group(1)
 new = '\\added_space_top ' + str(float(result.group(1))) + 'cm'
-lines[i] = string.replace(lines[i], old, new)
+lines[i] = lines[i].replace(old, new)
 
 
 def remove_cursor(document):
@@ -172,7 +168,7 @@ def remove_cursor(document):
 if i == -1:
 break
 cursor = cursor_re.search(lines[i]).group(1)
-lines[i] = string.replace(lines[i], cursor, '')
+lines[i] = lines[i].replace(cursor, '')
 i = i + 1
 
 
@@ -224,12 +220,12 @@ def header_update(document):
 lines[i] = lines[i][:-1]
 
 if check_token(lines[i], '\\epsfig'):
-lines[i] = string.replace(lines[i], '\\epsfig', '\\graphics')
+lines[i] = lines[i].replace('\\epsfig', '\\graphics')
 i = i + 1
 continue
 
 if check_token(lines[i], '\\papersize'):
-size = string.split(lines[i])[1]
+size = lines[i].split()[1]
 new_size = size
 paperpackage = ""
 
@@ -252,7 +248,7 @@ def header_update(document):
 
 
 if check_token(lines[i], '\\baselinestretch'):
-size = string.split(lines[i])[1]
+size = lines[i].split()[1]
 if size == '1.00':
 name = 'single'
 elif size == '1.50':
@@ -277,9 +273,9 @@ def update_latexaccents(document):
 if i == -1:
 return
 
-contents = string.strip(body[i][2:])
+contents = body[i][2:].strip()
 
-if string.find(contents, '{') != -1 and string.find(contents, '}') != -1:
+if contents.find('{') != -1 and contents.find('}') != -1:
 i = i + 1
 continue
 
@@ -306,7 +302,7 @@ def obsolete_latex_title(document):
 if i == -1:
 return
 
-if string.find(string.lower(body[i]),'latex_title') != -1:
+if body[i].lower().find('latex_title') != -1:
 body[i] = '\\layout Title'
 
 i = i + 1
@@ -19,7 +19,6 @@
 """ Convert files to the file format generated by lyx 1.0"""
 
 import re
-import string
 from parser_tools import find_token, find_re
 
 def obsolete_latex_title(document):
@@ -32,7 +31,7 @@ def obsolete_latex_title(document):
 if i == -1:
 return
 
-if string.find(string.lower(body[i]),'latex title') != -1:
+if body[i].lower().find('latex title') != -1:
 body[i] = '\\layout Title'
 
 i = i + 1
@@ -56,8 +55,8 @@ def update_tabular(document):
 
 lines[i] = 'multicol4'
 i = i + 1
-rows = int(string.split(lines[i])[0])
-columns = int(string.split(lines[i])[1])
+rows = int(lines[i].split()[0])
+columns = int(lines[i].split()[1])
 
 lines[i] = lines[i] + ' 0 0 -1 -1 -1 -1'
 i = i + 1
@@ -70,11 +69,11 @@ def update_tabular(document):
 lines[i] = lines[i] + ' '
 i = i + 1
 
-while string.strip(lines[i]):
+while lines[i].strip():
 lines[i] = lines[i] + ' 0 0 0'
 i = i + 1
 
-lines[i] = string.strip(lines[i])
+lines[i] = lines[i].strip()
 
 
 supported_versions = ["1.0.0","1.0"]
@@ -19,7 +19,6 @@
 """ Convert files to the file format generated by lyx 1.1.5"""
 
 import re
-import string
 from parser_tools import find_token, find_token_backwards, find_re
 
 ####################################################################
@@ -27,7 +26,7 @@ from parser_tools import find_token, find_token_backwards, find_re
 
 def get_layout(line, default_layout):
 " Get the line layout, beware of the empty layout."
-tokens = string.split(line)
+tokens = line.split()
 if len(tokens) > 1:
 return tokens[1]
 return default_layout
@@ -91,16 +90,16 @@ def update_tabular(document):
 
 lines[i]='multicol5'
 i = i + 1
-rows = int(string.split(lines[i])[0])
-columns = int(string.split(lines[i])[1])
+rows = int(lines[i].split()[0])
+columns = int(lines[i].split()[1])
 
 i = i + rows + 1
 for j in range(columns):
-col_info = string.split(lines[i])
+col_info = lines[i].split()
 if len(col_info) == 3:
 lines[i] = lines[i] + '"" ""'
 else:
-lines[i] = string.join(col_info[:3]) + ' "%s" ""' % col_info[3]
+lines[i] = "".join(col_info[:3]) + ' "%s" ""' % col_info[3]
 i = i + 1
 
 while lines[i]:
@@ -188,7 +187,7 @@ def latexdel_getargs(document, i):
 document.warning("Unexpected end of inset.")
 j = find_token(lines, '\\begin_inset LatexDel }{', i)
 
-ref = string.join(lines[i:j])
+ref = "".join(lines[i:j])
 del lines[i:j + 1]
 
 # play safe, clean empty lines
@@ -203,7 +202,7 @@ def latexdel_getargs(document, i):
 else:
 document.warning("Unexpected end of inset.")
 j = find_token(lines, '\\begin_inset LatexDel }', i)
-label = string.join(lines[i:j])
+label = "".join(lines[i:j])
 del lines[i:j + 1]
 
 return ref, label
@@ -218,7 +217,7 @@ def update_ref(document):
 if i == -1:
 return
 
-if string.split(lines[i])[-1] == "\\ref{":
+if lines[i].split()[-1] == "\\ref{":
 i = i + 1
 ref, label = latexdel_getargs(document, i)
 lines[i - 1] = "%s[%s]{%s}" % (lines[i - 1][:-1], ref, label)
@@ -235,16 +234,15 @@ def update_latexdel(document):
 i = find_re(lines, latexdel_re, i)
 if i == -1:
 return
-lines[i] = string.replace(lines[i],
-'\\begin_inset LatexDel',
-'\\begin_inset LatexCommand')
+lines[i] = lines[i].replace('\\begin_inset LatexDel',
+'\\begin_inset LatexCommand')
 
-j = string.find(lines[i],'\\begin_inset')
+j = lines[i].find('\\begin_inset')
 lines.insert(i+1, lines[i][j:])
-lines[i] = string.strip(lines[i][:j])
+lines[i] = lines[i][:j].strip()
 i = i + 1
 
-if string.split(lines[i])[-1] in ("\\url{", "\\htmlurl{"):
+if lines[i].split()[-1] in ("\\url{", "\\htmlurl{"):
 i = i + 1
 
 ref, label = latexdel_getargs(document, i)
@@ -19,7 +19,6 @@
 """ Convert files to the file format generated by lyx 1.1.6"""
 
 import re
-import string
 from parser_tools import find_re, find_tokens, find_token, check_token
 
 lyxtable_re = re.compile(r".*\\LyXTable$")
@@ -43,7 +42,7 @@ def update_tabular(document):
 i = i + 1
 lines[i] = "\\begin_inset Tabular"
 i = i + 1
-head = string.split(lines[i])
+head = lines[i].split()
 rows = int(head[0])
 columns = int(head[1])
 
@@ -56,8 +55,8 @@ def update_tabular(document):
 row_info = []
 cont_row = []
 for j in range(rows):
-row_info.append(string.split(lines[i]))
-if string.split(lines[i])[2] == '1':
+row_info.append(lines[i].split())
+if lines[i].split()[2] == '1':
 cont_row.append(j)
 del lines[i]
 
@@ -116,7 +115,7 @@ def update_tabular(document):
 del lines[i]
 end = end -1
 
-if string.find(lines[i],'\\newline') != -1:
+if lines[i].find('\\newline') != -1:
 del lines[i]
 l = l + 1
 
@@ -275,7 +274,7 @@ def update_language(document):
 header.append('\\language english')
 return
 # This is the lyx behaviour: defaults to english
-if string.split(header[i])[1] == 'default':
+if header[i].split()[1] == 'default':
 header[i] = '\\language english'
 return
 
@@ -19,7 +19,6 @@
 """ Convert files to the file format generated by lyx 1.1.6fix3"""
 
 import re
-import string
 from parser_tools import find_token, find_re
 
 def bool_table(item):
@@ -70,7 +69,7 @@ row_re = re.compile(r'<row topline="?(\d)"? bottomline="?(\d)"? newpage="?(\d)"?
 
 def table_update(lines):
 " Update table's internal content to format 2."
-lines[1] = string.replace(lines[1], '<Features', '<features')
+lines[1] = lines[1].replace('<Features', '<features')
 res = features_re.match( lines[1] )
 if res:
 val = res.groups()
@@ -81,14 +80,14 @@ def table_update(lines):
 i = 2
 col_info = []
 while i < len(lines):
-lines[i] = string.replace(lines[i], '<Cell', '<cell')
-lines[i] = string.replace(lines[i], '</Cell', '</cell')
-lines[i] = string.replace(lines[i], '<Row', '<row')
-lines[i] = string.replace(lines[i], '</Row', '</row')
-lines[i] = string.replace(lines[i], '<Column', '<column')
-lines[i] = string.replace(lines[i], '</Column', '</column')
-lines[i] = string.replace(lines[i], '</LyXTabular', '</lyxtabular')
-k = string.find (lines[i], '<column ')
+lines[i] = lines[i].replace('<Cell', '<cell')
+lines[i] = lines[i].replace('</Cell', '</cell')
+lines[i] = lines[i].replace('<Row', '<row')
+lines[i] = lines[i].replace('</Row', '</row')
+lines[i] = lines[i].replace('<Column', '<column')
+lines[i] = lines[i].replace('</Column', '</column')
+lines[i] = lines[i].replace('</LyXTabular', '</lyxtabular')
+k = lines[i].find ('<column ')
 if k != -1:
 col_info.append(lines[i])
 del lines[i]
@@ -19,7 +19,6 @@
 
 """ Convert files to the file format generated by lyx 1.2"""
 
-import string
 import re
 
 from parser_tools import find_token, find_token_backwards, \
@@ -33,7 +32,7 @@ from parser_tools import find_token, find_token_backwards, \
 
 def get_layout(line, default_layout):
 " Get layout, if empty return the default layout."
-tokens = string.split(line)
+tokens = line.split()
 if len(tokens) > 1:
 return tokens[1]
 return default_layout
@@ -154,7 +153,7 @@ def remove_oldfloat(document):
 # There are no nested floats, so finding the end of the float is simple
 j = find_token(lines, "\\end_float", i+1)
 
-floattype = string.split(lines[i])[1]
+floattype = lines[i].split()[1]
 if not floats.has_key(floattype):
 document.warning("Error! Unknown float type " + floattype)
 floattype = "fig"
@@ -477,7 +476,7 @@ def get_length(lines, name, start, end):
 i = find_token(lines, name, start, end)
 if i == -1:
 return ""
-x = string.split(lines[i])
+x = lines[i].split()
 return x[2]+oldunits[int(x[1])]
 
 
@@ -497,9 +496,9 @@ def remove_figinset(document):
 break
 j = find_end_of_inset(lines, i)
 
-if ( len(string.split(lines[i])) > 2 ):
-lyxwidth = string.split(lines[i])[3]+"pt"
-lyxheight = string.split(lines[i])[4]+"pt"
+if ( len(lines[i].split()) > 2 ):
+lyxwidth = lines[i].split()[3]+"pt"
+lyxheight = lines[i].split()[4]+"pt"
 else:
 lyxwidth = ""
 lyxheight = ""
@@ -572,9 +571,9 @@ def update_tabular(document):
 
 for k in get_tabular_lines(lines, i):
 if check_token(lines[k], "<lyxtabular"):
-lines[k] = string.replace(lines[k], 'version="2"', 'version="3"')
+lines[k] = lines[k].replace('version="2"', 'version="3"')
 elif check_token(lines[k], "<column"):
-lines[k] = string.replace(lines[k], 'width=""', 'width="0pt"')
+lines[k] = lines[k].replace('width=""', 'width="0pt"')
 
 if line_re.match(lines[k]):
 lines[k] = re.sub(attr_re, "", lines[k])
@@ -685,7 +684,7 @@ def setHeaderFooterRows(hr, fhr, fr, lfr, rows_, row_info):
 
 def insert_attribute(lines, i, attribute):
 " Insert attribute in lines[i]."
-last = string.find(lines[i],'>')
+last = lines[i].find('>')
 lines[i] = lines[i][:last] + ' ' + attribute + lines[i][last:]
 
 
@@ -726,7 +725,7 @@ def update_longtables(document):
 # remove longtable elements from features
 features = lt_features_re.search(body[i])
 if features:
-body[i] = string.replace(body[i], features.group(1), "")
+body[i] = body[i].replace(features.group(1), "")
 continue
 
 row_info = row() * rows
@@ -798,7 +797,7 @@ def change_infoinset(document):
 i = find_token(lines, "\\begin_inset Info", i)
 if i == -1:
 break
-txt = string.lstrip(lines[i][18:])
+txt = lines[i][18:].lstrip()
 new = ["\\begin_inset Note", "collapsed true", ""]
 j = find_token(lines, "\\end_inset", i)
 if j == -1:
@@ -810,7 +809,7 @@ def change_infoinset(document):
 
 for line in note_lines:
 new = new + ['\layout %s' % document.default_layout, ""]
-tmp = string.split(line, '\\')
+tmp = line.split('\\')
 new = new + [tmp[0]]
 for x in tmp[1:]:
 new = new + ["\\backslash ", x]
@@ -19,7 +19,6 @@
 
 """ Convert files to the file format generated by lyx 1.3"""
 
-import string
 import re
 from parser_tools import find_token, find_end_of, get_value,\
 find_token_exact, del_token
@@ -63,7 +62,7 @@ def change_insetgraphics(document):
 if k == -1:
 k = find_token_exact(lines, "size_kind", i, j)
 if k != -1:
-size_type = string.split(lines[k])[1]
+size_type = lines[k].split()[1]
 del lines[k]
 j = j-1
 if size_type in ["0", "original"]:
@@ -82,7 +81,7 @@ def change_insetgraphics(document):
 if k == -1:
 k = find_token_exact(lines, "lyxsize_kind", i, j)
 if k != -1:
-lyxsize_type = string.split(lines[k])[1]
+lyxsize_type = lines[k].split()[1]
 del lines[k]
 j = j-1
 j = del_token(lines, "lyxwidth", i, j)
@@ -28,7 +28,6 @@ from parser_tools import check_token, find_token, \
 find_tokens, find_end_of, find_beginning_of, find_token_exact, find_tokens_exact, \
 find_re, find_tokens_backwards
 from sys import stdin
-from string import replace, split, find, strip, join
 
 from lyx_0_12 import update_latexaccents
 
@@ -37,7 +36,7 @@ from lyx_0_12 import update_latexaccents
 
 def get_layout(line, default_layout):
 " Get layout, if empty return the default layout."
-tokens = split(line)
+tokens = line.split()
 if len(tokens) > 1:
 return tokens[1]
 return default_layout
@@ -95,8 +94,8 @@ def remove_color_default(document):
 i = find_token(document.body, "\\color default", i)
 if i == -1:
 return
-document.body[i] = replace(document.body[i], "\\color default",
+document.body[i] = document.body[i].replace("\\color default",
 "\\color inherit")
 
 
 def add_end_header(document):
@@ -118,7 +117,7 @@ def convert_amsmath(document):
 if i == -1:
 document.warning("Malformed LyX document: Missing '\\use_amsmath'.")
 return
-tokens = split(document.header[i])
+tokens = document.header[i].split()
 if len(tokens) != 2:
 document.warning("Malformed LyX document: Could not parse line '%s'." % document.header[i])
 use_amsmath = '0'
@@ -139,7 +138,7 @@ def revert_amsmath(document):
 if i == -1:
 document.warning("Malformed LyX document: Missing '\\use_amsmath'.")
 return
-tokens = split(document.header[i])
+tokens = document.header[i].split()
 if len(tokens) != 2:
 document.warning("Malformed LyX document: Could not parse line '%s'." % document.header[i])
 use_amsmath = '0'
@@ -157,7 +156,8 @@ def revert_amsmath(document):
 def convert_spaces(document):
 " \SpecialChar ~ -> \InsetSpace ~"
 for i in range(len(document.body)):
-document.body[i] = replace(document.body[i],"\\SpecialChar ~","\\InsetSpace ~")
+document.body[i] = document.body[i].replace("\\SpecialChar ~",
+"\\InsetSpace ~")
 
 
 def revert_spaces(document):
@@ -185,16 +185,20 @@ def rename_spaces(document):
 """ \InsetSpace \, -> \InsetSpace \thinspace{}
 \InsetSpace \space -> \InsetSpace \space{}"""
 for i in range(len(document.body)):
-document.body[i] = replace(document.body[i],"\\InsetSpace \\space","\\InsetSpace \\space{}")
-document.body[i] = replace(document.body[i],"\\InsetSpace \,","\\InsetSpace \\thinspace{}")
+document.body[i] = document.body[i].replace("\\InsetSpace \\space",
+"\\InsetSpace \\space{}")
+document.body[i] = document.body[i].replace("\\InsetSpace \,",
+"\\InsetSpace \\thinspace{}")
 
 
 def revert_space_names(document):
 """ \InsetSpace \thinspace{} -> \InsetSpace \,
 \InsetSpace \space{} -> \InsetSpace \space"""
 for i in range(len(document.body)):
-document.body[i] = replace(document.body[i],"\\InsetSpace \\space{}","\\InsetSpace \\space")
-document.body[i] = replace(document.body[i],"\\InsetSpace \\thinspace{}","\\InsetSpace \\,")
+document.body[i] = document.body[i].replace("\\InsetSpace \\space{}",
+"\\InsetSpace \\space")
+document.body[i] = document.body[i].replace("\\InsetSpace \\thinspace{}",
+"\\InsetSpace \\,")
 
 
 def lyx_support_escape(lab):
@@ -231,15 +235,15 @@ def revert_eqref(document):
 def convert_bibtex(document):
 " Convert BibTeX changes."
 for i in range(len(document.body)):
-document.body[i] = replace(document.body[i],"\\begin_inset LatexCommand \\BibTeX",
+document.body[i] = document.body[i].replace("\\begin_inset LatexCommand \\BibTeX",
 "\\begin_inset LatexCommand \\bibtex")
 
 
 def revert_bibtex(document):
 " Revert BibTeX changes."
 for i in range(len(document.body)):
-document.body[i] = replace(document.body[i], "\\begin_inset LatexCommand \\bibtex",
+document.body[i] = document.body[i].replace("\\begin_inset LatexCommand \\bibtex",
 "\\begin_inset LatexCommand \\BibTeX")
 
 
 def remove_insetparent(document):
@@ -297,19 +301,19 @@ def revert_external_1(document):
 if i == -1:
 break
 
-template = split(document.body[i+1])
+template = document.body[i+1].split()
 template.reverse()
 del document.body[i+1]
 
-filename = split(document.body[i+1])
+filename = document.body[i+1].split()
 filename.reverse()
 del document.body[i+1]
 
-params = split(document.body[i+1])
+params = document.body[i+1].split()
 params.reverse()
 if document.body[i+1]: del document.body[i+1]
 
-document.body[i] = document.body[i] + " " + template[0]+ ', "' + filename[0] + '", " '+ join(params[1:]) + '"'
+document.body[i] = document.body[i] + " " + template[0]+ ', "' + filename[0] + '", " '+ "".join(params[1:]) + '"'
 i = i + 1
 
 
@@ -388,7 +392,7 @@ def convert_comment(document):
 i = i + 1
 continue
 
-if find(document.body[i], comment) == -1:
+if document.body[i].find(comment) == -1:
 document.body[i:i] = ["\\end_inset"]
 i = i + 1
 break
@@ -423,7 +427,7 @@ def add_end_layout(document):
 "\\begin_deeper", "\\end_deeper", "\\the_end"], i)
 
 if i != -1:
-token = split(document.body[i])[0]
+token = document.body[i].split()[0]
 else:
 document.warning("Truncated document.")
 i = len(document.body)
@@ -529,7 +533,7 @@ def layout2begin_layout(document):
 if i == -1:
 return
 
-document.body[i] = replace(document.body[i], '\\layout', '\\begin_layout')
+document.body[i] = document.body[i].replace('\\layout', '\\begin_layout')
 i = i + 1
 
 
@@ -541,7 +545,7 @@ def begin_layout2layout(document):
 if i == -1:
 return
 
-document.body[i] = replace(document.body[i], '\\begin_layout', '\\layout')
+document.body[i] = document.body[i].replace('\\begin_layout', '\\layout')
 i = i + 1
 
 
@@ -549,7 +553,7 @@ def convert_valignment_middle(body, start, end):
 'valignment="center" -> valignment="middle"'
 for i in range(start, end):
 if re.search('^<(column|cell) .*valignment="center".*>$', body[i]):
-body[i] = replace(body[i], 'valignment="center"', 'valignment="middle"')
+body[i] = body[i].replace('valignment="center"', 'valignment="middle"')
 
 
 def convert_table_valignment_middle(document):
@@ -573,7 +577,7 @@ def revert_table_valignment_middle(body, start, end):
 " valignment, middle -> center"
 for i in range(start, end):
 if re.search('^<(column|cell) .*valignment="middle".*>$', body[i]):
-body[i] = replace(body[i], 'valignment="middle"', 'valignment="center"')
+body[i] = body[i].replace('valignment="middle"', 'valignment="center"')
 
 
 def revert_valignment_middle(document):
@@ -665,16 +669,16 @@ parskip}
 # Merge all paragraph parameters into a single line
 # We cannot check for '\\' only because paragraphs may start e.g.
 # with '\\backslash'
-while document.body[i + 1][:1] == '\\' and split(document.body[i + 1][1:])[0] in par_params:
+while document.body[i + 1][:1] == '\\' and document.body[i + 1][1:].split()[0] in par_params:
 document.body[i] = document.body[i + 1] + ' ' + document.body[i]
 del document.body[i+1]
 
-line_top = find(document.body[i],"\\line_top")
-line_bot = find(document.body[i],"\\line_bottom")
-pb_top = find(document.body[i],"\\pagebreak_top")
-pb_bot = find(document.body[i],"\\pagebreak_bottom")
-vspace_top = find(document.body[i],"\\added_space_top")
-vspace_bot = find(document.body[i],"\\added_space_bottom")
+line_top = document.body[i].find("\\line_top")
+line_bot = document.body[i].find("\\line_bottom")
+pb_top = document.body[i].find("\\pagebreak_top")
+pb_bot = document.body[i].find("\\pagebreak_bottom")
+vspace_top = document.body[i].find("\\added_space_top")
+vspace_bot = document.body[i].find("\\added_space_bottom")
 
 if line_top == -1 and line_bot == -1 and pb_bot == -1 and pb_top == -1 and vspace_top == -1 and vspace_bot == -1:
 continue
@@ -687,9 +691,9 @@ parskip}
 # inherit font sizes.
 nonstandard = 0
 if (not document.is_default_layout(layout) or
-find(document.body[i],"\\align") != -1 or
-find(document.body[i],"\\labelwidthstring") != -1 or
-find(document.body[i],"\\noindent") != -1):
+document.body[i].find("\\align") != -1 or
+document.body[i].find("\\labelwidthstring") != -1 or
+document.body[i].find("\\noindent") != -1):
 nonstandard = 1
 
 # get the font size of the beginning of this paragraph, since we need
@@ -698,29 +702,29 @@ parskip}
 while not is_nonempty_line(document.body[j]):
 j = j + 1
 size_top = ""
-if find(document.body[j], "\\size") != -1:
-size_top = split(document.body[j])[1]
+if document.body[j].find("\\size") != -1:
+size_top = document.body[j].split()[1]
 
 for tag in "\\line_top", "\\line_bottom", "\\pagebreak_top", "\\pagebreak_bottom":
-document.body[i] = replace(document.body[i], tag, "")
+document.body[i] = document.body[i].replace(tag, "")
 
 if vspace_top != -1:
 # the position could be change because of the removal of other
 # paragraph properties above
-vspace_top = find(document.body[i],"\\added_space_top")
-tmp_list = split(document.body[i][vspace_top:])
+vspace_top = document.body[i].find("\\added_space_top")
+tmp_list = document.body[i][vspace_top:].split()
 vspace_top_value = tmp_list[1]
-document.body[i] = document.body[i][:vspace_top] + join(tmp_list[2:])
+document.body[i] = document.body[i][:vspace_top] + "".join(tmp_list[2:])
 
 if vspace_bot != -1:
 # the position could be change because of the removal of other
 # paragraph properties above
-vspace_bot = find(document.body[i],"\\added_space_bottom")
-tmp_list = split(document.body[i][vspace_bot:])
+vspace_bot = document.body[i].find("\\added_space_bottom")
+tmp_list = document.body[i][vspace_bot:].split()
 vspace_bot_value = tmp_list[1]
-document.body[i] = document.body[i][:vspace_bot] + join(tmp_list[2:])
+document.body[i] = document.body[i][:vspace_bot] + "".join(tmp_list[2:])
 
-document.body[i] = strip(document.body[i])
+document.body[i] = document.body[i].strip()
 i = i + 1
 
 # Create an empty paragraph or paragraph fragment for line and
@@ -770,10 +774,10 @@ parskip}
 size_bot = size_top
 j = i + 1
 while j < k:
-if find(document.body[j], "\\size") != -1:
-size_bot = split(document.body[j])[1]
+if document.body[j].find("\\size") != -1:
+size_bot = document.body[j].split()[1]
 j = j + 1
-elif find(document.body[j], "\\begin_inset") != -1:
+elif document.body[j].find("\\begin_inset") != -1:
 # skip insets
 j = find_end_of_inset(document.body, j)
 else:
@@ -1139,10 +1143,10 @@ def get_par_params(lines, i):
 # We cannot check for '\\' only because paragraphs may start e.g.
 # with '\\backslash'
 params = ''
-while lines[i][:1] == '\\' and split(lines[i][1:])[0] in par_params:
-params = params + ' ' + strip(lines[i])
+while lines[i][:1] == '\\' and lines[i][1:].split()[0] in par_params:
+params = params + ' ' + lines[i].strip()
 i = i + 1
-return strip(params)
+return params.strip()
 
 
 def lyxsize2latexsize(lyxsize):
@@ -1165,7 +1169,7 @@ def revert_breaks(document):
 if i == -1:
 defskipamount = 'medskip'
 else:
-defskipamount = split(document.header[i])[1]
+defskipamount = document.header[i].split()[1]
 
 keys = {"\\begin_inset" : "vspace", "\\lyxline" : "lyxline",
 "\\newpage" : "newpage"}
@@ -1190,12 +1194,12 @@ def revert_breaks(document):
 size = "normal"
 # Paragraph parameters may be on one or more lines.
 # Find the start of the real paragraph text.
-while document.body[start][:1] == '\\' and split(document.body[start])[0] in params:
+while document.body[start][:1] == '\\' and document.body[start].split()[0] in params:
 start = start + 1
 for k in range(start, i):
-if find(document.body[k], "\\size") != -1:
+if document.body[k].find("\\size") != -1:
 # store font size
-size = split(document.body[k])[1]
+size = document.body[k].split()[1]
 elif is_nonempty_line(document.body[k]):
 paragraph_start = 0
 break
@@ -1229,16 +1233,16 @@ def revert_breaks(document):
 while k < next_par:
 if find_tokens(document.body, tokens, k) == k:
 # inset to convert
-lines.append(split(document.body[k]))
+lines.append(document.body[k].split())
 insets.append(keys[lines[n][0]])
 del_lines.append([k, k])
 top.append(0)
 sizes.append(size)
 n = n + 1
 inset_end = k
-elif find(document.body[k], "\\size") != -1:
+elif document.body[k].find("\\size") != -1:
 # store font size
-size = split(document.body[k])[1]
+size = document.body[k].split()[1]
 elif find_token(document.body, "\\begin_inset ERT", k) == k:
 ert_begin = find_token(document.body, "\\layout", k) + 1
 if ert_begin == 0:
@@ -1320,13 +1324,13 @@ def revert_breaks(document):
 # determine font size
 prev_size = "normal"
 k = prev_par + 1
-while document.body[k][:1] == '\\' and split(document.body[k])[0] in prev_params:
+while document.body[k][:1] == '\\' and document.body[k].split()[0] in prev_params:
 k = k + 1
 while k < this_par:
-if find(document.body[k], "\\size") != -1:
-prev_size = split(document.body[k])[1]
+if document.body[k].find("\\size") != -1:
+prev_size = document.body[k].split()[1]
 break
-elif find(document.body[k], "\\begin_inset") != -1:
+elif document.body[k].find("\\begin_inset") != -1:
 # skip insets
 k = find_end_of_inset(document.body, k)
 elif is_nonempty_line(document.body[k]):
@@ -1348,14 +1352,14 @@ def revert_breaks(document):
 if next_par > 0 and not after:
 next_params = get_par_params(document.body, next_par + 1)
 ert = 0
-while document.body[k][:1] == '\\' and split(document.body[k])[0] in next_params:
+while document.body[k][:1] == '\\' and document.body[k].split()[0] in next_params:
 k = k + 1
 # determine font size
 next_size = "normal"
 k = next_par + 1
 while k < this_par:
-if find(document.body[k], "\\size") != -1:
-next_size = split(document.body[k])[1]
+if document.body[k].find("\\size") != -1:
+next_size = document.body[k].split()[1]
 break
 elif is_nonempty_line(document.body[k]):
 break
@@ -1474,7 +1478,7 @@ def convert_len(len, special):
 
 # Convert LyX units to LaTeX units
 for unit in units.keys():
-if find(len, unit) != -1:
+if len.find(unit) != -1:
 len = '%f' % (len2value(len) / 100) + units[unit]
 break
 
@@ -1550,7 +1554,7 @@ def convert_frameless_box(document):
 'special':'none', 'height':'1in',
 'height_special':'totalheight', 'collapsed':'false'}
 for key in params.keys():
-value = replace(get_value(document.body, key, i, j), '"', '')
+value = get_value(document.body, key, i, j).replace('"', '')
 if value != "":
 if key == 'position':
 # convert new to old position: 'position "t"' -> 0
@@ -1715,7 +1719,7 @@ def remove_branches(document):
 i = find_token(document.header, "\\branch", i)
 if i == -1:
 break
-document.warning("Removing branch %s." % split(document.header[i])[1])
+document.warning("Removing branch %s." % document.header[i].split()[1])
 j = find_token(document.header, "\\end_branch", i)
 if j == -1:
 document.warning("Malformed LyX document: Missing '\\end_branch'.")
@@ -1852,7 +1856,7 @@ def convert_graphics(document):
 if j == -1:
 return
 i = i + 1
-filename = split(document.body[j])[1]
+filename = document.body[j].split()[1]
 absname = os.path.normpath(os.path.join(document.dir, filename))
 if document.input == stdin and not os.path.isabs(filename):
 # We don't know the directory and cannot check the document.
@@ -1869,10 +1873,10 @@ def convert_graphics(document):
 if access(absname, F_OK):
 continue
 if access(absname + ".ps", F_OK):
-document.body[j] = replace(document.body[j], filename, filename + ".ps")
+document.body[j] = document.body[j].replace(filename, filename + ".ps")
 continue
 if access(absname + ".eps", F_OK):
-document.body[j] = replace(document.body[j], filename, filename + ".eps")
+document.body[j] = document.body[j].replace(filename, filename + ".eps")
 
 
 def convert_names(document):
@@ -1977,9 +1981,9 @@ def convert_cite_engine(document):
 document.warning("Malformed lyx document: Missing '\\use_jurabib'.")
 return
 
-use_natbib = int(split(document.header[a])[1])
-use_numerical_citations = int(split(document.header[b])[1])
-use_jurabib = int(split(document.header[c])[1])
+use_natbib = int(document.header[a].split()[1])
+use_numerical_citations = int(document.header[b].split()[1])
+use_jurabib = int(document.header[c].split()[1])
 
 cite_engine = "basic"
 if use_natbib:
@@ -2001,7 +2005,7 @@ def revert_cite_engine(document):
 document.warning("Malformed lyx document: Missing '\\cite_engine'.")
 return
 
-cite_engine = split(document.header[i])[1]
+cite_engine = document.header[i].split()[1]
 
 use_natbib = '0'
 use_numerical = '0'
@@ -2027,9 +2031,9 @@ def convert_paperpackage(document):
 return
 
 packages = {'default':'none','a4':'none', 'a4wide':'a4', 'widemarginsa4':'a4wide'}
-if len(split(document.header[i])) > 1:
-paperpackage = split(document.header[i])[1]
-document.header[i] = replace(document.header[i], paperpackage, packages[paperpackage])
+if len(document.header[i].split()) > 1:
+paperpackage = document.header[i].split()[1]
+document.header[i] = document.header[i].replace(paperpackage, packages[paperpackage])
 else:
 document.header[i] = document.header[i] + ' widemarginsa4'
 
@@ -2042,11 +2046,11 @@ def revert_paperpackage(document):
 
 packages = {'none':'a4', 'a4':'a4wide', 'a4wide':'widemarginsa4',
 'widemarginsa4':'', 'default': 'default'}
-if len(split(document.header[i])) > 1:
-paperpackage = split(document.header[i])[1]
+if len(document.header[i].split()) > 1:
+paperpackage = document.header[i].split()[1]
 else:
 paperpackage = 'default'
-document.header[i] = replace(document.header[i], paperpackage, packages[paperpackage])
+document.header[i] = document.header[i].replace(paperpackage, packages[paperpackage])
 
 
 def convert_bullets(document):
@@ -2057,11 +2061,11 @@ def convert_bullets(document):
 if i == -1:
 return
 if document.header[i][:12] == '\\bulletLaTeX':
-document.header[i] = document.header[i] + ' ' + strip(document.header[i+1])
+document.header[i] = document.header[i] + ' ' + document.header[i+1].strip()
 n = 3
 else:
-document.header[i] = document.header[i] + ' ' + strip(document.header[i+1]) +\
-' ' + strip(document.header[i+2]) + ' ' + strip(document.header[i+3])
+document.header[i] = document.header[i] + ' ' + document.header[i+1].strip() +\
+' ' + document.header[i+2].strip() + ' ' + document.header[i+3].strip()
 n = 5
 del document.header[i+1:i + n]
 i = i + 1
@@ -2075,7 +2079,7 @@ def revert_bullets(document):
 if i == -1:
 return
 if document.header[i][:12] == '\\bulletLaTeX':
-n = find(document.header[i], '"')
+n = document.header[i].find('"')
 if n == -1:
 document.warning("Malformed header.")
 return
@@ -2083,7 +2087,7 @@ def revert_bullets(document):
 document.header[i:i+1] = [document.header[i][:n-1],'\t' + document.header[i][n:], '\\end_bullet']
 i = i + 3
 else:
-frag = split(document.header[i])
+frag = document.header[i].split()
 if len(frag) != 5:
 document.warning("Malformed header.")
 return
@@ -2138,7 +2142,7 @@ def normalize_papersize(document):
 if i == -1:
 return
 
-tmp = split(document.header[i])
+tmp = document.header[i].split()
 if tmp[1] == "Default":
 document.header[i] = '\\papersize default'
 return
@@ -2152,7 +2156,7 @@ def denormalize_papersize(document):
 if i == -1:
 return
 
-tmp = split(document.header[i])
+tmp = document.header[i].split()
 if tmp[1] == "custom":
 document.header[i] = '\\papersize Custom'
 
@@ -2161,7 +2165,7 @@ def strip_end_space(document):
 " Strip spaces at end of command line. "
 for i in range(len(document.body)):
 if document.body[i][:1] == '\\':
-document.body[i] = strip(document.body[i])
+document.body[i] = document.body[i].strip()
 
 
 def use_x_boolean(document):
@@ -2171,7 +2175,7 @@ def use_x_boolean(document):
 i = find_token(document.header, use, 0)
 if i == -1:
 continue
-decompose = split(document.header[i])
+decompose = document.header[i].split()
 document.header[i] = decompose[0] + ' ' + bin2bool[decompose[1]]
 
 
@@ -2182,7 +2186,7 @@ def use_x_binary(document):
 i = find_token(document.header, use, 0)
 if i == -1:
 continue
-decompose = split(document.header[i])
+decompose = document.header[i].split()
 document.header[i] = decompose[0] + ' ' + bool2bin[decompose[1]]
 
 
@@ -2202,13 +2206,13 @@ def normalize_paragraph_params(document):
 
 i = i + 1
 while 1:
-if strip(body[i]) and split(body[i])[0] not in allowed_parameters:
+if body[i].strip() and body[i].split()[0] not in allowed_parameters:
 break
 
-j = find(body[i],'\\', 1)
+j = body[i].find('\\', 1)
 
 if j != -1:
-body[i:i+1] = [strip(body[i][:j]), body[i][j:]]
+body[i:i+1] = [body[i][:j].strip(), body[i][j:]]
 
 i = i + 1
 
@@ -2264,8 +2268,8 @@ def convert_ert_paragraphs(document):
 # remove all paragraph parameters and font settings
 k = i
 while k < j:
-if (strip(document.body[k]) and
-split(document.body[k])[0] in forbidden_settings):
+if (document.body[k].strip() and
+document.body[k].split()[0] in forbidden_settings):
 del document.body[k]
 j = j - 1
 else:
@@ -2337,7 +2341,7 @@ def revert_ert_paragraphs(document):
 l = k + 1
 while document.body[l] == "":
 l = l + 1
-if strip(document.body[l]) and split(document.body[l])[0] == "\\newline":
+if document.body[l].strip() and document.body[l].split()[0] == "\\newline":
 document.body[k:l+1] = ["\\end_layout", "",
 '\\begin_layout %s' % document.default_layout]
 j = j - l + k + 2
@@ -2377,7 +2381,7 @@ def remove_paperpackage(document):
 if i == -1:
 return
 
-paperpackage = split(document.header[i])[1]
+paperpackage = document.header[i].split()[1]
 
 del document.header[i]
 
@@ -21,7 +21,6 @@
 
 import re
 from parser_tools import find_token, find_token_exact, find_tokens, find_end_of, get_value
-from string import replace
 
 
 ####################################################################
@@ -208,7 +207,7 @@ def revert_booktabs(document):
 for k in range(i, j):
 if re.search('^<features.* booktabs="true".*>$', document.body[k]):
 document.warning("Converting 'booktabs' table to normal table.")
-document.body[k] = document.body[k].replace(' booktabs="true"', '')
+document.body[k] = document.body[k].replace(' booktabs="true"', '')
 if re.search(re_row, document.body[k]):
 document.warning("Removing extra row space.")
 document.body[k] = re_tspace.sub('', document.body[k])
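
One detail of this migration that is easy to miss in the join-related hunks above: with the module function the separator is an optional second argument that defaults to a single space, while with the method form the separator is the object the method is called on, so it must always be spelled out. A small illustrative sketch (Python 2, not taken from the diff itself):

    import string

    words = ['a', 'b', 'c']
    assert string.join(words) == 'a b c'   # module form: default separator is one space
    assert ' '.join(words) == 'a b c'      # method equivalent of that default
    assert ''.join(words) == 'abc'         # an empty separator concatenates directly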