Mirror of https://git.lyx.org/repos/lyx.git, synced 2024-11-22 10:00:33 +00:00
Fix code
Run "ruff check --fix" Remove unneeded imports; Reword code to make clear the "not in" operator; Put imports in its own line (readability);
This commit is contained in:
parent b4db3ea137
commit a0a5892ae8
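The hunks below are purely mechanical rewrites produced by ruff's autofixes: E401 (one import per line), E713 (membership test should use "not in"), and F401 (unused imports removed). As a minimal illustrative sketch of the first two patterns, with hypothetical identifiers that are not taken from the lyx2lyx sources:

# Sketch only -- "mapping" and "key" are made-up names; just the patterns
# match the diff that follows.

# Before (flagged by ruff):
#     import sys, os                 # E401: multiple imports on one line
#     if not key in mapping:         # E713: should be "key not in mapping"

# After "ruff check --fix":
import sys
import os

mapping = {"bp": 1.0, "cm": 28.346}
key = "mm"

# "key not in mapping" reads as a single operator and avoids misreading
# "not key in mapping" as "(not key) in mapping".
if key not in mapping:
    print("unknown unit:", key, file=sys.stderr)

print("running under Python", sys.version.split()[0], "in", os.getcwd())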
@@ -23,7 +23,6 @@ from parser_tools import (
     get_value,
     check_token,
     find_token,
-    find_tokens,
     find_end_of,
     find_complete_lines,
 )
@@ -90,7 +90,6 @@ revert_language(document, lyxname, babelname="", polyglossianame=""):
 """

 import re
-import sys
 from parser_tools import (
     find_token,
     find_end_of_inset,
@@ -108,7 +107,7 @@ from unicode_symbols import unicode_reps
 def add_to_preamble(document, text):
     "Add text to the preamble if it is not already there."

-    if not type(text) is list:
+    if type(text) is not list:
         # split on \n just in case
         # it'll give us the one element list we want
         # if there's no \n, too
@@ -139,7 +138,7 @@ def add_to_preamble(document, text):
 def insert_to_preamble(document, text, index=0):
     """Insert text to the preamble at a given line"""

-    if not type(text) is list:
+    if type(text) is not list:
         # split on \n just in case
         # it'll give us the one element list we want
         # if there's no \n, too
@@ -460,7 +459,7 @@ def length_in_bp(length):
         return 0
     value = m.group(1)
     unit = m.group(2)
-    if not unit in scales.keys():
+    if unit not in scales.keys():
         document.warning("Unknown length unit: " + unit + ".")
         return value
     return "%g" % (float(value) * scales[unit])
@@ -736,7 +735,7 @@ def revert_language(document, lyxname, babelname="", polyglossianame=""):
             # \end_layout

             # Ensure correct handling of list labels
-            if parent[0] in ["Labeling", "Description"] and not " " in "\n".join(
+            if parent[0] in ["Labeling", "Description"] and " " not in "\n".join(
                 document.body[parent[3] : i]
             ):
                 # line `i+1` is first line of a list item,
@@ -35,7 +35,6 @@ from parser_tools import (
     find_re,
     find_tokens_backwards,
 )
-from sys import stdin

 from lyx_0_12 import update_latexaccents

@@ -20,7 +20,8 @@

 import re
 import unicodedata
-import sys, os
+import sys
+import os

 from parser_tools import (
     find_re,
@@ -128,7 +129,7 @@ def convert_font_settings(document):
     if font_scheme == "":
         document.warning("Malformed LyX document: Empty `\\fontscheme'.")
         font_scheme = "default"
-    if not font_scheme in list(roman_fonts.keys()):
+    if font_scheme not in list(roman_fonts.keys()):
         document.warning("Malformed LyX document: Unknown `\\fontscheme' `%s'." % font_scheme)
         font_scheme = "default"
     document.header[i : i + 1] = [
@@ -1902,7 +1903,7 @@ def revert_listings_inset(document):
         if i == -1:
             break
         else:
-            if not "\\usepackage{listings}" in document.preamble:
+            if "\\usepackage{listings}" not in document.preamble:
                 document.preamble.append("\\usepackage{listings}")
         j = find_end_of_inset(document.body, i + 1)
         if j == -1:
@@ -2040,7 +2041,7 @@ def revert_include_listings(document):
         if i == -1:
             break
         else:
-            if not "\\usepackage{listings}" in document.preamble:
+            if "\\usepackage{listings}" not in document.preamble:
                 document.preamble.append("\\usepackage{listings}")
         j = find_end_of_inset(document.body, i + 1)
         if j == -1:
@@ -18,8 +18,6 @@
 """Convert files to the file format generated by lyx 1.6"""

 import re
-import unicodedata
-import sys, os

 from parser_tools import find_token, find_end_of, find_tokens, get_value
 from unicode_symbols import unicode_reps
@@ -17,15 +17,12 @@

 """Convert files to the file format generated by lyx 2.0"""

-import re, string
-import unicodedata
-import sys, os
+import re

 from parser_tools import (
     del_complete_lines,
     find_token,
     find_end_of,
-    find_tokens,
     find_token_exact,
     find_end_of_inset,
     find_end_of_layout,
@@ -17,9 +17,7 @@

 """Convert files to the file format generated by LyX 2.1"""

-import re, string
-import unicodedata
-import sys, os
+import re

 # Uncomment only what you need to import, please.

@@ -17,9 +17,8 @@

 """Convert files to the file format generated by lyx 2.2"""

-import re, string
-import unicodedata
-import sys, os
+import re
+import os

 # Uncomment only what you need to import, please.

@@ -31,7 +30,6 @@ from lyx2lyx_tools import (
     lyx2verbatim,
     length_in_bp,
     convert_info_insets,
-    insert_document_option,
     revert_language,
 )

@@ -50,7 +48,6 @@ from parser_tools import (
     get_quoted_value,
     get_value,
     is_in_inset,
-    get_bool_value,
     set_bool_value,
 )

@@ -633,7 +630,7 @@ def revert_question_env(document):
     """

     # Do we use theorems-ams-extended-bytype module?
-    if not "theorems-ams-extended-bytype" in document.get_module_list():
+    if "theorems-ams-extended-bytype" not in document.get_module_list():
         return

     consecutive = False
@@ -958,7 +955,7 @@ def convert_specialchar_internal(document, forward):
             i = j
             continue
         # else...
-        if not "\\SpecialChar" in document.body[i]:
+        if "\\SpecialChar" not in document.body[i]:
             i += 1
             continue
         for key, value in specialchars.items():
@@ -1021,7 +1018,7 @@ def revert_sigplan_doi(document):
 def revert_ex_itemargs(document):
     "Reverts \\item arguments of the example environments (Linguistics module) to TeX-code"

-    if not "linguistics" in document.get_module_list():
+    if "linguistics" not in document.get_module_list():
         return

     i = 0
@@ -1052,7 +1049,7 @@ def revert_ex_itemargs(document):
 def revert_forest(document):
     "Reverts the forest environment (Linguistics module) to TeX-code"

-    if not "linguistics" in document.get_module_list():
+    if "linguistics" not in document.get_module_list():
         return

     i = 0
@@ -1079,7 +1076,7 @@ def revert_forest(document):
 def revert_glossgroup(document):
     "Reverts the GroupGlossedWords inset (Linguistics module) to TeX-code"

-    if not "linguistics" in document.get_module_list():
+    if "linguistics" not in document.get_module_list():
         return

     i = 0
@@ -1106,7 +1103,7 @@ def revert_glossgroup(document):
 def revert_newgloss(document):
     "Reverts the new Glosse insets (Linguistics module) to the old format"

-    if not "linguistics" in document.get_module_list():
+    if "linguistics" not in document.get_module_list():
         return

     glosses = ("\\begin_inset Flex Glosse", "\\begin_inset Flex Tri-Glosse")
@@ -1192,7 +1189,7 @@ def revert_newgloss(document):
 def convert_newgloss(document):
     "Converts Glosse insets (Linguistics module) to the new format"

-    if not "linguistics" in document.get_module_list():
+    if "linguistics" not in document.get_module_list():
         return

     glosses = ("\\begin_inset Flex Glosse", "\\begin_inset Flex Tri-Glosse")
@@ -26,14 +26,12 @@ from parser_tools import (
     del_value,
     del_complete_lines,
     find_complete_lines,
-    find_end_of,
     find_end_of_layout,
     find_end_of_inset,
     find_re,
     find_substring,
     find_token,
     find_token_backwards,
-    find_across_lines,
     get_containing_inset,
     get_containing_layout,
     get_bool_value,
@@ -154,7 +152,7 @@ def revert_ibranches(document):
             continue
         if inverted:
             branch = document.body[i][20:].strip()
-            if not branch in antibranches:
+            if branch not in antibranches:
                 antibranch = "Anti-" + branch
                 while antibranch in antibranches:
                     antibranch = "x" + antibranch
@@ -1425,7 +1423,7 @@ def convert_literalparam(document):
         if i == -1:
             break
         inset = document.body[i][pos:].strip()
-        if not inset in command_insets:
+        if inset not in command_insets:
             i += 1
             continue
         j = find_end_of_inset(document.body, i)
@@ -144,10 +144,10 @@ class fontmapping:
     def getfontname(self, pkg, options):
         options.sort()
         pkgkey = createkey(pkg, options)
-        if not pkgkey in self.pkg2fontmap:
+        if pkgkey not in self.pkg2fontmap:
             return None
         fontname = self.pkg2fontmap[pkgkey]
-        if not fontname in self.font2pkgmap:
+        if fontname not in self.font2pkgmap:
             document.error("Something is wrong in pkgname+options <-> fontname mapping")
             return None
         if pkgkey == self.font2pkgmap[fontname].pkgkey:
@@ -359,7 +359,7 @@ def convert_fonts(document, fm, osfoption="osf"):
             del options[o]
             continue

-        if not pkg in fm.pkginmap:
+        if pkg not in fm.pkginmap:
             continue
         # determine fontname
         fn = None
@@ -456,11 +456,11 @@ def revert_fonts(document, fm, fontmap, OnlyWithXOpts=False, WithXOpts=False):
         val = get_value(document.header, ft, i)
         words = val.split(" ")  # ! splits also values like '"DejaVu Sans"'
         font = words[0].strip('"')  # TeX font name has no whitespace
-        if not font in fm.font2pkgmap:
+        if font not in fm.font2pkgmap:
             continue
         fontinfo = fm.font2pkgmap[font]
         val = fontinfo.package
-        if not val in fontmap:
+        if val not in fontmap:
             fontmap[val] = []
         x = -1
         if OnlyWithXOpts or WithXOpts:
@@ -795,7 +795,7 @@ def revert_xcharter(document):
 def revert_lscape(document):
     """Reverts the landscape environment (Landscape module) to TeX-code"""

-    if not "landscape" in document.get_module_list():
+    if "landscape" not in document.get_module_list():
         return

     i = 0
@@ -1171,7 +1171,7 @@ def revert_bibencoding(document):
         h = find_token(document.header, "\\biblio_options", 0)
         if h != -1:
             biblio_options = get_value(document.header, "\\biblio_options", h)
-            if not "bibencoding" in biblio_options:
+            if "bibencoding" not in biblio_options:
                 document.header[h] += ",bibencoding=%s" % encodings[encoding]
         else:
             bs = find_token(document.header, "\\biblatex_bibstyle", 0)
@@ -2666,7 +2666,7 @@ def convert_linggloss(document):

 def revert_linggloss(document):
     "Revert to old ling gloss definitions"
-    if not "linguistics" in document.get_module_list():
+    if "linguistics" not in document.get_module_list():
         return
     document.del_local_layout(gloss_inset_def)
     document.del_local_layout(glosss_inset_def)
@@ -2806,7 +2806,7 @@ def revert_linggloss(document):
 def revert_subexarg(document):
     "Revert linguistic subexamples with argument to ERT"

-    if not "linguistics" in document.get_module_list():
+    if "linguistics" not in document.get_module_list():
         return

     cov_req = False
@@ -2886,7 +2886,7 @@ def revert_subexarg(document):
 def revert_drs(document):
     "Revert DRS insets (linguistics) to ERT"

-    if not "linguistics" in document.get_module_list():
+    if "linguistics" not in document.get_module_list():
         return

     cov_req = False
@@ -3678,7 +3678,7 @@ def revert_texfontopts(document):
     # We need to use this regex since split() does not handle quote protection
     romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
     roman = romanfont[1].strip('"')
-    if not roman in rmfonts:
+    if roman not in rmfonts:
         return
     romanfont[1] = '"default"'
     document.header[i] = " ".join(romanfont)
@@ -4047,7 +4047,7 @@ def revert_dupqualicites(document):
     else:
         engine = get_value(document.header, "\\cite_engine", i)

-    if not engine in ["biblatex", "biblatex-natbib"]:
+    if engine not in ["biblatex", "biblatex-natbib"]:
         return

     # Citation insets that support qualified lists, with their LaTeX code
@@ -4082,7 +4082,7 @@ def revert_dupqualicites(document):
             continue

         cmd = get_value(document.body, "LatexCommand", k)
-        if not cmd in list(ql_citations.keys()):
+        if cmd not in list(ql_citations.keys()):
             i = j + 1
             continue

@@ -4256,8 +4256,8 @@ def revert_theendnotes(document):
     "Reverts native support of \\theendnotes to TeX-code"

     if (
-        not "endnotes" in document.get_module_list()
-        and not "foottoend" in document.get_module_list()
+        "endnotes" not in document.get_module_list()
+        and "foottoend" not in document.get_module_list()
     ):
         return

@@ -4278,8 +4278,8 @@ def revert_enotez(document):
     "Reverts native support of enotez package to TeX-code"

     if (
-        not "enotez" in document.get_module_list()
-        and not "foottoenotez" in document.get_module_list()
+        "enotez" not in document.get_module_list()
+        and "foottoenotez" not in document.get_module_list()
     ):
         return

@@ -4490,7 +4490,7 @@ def convert_totalheight(document):
 def convert_changebars(document):
     "Converts the changebars module to native solution"

-    if not "changebars" in document.get_module_list():
+    if "changebars" not in document.get_module_list():
         return

     i = find_token(document.header, "\\output_changes", 0)
@@ -6085,7 +6085,7 @@ def convert_cov_options(document):
 def revert_linggloss2(document):
     "Revert gloss with new args to ERT"

-    if not "linguistics" in document.get_module_list():
+    if "linguistics" not in document.get_module_list():
         return

     cov_req = False
@@ -6304,7 +6304,7 @@ def revert_linggloss2(document):
 def revert_exarg2(document):
     "Revert linguistic examples with new arguments to ERT"

-    if not "linguistics" in document.get_module_list():
+    if "linguistics" not in document.get_module_list():
         return

     cov_req = False
@@ -21,9 +21,9 @@ import imp
 lyx2lyx = imp.load_source("lyx2lyx", "lyx2lyx", open("lyx2lyx"))

 # Profiler used in the study
-import hotshot, hotshot.stats
+import hotshot
+import hotshot.stats

-import sys
 import os

 """
@@ -17,7 +17,9 @@

 "Import unicode_reps from this module for access to the unicode<->LaTeX mapping."

-import sys, os, re, codecs
+import os
+import re
+import codecs


 def read_unicodesymbols():