Run "ruff check --fix"

Remove unneeded imports;
Reword negated tests to use the "not in" and "is not" operators, for clarity;
Put each import on its own line (readability);
José Matos 2024-06-15 11:26:28 +01:00
parent b4db3ea137
commit a0a5892ae8
12 changed files with 47 additions and 59 deletions
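
For reference, the operator and import-style rewrites applied in the files below are of the kinds shown in this minimal sketch (illustrative code only, not taken from the changed files; the function check_unit and the names text, unit and scales are placeholders):

    import sys  # was: "import sys, os" on a single line (pycodestyle E401)
    import os

    def check_unit(text, unit, scales):
        prog = os.path.basename(sys.argv[0])
        if type(text) is not list:  # was: "if not type(text) is list" (E714)
            text = text.split("\n")
        if unit not in scales:  # was: "if not unit in scales" (E713)
            print(prog + ": unknown unit " + unit, file=sys.stderr)
        return text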

View File

@@ -23,7 +23,6 @@ from parser_tools import (
get_value,
check_token,
find_token,
find_tokens,
find_end_of,
find_complete_lines,
)

View File

@@ -90,7 +90,6 @@ revert_language(document, lyxname, babelname="", polyglossianame=""):
"""
import re
import sys
from parser_tools import (
find_token,
find_end_of_inset,
@@ -108,7 +107,7 @@ from unicode_symbols import unicode_reps
def add_to_preamble(document, text):
"Add text to the preamble if it is not already there."
-if not type(text) is list:
+if type(text) is not list:
# split on \n just in case
# it'll give us the one element list we want
# if there's no \n, too
@@ -139,7 +138,7 @@ def add_to_preamble(document, text):
def insert_to_preamble(document, text, index=0):
"""Insert text to the preamble at a given line"""
-if not type(text) is list:
+if type(text) is not list:
# split on \n just in case
# it'll give us the one element list we want
# if there's no \n, too
@@ -460,7 +459,7 @@ def length_in_bp(length):
return 0
value = m.group(1)
unit = m.group(2)
-if not unit in scales.keys():
+if unit not in scales.keys():
document.warning("Unknown length unit: " + unit + ".")
return value
return "%g" % (float(value) * scales[unit])
@@ -736,7 +735,7 @@ def revert_language(document, lyxname, babelname="", polyglossianame=""):
# \end_layout
# Ensure correct handling of list labels
-if parent[0] in ["Labeling", "Description"] and not " " in "\n".join(
+if parent[0] in ["Labeling", "Description"] and " " not in "\n".join(
document.body[parent[3] : i]
):
# line `i+1` is first line of a list item,

View File

@@ -35,7 +35,6 @@ from parser_tools import (
find_re,
find_tokens_backwards,
)
from sys import stdin
from lyx_0_12 import update_latexaccents

View File

@@ -20,7 +20,8 @@
import re
import unicodedata
-import sys, os
+import sys
+import os
from parser_tools import (
find_re,
@@ -128,7 +129,7 @@ def convert_font_settings(document):
if font_scheme == "":
document.warning("Malformed LyX document: Empty `\\fontscheme'.")
font_scheme = "default"
-if not font_scheme in list(roman_fonts.keys()):
+if font_scheme not in list(roman_fonts.keys()):
document.warning("Malformed LyX document: Unknown `\\fontscheme' `%s'." % font_scheme)
font_scheme = "default"
document.header[i : i + 1] = [
@@ -1902,7 +1903,7 @@ def revert_listings_inset(document):
if i == -1:
break
else:
-if not "\\usepackage{listings}" in document.preamble:
+if "\\usepackage{listings}" not in document.preamble:
document.preamble.append("\\usepackage{listings}")
j = find_end_of_inset(document.body, i + 1)
if j == -1:
@@ -2040,7 +2041,7 @@ def revert_include_listings(document):
if i == -1:
break
else:
-if not "\\usepackage{listings}" in document.preamble:
+if "\\usepackage{listings}" not in document.preamble:
document.preamble.append("\\usepackage{listings}")
j = find_end_of_inset(document.body, i + 1)
if j == -1:

View File

@@ -18,8 +18,6 @@
"""Convert files to the file format generated by lyx 1.6"""
import re
import unicodedata
-import sys, os
from parser_tools import find_token, find_end_of, find_tokens, get_value
from unicode_symbols import unicode_reps

View File

@@ -17,15 +17,12 @@
"""Convert files to the file format generated by lyx 2.0"""
-import re, string
import unicodedata
-import sys, os
+import re
from parser_tools import (
del_complete_lines,
find_token,
find_end_of,
find_tokens,
find_token_exact,
find_end_of_inset,
find_end_of_layout,

View File

@@ -17,9 +17,7 @@
"""Convert files to the file format generated by LyX 2.1"""
-import re, string
import unicodedata
-import sys, os
+import re
# Uncomment only what you need to import, please.

View File

@@ -17,9 +17,8 @@
"""Convert files to the file format generated by lyx 2.2"""
-import re, string
import unicodedata
-import sys, os
+import re
+import os
# Uncomment only what you need to import, please.
@@ -31,7 +30,6 @@ from lyx2lyx_tools import (
lyx2verbatim,
length_in_bp,
convert_info_insets,
insert_document_option,
revert_language,
)
@@ -50,7 +48,6 @@ from parser_tools import (
get_quoted_value,
get_value,
is_in_inset,
get_bool_value,
set_bool_value,
)
@@ -633,7 +630,7 @@ def revert_question_env(document):
"""
# Do we use theorems-ams-extended-bytype module?
-if not "theorems-ams-extended-bytype" in document.get_module_list():
+if "theorems-ams-extended-bytype" not in document.get_module_list():
return
consecutive = False
@@ -958,7 +955,7 @@ def convert_specialchar_internal(document, forward):
i = j
continue
# else...
-if not "\\SpecialChar" in document.body[i]:
+if "\\SpecialChar" not in document.body[i]:
i += 1
continue
for key, value in specialchars.items():
@@ -1021,7 +1018,7 @@ def revert_sigplan_doi(document):
def revert_ex_itemargs(document):
"Reverts \\item arguments of the example environments (Linguistics module) to TeX-code"
-if not "linguistics" in document.get_module_list():
+if "linguistics" not in document.get_module_list():
return
i = 0
@@ -1052,7 +1049,7 @@ def revert_ex_itemargs(document):
def revert_forest(document):
"Reverts the forest environment (Linguistics module) to TeX-code"
-if not "linguistics" in document.get_module_list():
+if "linguistics" not in document.get_module_list():
return
i = 0
@@ -1079,7 +1076,7 @@ def revert_forest(document):
def revert_glossgroup(document):
"Reverts the GroupGlossedWords inset (Linguistics module) to TeX-code"
-if not "linguistics" in document.get_module_list():
+if "linguistics" not in document.get_module_list():
return
i = 0
@@ -1106,7 +1103,7 @@ def revert_glossgroup(document):
def revert_newgloss(document):
"Reverts the new Glosse insets (Linguistics module) to the old format"
-if not "linguistics" in document.get_module_list():
+if "linguistics" not in document.get_module_list():
return
glosses = ("\\begin_inset Flex Glosse", "\\begin_inset Flex Tri-Glosse")
@@ -1192,7 +1189,7 @@ def revert_newgloss(document):
def convert_newgloss(document):
"Converts Glosse insets (Linguistics module) to the new format"
-if not "linguistics" in document.get_module_list():
+if "linguistics" not in document.get_module_list():
return
glosses = ("\\begin_inset Flex Glosse", "\\begin_inset Flex Tri-Glosse")

View File

@@ -26,14 +26,12 @@ from parser_tools import (
del_value,
del_complete_lines,
find_complete_lines,
find_end_of,
find_end_of_layout,
find_end_of_inset,
find_re,
find_substring,
find_token,
find_token_backwards,
find_across_lines,
get_containing_inset,
get_containing_layout,
get_bool_value,
@@ -154,7 +152,7 @@ def revert_ibranches(document):
continue
if inverted:
branch = document.body[i][20:].strip()
-if not branch in antibranches:
+if branch not in antibranches:
antibranch = "Anti-" + branch
while antibranch in antibranches:
antibranch = "x" + antibranch
@@ -1425,7 +1423,7 @@ def convert_literalparam(document):
if i == -1:
break
inset = document.body[i][pos:].strip()
-if not inset in command_insets:
+if inset not in command_insets:
i += 1
continue
j = find_end_of_inset(document.body, i)

View File

@@ -144,10 +144,10 @@ class fontmapping:
def getfontname(self, pkg, options):
options.sort()
pkgkey = createkey(pkg, options)
-if not pkgkey in self.pkg2fontmap:
+if pkgkey not in self.pkg2fontmap:
return None
fontname = self.pkg2fontmap[pkgkey]
-if not fontname in self.font2pkgmap:
+if fontname not in self.font2pkgmap:
document.error("Something is wrong in pkgname+options <-> fontname mapping")
return None
if pkgkey == self.font2pkgmap[fontname].pkgkey:
@@ -359,7 +359,7 @@ def convert_fonts(document, fm, osfoption="osf"):
del options[o]
continue
-if not pkg in fm.pkginmap:
+if pkg not in fm.pkginmap:
continue
# determine fontname
fn = None
@@ -456,11 +456,11 @@ def revert_fonts(document, fm, fontmap, OnlyWithXOpts=False, WithXOpts=False):
val = get_value(document.header, ft, i)
words = val.split(" ") # ! splits also values like '"DejaVu Sans"'
font = words[0].strip('"') # TeX font name has no whitespace
-if not font in fm.font2pkgmap:
+if font not in fm.font2pkgmap:
continue
fontinfo = fm.font2pkgmap[font]
val = fontinfo.package
-if not val in fontmap:
+if val not in fontmap:
fontmap[val] = []
x = -1
if OnlyWithXOpts or WithXOpts:
@@ -795,7 +795,7 @@ def revert_xcharter(document):
def revert_lscape(document):
"""Reverts the landscape environment (Landscape module) to TeX-code"""
-if not "landscape" in document.get_module_list():
+if "landscape" not in document.get_module_list():
return
i = 0
@@ -1171,7 +1171,7 @@ def revert_bibencoding(document):
h = find_token(document.header, "\\biblio_options", 0)
if h != -1:
biblio_options = get_value(document.header, "\\biblio_options", h)
-if not "bibencoding" in biblio_options:
+if "bibencoding" not in biblio_options:
document.header[h] += ",bibencoding=%s" % encodings[encoding]
else:
bs = find_token(document.header, "\\biblatex_bibstyle", 0)
@@ -2666,7 +2666,7 @@ def convert_linggloss(document):
def revert_linggloss(document):
"Revert to old ling gloss definitions"
-if not "linguistics" in document.get_module_list():
+if "linguistics" not in document.get_module_list():
return
document.del_local_layout(gloss_inset_def)
document.del_local_layout(glosss_inset_def)
@@ -2806,7 +2806,7 @@ def revert_subexarg(document):
def revert_subexarg(document):
"Revert linguistic subexamples with argument to ERT"
-if not "linguistics" in document.get_module_list():
+if "linguistics" not in document.get_module_list():
return
cov_req = False
@@ -2886,7 +2886,7 @@ def revert_drs(document):
def revert_drs(document):
"Revert DRS insets (linguistics) to ERT"
-if not "linguistics" in document.get_module_list():
+if "linguistics" not in document.get_module_list():
return
cov_req = False
@@ -3678,7 +3678,7 @@ def revert_texfontopts(document):
# We need to use this regex since split() does not handle quote protection
romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
roman = romanfont[1].strip('"')
-if not roman in rmfonts:
+if roman not in rmfonts:
return
romanfont[1] = '"default"'
document.header[i] = " ".join(romanfont)
@@ -4047,7 +4047,7 @@ def revert_dupqualicites(document):
else:
engine = get_value(document.header, "\\cite_engine", i)
-if not engine in ["biblatex", "biblatex-natbib"]:
+if engine not in ["biblatex", "biblatex-natbib"]:
return
# Citation insets that support qualified lists, with their LaTeX code
@@ -4082,7 +4082,7 @@ def revert_dupqualicites(document):
continue
cmd = get_value(document.body, "LatexCommand", k)
-if not cmd in list(ql_citations.keys()):
+if cmd not in list(ql_citations.keys()):
i = j + 1
continue
@@ -4256,8 +4256,8 @@ def revert_theendnotes(document):
"Reverts native support of \\theendnotes to TeX-code"
if (
-not "endnotes" in document.get_module_list()
-and not "foottoend" in document.get_module_list()
+"endnotes" not in document.get_module_list()
+and "foottoend" not in document.get_module_list()
):
return
@@ -4278,8 +4278,8 @@ def revert_enotez(document):
"Reverts native support of enotez package to TeX-code"
if (
-not "enotez" in document.get_module_list()
-and not "foottoenotez" in document.get_module_list()
+"enotez" not in document.get_module_list()
+and "foottoenotez" not in document.get_module_list()
):
return
@@ -4490,7 +4490,7 @@ def convert_totalheight(document):
def convert_changebars(document):
"Converts the changebars module to native solution"
-if not "changebars" in document.get_module_list():
+if "changebars" not in document.get_module_list():
return
i = find_token(document.header, "\\output_changes", 0)
@@ -6085,7 +6085,7 @@ def convert_cov_options(document):
def revert_linggloss2(document):
"Revert gloss with new args to ERT"
-if not "linguistics" in document.get_module_list():
+if "linguistics" not in document.get_module_list():
return
cov_req = False
@@ -6304,7 +6304,7 @@ def revert_exarg2(document):
def revert_exarg2(document):
"Revert linguistic examples with new arguments to ERT"
-if not "linguistics" in document.get_module_list():
+if "linguistics" not in document.get_module_list():
return
cov_req = False

View File

@@ -21,9 +21,9 @@ import imp
lyx2lyx = imp.load_source("lyx2lyx", "lyx2lyx", open("lyx2lyx"))
# Profiler used in the study
-import hotshot, hotshot.stats
+import hotshot
+import hotshot.stats
import sys
import os
"""

View File

@@ -17,7 +17,9 @@
"Import unicode_reps from this module for access to the unicode<->LaTeX mapping."
-import sys, os, re, codecs
+import os
+import re
+import codecs
def read_unicodesymbols():