Various python fixes suggested by pyupgrade.
Patch from Jose. https://www.mail-archive.com/lyx-devel@lists.lyx.org/msg217770.html
commit d79637a88e
parent 6031666b01
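The hunks below repeat a few mechanical patterns: raw strings for backslash-heavy regexes, docstrings and LyX/LaTeX tokens, a dict comprehension instead of dict() over a generator, and a set literal instead of set() over a one-element tuple. As a reading aid, here is a minimal standalone sketch of the same patterns in isolation (illustrative only, not code from the repository; the sample data is invented):

import re

# Backslash escapes such as "\l" or "\s" in ordinary string literals trigger
# invalid-escape warnings in recent Python versions; raw strings keep the
# backslashes literal, which is what regexes and LyX/LaTeX tokens need.
rx = re.compile(r"\\begin_layout \s*(\w+)")

# dict() wrapped around a generator expression becomes a dict comprehension.
unicode_reps = [("\\textquotedbl{}", '"')]  # invented sample data
licr_table = {ord(ch): cmd for cmd, ch in unicode_reps[::-1]}

# set((x,)) around a one-element tuple becomes a set literal.
language = "arabic_arabtex"
used_languages = {language}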
@@ -145,7 +145,7 @@ def insert_to_preamble(document, text, index = 0):
 # A dictionary of Unicode->LICR mappings for use in a Unicode string's translate() method
 # Created from the reversed list to keep the first of alternative definitions.
-licr_table = dict((ord(ch), cmd) for cmd, ch in unicode_reps[::-1])
+licr_table = {ord(ch): cmd for cmd, ch in unicode_reps[::-1]}
 
 def put_cmd_in_ert(cmd, is_open=False, as_paragraph=False):
 """
@@ -112,7 +112,7 @@ def update_inset_label(document):
 i = find_token(lines, '\\begin_inset Label', i)
 if i == -1:
 return
-lines[i] = '\\begin_inset LatexCommand \label{' + lines[i][19:] + '}'
+lines[i] = '\\begin_inset LatexCommand \\label{' + lines[i][19:] + '}'
 i = i + 1
@@ -69,7 +69,7 @@ def find_beginning_of_inset(lines, i):
 def find_end_of_inset(lines, i):
-" Finds the matching \end_inset"
+r" Finds the matching \end_inset"
 return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
@@ -143,7 +143,7 @@ def get_width(mo):
 def remove_oldfloat(document):
-" Change \begin_float .. \end_float into \begin_inset Float .. \end_inset"
+r" Change \begin_float .. \end_float into \begin_inset Float .. \end_inset"
 lines = document.body
 i = 0
 while True:
@@ -250,7 +250,7 @@ def remove_pextra(document):
 if flag:
 flag = 0
 if hfill:
-start = ["","\hfill",""]+start
+start = ["",r"\hfill",""]+start
 else:
 start = ['\\layout %s' % document.default_layout,''] + start
@@ -324,7 +324,7 @@ def remove_oldert(document):
 new = []
 new2 = []
 if check_token(lines[i], "\\layout LaTeX"):
-new = ['\layout %s' % document.default_layout, "", ""]
+new = [r'\layout %s' % document.default_layout, "", ""]
 
 k = i+1
 while True:
@@ -808,7 +808,7 @@ def change_infoinset(document):
 note_lines = [txt]+note_lines
 
 for line in note_lines:
-new = new + ['\layout %s' % document.default_layout, ""]
+new = new + [r'\layout %s' % document.default_layout, ""]
 tmp = line.split('\\')
 new = new + [tmp[0]]
 for x in tmp[1:]:
@@ -27,7 +27,7 @@ from parser_tools import find_token, find_end_of, get_value,\
 # Private helper functions
 
 def find_end_of_inset(lines, i):
-"Finds the matching \end_inset"
+r"Finds the matching \end_inset"
 return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
@@ -81,7 +81,7 @@ def get_next_paragraph(lines, i, format):
 def find_end_of_inset(lines, i):
-"Finds the matching \end_inset"
+r"Finds the matching \end_inset"
 return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
 
 def del_token(lines, token, start, end):
@@ -103,7 +103,7 @@ def del_token(lines, token, start, end):
 ####################################################################
 
 def remove_color_default(document):
-" Remove \color default"
+r" Remove \color default"
 i = 0
 while True:
 i = find_token(document.body, "\\color default", i)
@@ -114,12 +114,12 @@ def remove_color_default(document):
 def add_end_header(document):
-" Add \end_header"
+r" Add \end_header"
 document.header.append("\\end_header");
 
 
 def rm_end_header(document):
-" Remove \end_header"
+r" Remove \end_header"
 i = find_token(document.header, "\\end_header", 0)
 if i == -1:
 return
@@ -169,14 +169,14 @@ def revert_amsmath(document):
 def convert_spaces(document):
-" \SpecialChar ~ -> \InsetSpace ~"
+r" \SpecialChar ~ -> \InsetSpace ~"
 for i in range(len(document.body)):
 document.body[i] = document.body[i].replace("\\SpecialChar ~",
 "\\InsetSpace ~")
 
 
 def revert_spaces(document):
-" \InsetSpace ~ -> \SpecialChar ~"
+r" \InsetSpace ~ -> \SpecialChar ~"
 regexp = re.compile(r'(.*)(\\InsetSpace\s+)(\S+)')
 i = 0
 while True:
@@ -197,18 +197,18 @@ def revert_spaces(document):
 def rename_spaces(document):
-""" \InsetSpace \, -> \InsetSpace \thinspace{}
-\InsetSpace \space -> \InsetSpace \space{}"""
+""" \\InsetSpace \\, -> \\InsetSpace \thinspace{}
+\\InsetSpace \\space -> \\InsetSpace \\space{}"""
 for i in range(len(document.body)):
 document.body[i] = document.body[i].replace("\\InsetSpace \\space",
 "\\InsetSpace \\space{}")
-document.body[i] = document.body[i].replace("\\InsetSpace \,",
+document.body[i] = document.body[i].replace("\\InsetSpace \\,",
 "\\InsetSpace \\thinspace{}")
 
 
 def revert_space_names(document):
-""" \InsetSpace \thinspace{} -> \InsetSpace \,
-\InsetSpace \space{} -> \InsetSpace \space"""
+""" \\InsetSpace \thinspace{} -> \\InsetSpace \\,
+\\InsetSpace \\space{} -> \\InsetSpace \\space"""
 for i in range(len(document.body)):
 document.body[i] = document.body[i].replace("\\InsetSpace \\space{}",
 "\\InsetSpace \\space")
@@ -262,7 +262,7 @@ def revert_bibtex(document):
 def remove_insetparent(document):
-" Remove \lyxparent"
+r" Remove \lyxparent"
 i = 0
 while True:
 i = find_token(document.body, "\\begin_inset LatexCommand \\lyxparent", i)
@@ -428,7 +428,7 @@ def revert_comment(document):
 def add_end_layout(document):
-" Add \end_layout"
+r" Add \end_layout"
 i = find_token(document.body, '\\layout', 0)
 
 if i == -1:
@@ -502,7 +502,7 @@ def add_end_layout(document):
 def rm_end_layout(document):
-" Remove \end_layout"
+r" Remove \end_layout"
 i = 0
 while True:
 i = find_token(document.body, '\\end_layout', i)
@@ -544,7 +544,7 @@ def rm_body_changes(document):
 def layout2begin_layout(document):
-" \layout -> \begin_layout "
+r" \layout -> \begin_layout "
 i = 0
 while True:
 i = find_token(document.body, '\\layout', i)
@@ -556,7 +556,7 @@ def layout2begin_layout(document):
 def begin_layout2layout(document):
-" \begin_layout -> \layout "
+r" \begin_layout -> \layout "
 i = 0
 while True:
 i = find_token(document.body, '\\begin_layout', i)
@@ -1051,7 +1051,7 @@ def convert_minipage(document):
 # convert the inner_position
 if document.body[i][:14] == "inner_position":
 innerpos = inner_pos[int(document.body[i][15])]
-del document.body[i]
+del document.body[i]
 else:
 innerpos = inner_pos[0]
@@ -1972,9 +1972,9 @@ def convert_names(document):
 '\\begin_layout %s' % document.default_layout,
 "",
 "%s" % firstname,
-"\end_layout",
+r"\end_layout",
 "",
-"\end_inset",
+r"\end_inset",
 "",
 "",
 "\\begin_inset CharStyle Surname",
@@ -2229,7 +2229,7 @@ def use_x_binary(document):
 def normalize_paragraph_params(document):
 " Place all the paragraph parameters in their own line. "
 body = document.body
 
 allowed_parameters = '\\paragraph_spacing', '\\noindent', \
 '\\align', '\\labelwidthstring', "\\start_of_appendix", \
 "\\leftindent"
@@ -421,7 +421,7 @@ def revert_unicode_line(document, i, insets, spec_chars, replacement_character =
 else:
 if insets and insets[-1] == "Formula":
 # avoid putting an ERT in a math; instead put command as text
-command = command.replace('\\\\', '\mathrm{')
+command = command.replace('\\\\', r'\mathrm{')
 command = command + '}'
 elif not insets or insets[-1] != "ERT":
 # add an ERT inset with the replacement character
@@ -491,7 +491,7 @@ def revert_cs_label(document):
 def convert_bibitem(document):
-""" Convert
+r""" Convert
 \bibitem [option]{argument}
 
 to
@@ -576,16 +576,16 @@ commandparams_info = {
 def convert_commandparams(document):
 """ Convert
 
-\begin_inset LatexCommand \cmdname[opt1][opt2]{arg}
-\end_inset
+\\begin_inset LatexCommand \\cmdname[opt1][opt2]{arg}
+\\end_inset
 
 to
 
-\begin_inset LatexCommand cmdname
+\\begin_inset LatexCommand cmdname
 name1 "opt1"
 name2 "opt2"
 name3 "arg"
-\end_inset
+\\end_inset
 
 name1, name2 and name3 can be different for each command.
 """
@@ -896,7 +896,7 @@ def revert_cleardoublepage(document):
 def convert_lyxline(document):
-" remove fontsize commands for \lyxline "
+r" remove fontsize commands for \lyxline "
 # The problematic is: The old \lyxline definition doesn't handle the fontsize
 # to change the line thickness. The new definiton does this so that imported
 # \lyxlines would have a different line thickness. The eventual fontsize command
@@ -1687,7 +1687,7 @@ def revert_CJK(document):
 def revert_preamble_listings_params(document):
-" Revert preamble option \listings_params "
+r" Revert preamble option \listings_params "
 i = find_token(document.header, "\\listings_params", 0)
 if i != -1:
 document.preamble.append('\\usepackage{listings}')
@@ -2005,10 +2005,10 @@ def convert_arabic (document):
 document.header[i] = "\\language arabic_arabtex"
 i = 0
 while i < len(document.body):
-h = document.body[i].find("\lang arabic", 0, len(document.body[i]))
+h = document.body[i].find(r"\lang arabic", 0, len(document.body[i]))
 if (h != -1):
 # change the language name
-document.body[i] = '\lang arabic_arabtex'
+document.body[i] = r'\lang arabic_arabtex'
 i = i + 1
@@ -2020,10 +2020,10 @@ def revert_arabic (document):
 document.header[i] = "\\language arabic"
 i = 0
 while i < len(document.body):
-h = document.body[i].find("\lang arabic_arabtex", 0, len(document.body[i]))
+h = document.body[i].find(r"\lang arabic_arabtex", 0, len(document.body[i]))
 if (h != -1):
 # change the language name
-document.body[i] = '\lang arabic'
+document.body[i] = r'\lang arabic'
 i = i + 1
@@ -152,7 +152,7 @@ def extract_argument(line):
 if not line:
 return (None, "")
 
-bracere = re.compile("(\s*)(.*)")
+bracere = re.compile(r"(\s*)(.*)")
 n = bracere.match(line)
 whitespace = n.group(1)
 stuff = n.group(2)
@@ -277,7 +277,7 @@ def latex2lyx(data, isindex):
 data = data.replace('\\\\', '\\')
 
 # Math:
-mathre = re.compile('^(.*?)(\$.*?\$)(.*)')
+mathre = re.compile(r'^(.*?)(\$.*?\$)(.*)')
 lines = data.split('\n')
 for line in lines:
 #document.warning("LINE: " + line)
@@ -946,7 +946,7 @@ def remove_inzip_options(document):
 def convert_inset_command(document):
-"""
+r"""
 Convert:
 \begin_inset LatexCommand cmd
 to
@@ -983,7 +983,7 @@ def convert_inset_command(document):
 def revert_inset_command(document):
-"""
+r"""
 Convert:
 \begin_inset CommandInset InsetType
 LatexCommand cmd
@@ -1558,7 +1558,7 @@ def convert_usorbian(document):
 def convert_macro_global(document):
-"Remove TeX code command \global when it is in front of a macro"
+r"Remove TeX code command \global when it is in front of a macro"
 # math macros are nowadays already defined \global, so that an additional
 # \global would make the document uncompilable, see
 # http://www.lyx.org/trac/ticket/5371
@@ -2339,7 +2339,7 @@ def revert_wrapplacement(document):
 def remove_extra_embedded_files(document):
-" Remove \extra_embedded_files from buffer params "
+r" Remove \extra_embedded_files from buffer params "
 i = find_token(document.header, '\\extra_embedded_files', 0)
 if i == -1:
 return
@@ -1119,7 +1119,7 @@ def revert_multirow(document):
 def convert_math_output(document):
-" Convert \html_use_mathml to \html_math_output "
+r" Convert \html_use_mathml to \html_math_output "
 i = find_token(document.header, "\\html_use_mathml", 0)
 if i == -1:
 return
@@ -1136,7 +1136,7 @@ def convert_math_output(document):
 def revert_math_output(document):
-" Revert \html_math_output to \html_use_mathml "
+r" Revert \html_math_output to \html_use_mathml "
 i = find_token(document.header, "\\html_math_output", 0)
 if i == -1:
 return
@@ -1619,8 +1619,8 @@ def revert_IEEEtran(document):
 def convert_prettyref(document):
 " Converts prettyref references to neutral formatted refs "
-re_ref = re.compile("^\s*reference\s+\"(\w+):(\S+)\"")
-nm_ref = re.compile("^\s*name\s+\"(\w+):(\S+)\"")
+re_ref = re.compile("^\\s*reference\\s+\"(\\w+):(\\S+)\"")
+nm_ref = re.compile("^\\s*name\\s+\"(\\w+):(\\S+)\"")
 
 i = 0
 while True:
@@ -1641,8 +1641,8 @@ def convert_prettyref(document):
 def revert_refstyle(document):
 " Reverts neutral formatted refs to prettyref "
-re_ref = re.compile("^reference\s+\"(\w+):(\S+)\"")
-nm_ref = re.compile("^\s*name\s+\"(\w+):(\S+)\"")
+re_ref = re.compile("^reference\\s+\"(\\w+):(\\S+)\"")
+nm_ref = re.compile("^\\s*name\\s+\"(\\w+):(\\S+)\"")
 
 i = 0
 while True:
@@ -1723,7 +1723,7 @@ def remove_Nameref(document):
 def revert_mathrsfs(document):
-" Load mathrsfs if \mathrsfs us use in the document "
+r" Load mathrsfs if \mathrsfs us use in the document "
 i = 0
 for line in document.body:
 if line.find("\\mathscr{") != -1:
@@ -2079,7 +2079,7 @@ def convert_passthru(document):
 if not check_passthru:
 return
 
-rx = re.compile("\\\\begin_layout \s*(\w+)")
+rx = re.compile("\\\\begin_layout \\s*(\\w+)")
 beg = 0
 for lay in ["Chunk", "Scrap"]:
 while True:
@@ -2143,7 +2143,7 @@ def revert_passthru(document):
 " http://www.mail-archive.com/lyx-devel@lists.lyx.org/msg161298.html "
 if not check_passthru:
 return
-rx = re.compile("\\\\begin_layout \s*(\w+)")
+rx = re.compile("\\\\begin_layout \\s*(\\w+)")
 beg = 0
 for lay in ["Chunk", "Scrap"]:
 while True:
@@ -2435,7 +2435,7 @@ def revert_langpack(document):
 def convert_langpack(document):
 " Add \\language_package parameter "
-i = find_token(document.header, "\language" , 0)
+i = find_token(document.header, r"\language" , 0)
 if i == -1:
 document.warning("Malformed document. No \\language defined!")
 return
@@ -59,7 +59,7 @@ def revert_Argument_to_TeX_brace(document, line, endline, n, nmax, environment,
 usage:
 revert_Argument_to_TeX_brace(document, LineOfBegin, LineOfEnd, StartArgument, EndArgument, isEnvironment, isOpt)
 LineOfBegin is the line of the \\begin_layout or \\begin_inset statement
-LineOfEnd is the line of the \end_layout or \end_inset statement, if "0" is given, the end of the file is used instead
+LineOfEnd is the line of the \\end_layout or \\end_inset statement, if "0" is given, the end of the file is used instead
 StartArgument is the number of the first argument that needs to be converted
 EndArgument is the number of the last argument that needs to be converted or the last defined one
 isEnvironment must be true, if the layout is for a LaTeX environment
@@ -1060,7 +1060,7 @@ def convert_table_rotation(document):
 def convert_listoflistings(document):
-'Convert ERT \lstlistoflistings to TOC lstlistoflistings inset'
+r'Convert ERT \lstlistoflistings to TOC lstlistoflistings inset'
 # We can support roundtrip because the command is so simple
 i = 0
 while True:
@@ -39,7 +39,7 @@ from parser_tools import (check_token, del_complete_lines,
 # Private helper functions
 
 def revert_Argument_to_TeX_brace(document, line, endline, n, nmax, environment, opt, nolastopt):
-"""
+r"""
 Reverts an InsetArgument to TeX-code
 usage:
 revert_Argument_to_TeX_brace(document, LineOfBegin, LineOfEnd, StartArgument, EndArgument, isEnvironment, isOpt, notLastOpt)
@@ -599,7 +599,7 @@ def revert_question_env(document):
 document.body[i : i + 1] = ["\\begin_layout Standard", ""] + begcmd
 
-add_to_preamble(document, "\\providecommand{\questionname}{Question}")
+add_to_preamble(document, "\\providecommand{\\questionname}{Question}")
 
 if starred:
 add_to_preamble(document, "\\theoremstyle{plain}\n" \
@@ -1354,7 +1354,7 @@ def revert_jss(document):
 if document.textclass != "jss":
 return
 
-# at first revert the inset layouts because
+# at first revert the inset layouts because
 # they can be part of the In_Preamble layouts
 il_dict = {
 "Pkg" : "pkg",
@@ -2045,7 +2045,7 @@ def revert_moderncv_1(document):
 i += 1
 continue
 content = lyx2latex(document, document.body[i:j + 1])
-add_to_preamble(document, ["\\setlength{\hintscolumnwidth}{" + content + "}"])
+add_to_preamble(document, ["\\setlength{\\hintscolumnwidth}{" + content + "}"])
 del document.body[i:j + 1]
 # now change the new styles to the obsolete ones
 # \name
@@ -2383,7 +2383,7 @@ def revert_solution(document):
 add_to_preamble(document, "\\%s{%s}[thm]{\\protect\\solutionname}" % \
 (theoremName, LaTeXName))
 
-add_to_preamble(document, "\\providecommand{\solutionname}{Solution}")
+add_to_preamble(document, "\\providecommand{\\solutionname}{Solution}")
 i = j
@@ -173,7 +173,7 @@ beamer_article_styles = [
 "Preamble",
 " \\usepackage{beamerarticle,pgf}",
 " % this default might be overridden by plain title style",
-" \\newcommand\makebeamertitle{\\frame{\\maketitle}}%",
+" \\newcommand\\makebeamertitle{\\frame{\\maketitle}}%",
 " \\AtBeginDocument{",
 " \\let\\origtableofcontents=\\tableofcontents",
 " \\def\\tableofcontents{\\@ifnextchar[{\\origtableofcontents}{\\gobbletableofcontents}}",
@@ -1627,12 +1627,12 @@ def convert_dashligatures(document):
 continue
 
 # literal dash followed by a non-white-character or no-break space:
-if re.search(u"[\u2013\u2014]([\S\u00A0\u202F\u2060]|$)",
+if re.search(u"[\u2013\u2014]([\\S\u00A0\u202F\u2060]|$)",
 line, flags=re.UNICODE):
 has_literal_dashes = True
 # ligature dash followed by non-white-char or no-break space on next line:
 if (re.search(r"(\\twohyphens|\\threehyphens)", line) and
-re.match(u"[\S\u00A0\u202F\u2060]", lines[i+1], flags=re.UNICODE)):
+re.match(u"[\\S\u00A0\u202F\u2060]", lines[i+1], flags=re.UNICODE)):
 has_ligature_dashes = True
 if has_literal_dashes and has_ligature_dashes:
 # TODO: insert a warning note in the document?
@@ -1845,7 +1845,7 @@ allowbreak_emulation = [r"\begin_inset space \hspace{}",
 r""]
 
 def convert_allowbreak(document):
-" Zero widths Space-inset -> \SpecialChar allowbreak. "
+r" Zero widths Space-inset -> \SpecialChar allowbreak. "
 lines = document.body
 i = find_complete_lines(lines, allowbreak_emulation, 2)
 while i != -1:
@@ -1854,7 +1854,7 @@ def convert_allowbreak(document):
 def revert_allowbreak(document):
-" \SpecialChar allowbreak -> Zero widths Space-inset. "
+r" \SpecialChar allowbreak -> Zero widths Space-inset. "
 i = 1
 lines = document.body
 while i < len(lines):
@@ -752,7 +752,7 @@ def revert_floatalignment(document):
 i += 1
 
 def revert_tuftecite(document):
-"""Revert \cite commands in tufte classes"""
+r"""Revert \cite commands in tufte classes"""
 
 tufte = ["tufte-book", "tufte-handout"]
 if document.textclass not in tufte:
@@ -1223,7 +1223,7 @@ def revert_dateinfo(document):
 if len(datecomps) > 1:
 argv = datecomps[0]
 isodate = datecomps[1]
-m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
+m = re.search(r'(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
 if m:
 dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
 # FIXME if we had the path to the original document (not the one in the tmp dir),
@@ -1403,11 +1403,11 @@ def revert_timeinfo(document):
 if len(timecomps) > 1:
 argv = timecomps[0]
 isotime = timecomps[1]
-m = re.search('(\d\d):(\d\d):(\d\d)', isotime)
+m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
 if m:
 tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
 else:
-m = re.search('(\d\d):(\d\d)', isotime)
+m = re.search(r'(\d\d):(\d\d)', isotime)
 if m:
 tme = time(int(m.group(1)), int(m.group(2)))
 # FIXME if we had the path to the original document (not the one in the tmp dir),
@@ -1875,7 +1875,7 @@ def revert_new_languages(document):
 "korean": ("", "korean"),
 }
 if document.language in new_languages:
-used_languages = set((document.language, ))
+used_languages = {document.language}
 else:
 used_languages = set()
 i = 0
@@ -4100,7 +4100,7 @@ def revert_branch_darkcols(document):
 break
 k = find_token(document.header, "\\color", i, j)
 if k != -1:
-m = re.search('\\\\color (\S+) (\S+)', document.header[k])
+m = re.search('\\\\color (\\S+) (\\S+)', document.header[k])
 if m:
 document.header[k] = "\\color " + m.group(1)
 i += 1
@@ -4222,7 +4222,7 @@ def revert_vcolumns2(document):
 el = find_token(document.body, '\\strikeout on', flt, elt)
 if el != -1:
 extralines.append("\\strikeout default")
-document.body[elt:elt+1] = extralines + put_cmd_in_ert("\\end{cellvarwidth}") + ["\end_layout"]
+document.body[elt:elt+1] = extralines + put_cmd_in_ert("\\end{cellvarwidth}") + [r"\end_layout"]
 parlang = -1
 for q in range(flt, elt):
 if document.body[q] != "" and document.body[q][0] != "\\":
@@ -121,8 +121,8 @@ find_end_of_layout(lines, i):
 find_end_of_sequence(lines, i):
 Find the end of the sequence of layouts of the same kind.
 Considers nesting. If the last paragraph in sequence is nested,
-the position of the last \end_deeper is returned, else
-the position of the last \end_layout.
+the position of the last \\end_deeper is returned, else
+the position of the last \\end_layout.
 
 is_in_inset(lines, i, inset, default=(-1,-1)):
 Check if line i is in an inset of the given type.
@@ -139,7 +139,7 @@ is_in_inset(lines, i, inset, default=(-1,-1)):
 get_containing_inset(lines, i):
 Finds out what kind of inset line i is within. Returns a
-list containing what follows \begin_inset on the line
+list containing what follows \\begin_inset on the line
 on which the inset begins, plus the starting and ending line.
 Returns False on any kind of error or if it isn't in an inset.
 So get_containing_inset(document.body, i) might return:
@@ -470,7 +470,7 @@ def set_bool_value(lines, token, value, start=0, end=0):
 def get_option_value(line, option):
-rx = option + '\s*=\s*"([^"]+)"'
+rx = option + r'\s*=\s*"([^"]+)"'
 rx = re.compile(rx)
 m = rx.search(line)
 if not m:
@@ -479,12 +479,12 @@ def get_option_value(line, option):
 def set_option_value(line, option, value):
-rx = '(' + option + '\s*=\s*")[^"]+"'
+rx = '(' + option + r'\s*=\s*")[^"]+"'
 rx = re.compile(rx)
 m = rx.search(line)
 if not m:
 return line
-return re.sub(rx, '\g<1>' + value + '"', line)
+return re.sub(rx, r'\g<1>' + value + '"', line)
 
 
 def del_token(lines, token, start=0, end=0):
@@ -612,7 +612,7 @@ def is_in_inset(lines, i, inset, default=(-1,-1)):
 def get_containing_inset(lines, i):
 '''
 Finds out what kind of inset line i is within. Returns a
-list containing (i) what follows \begin_inset on the line
+list containing (i) what follows \\begin_inset on the line
 on which the inset begins, plus the starting and ending line.
 Returns False on any kind of error or if it isn't in an inset.
 '''