# -*- coding: utf-8 -*-
# This file is part of lyx2lyx
# Copyright (C) 2018 The LyX team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
""" Convert files to the file format generated by lyx 2.4"""
import re, string
import unicodedata
import sys, os
from datetime import (datetime, date, time)
# Uncomment only what you need to import, please.
from parser_tools import (count_pars_in_inset, find_end_of_inset, find_end_of_layout,
find_token, get_bool_value, get_option_value, get_value, get_quoted_value)
# del_token, del_value, del_complete_lines,
# find_complete_lines, find_end_of,
# find_re, find_substring, find_token_backwards,
# get_containing_inset, get_containing_layout,
# is_in_inset, set_bool_value
# find_tokens, find_token_exact, check_token
from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble)
# revert_font_attrs, insert_to_preamble, latex_length
# get_ert, lyx2latex, lyx2verbatim, length_in_bp, convert_info_insets
# revert_flex_inset, hex2ratio, str2bool
####################################################################
# Private helper functions
###############################################################################
###
### Conversion and reversion routines
###
###############################################################################
def removeFrontMatterStyles(document):
" Remove styles Begin/EndFromatter"
layouts = ['BeginFrontmatter', 'EndFrontmatter']
for layout in layouts:
i = 0
while True:
i = find_token(document.body, '\\begin_layout ' + layout, i)
if i == -1:
break
j = find_end_of_layout(document.body, i)
if j == -1:
document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
i += 1
continue
while i > 0 and document.body[i-1].strip() == '':
i -= 1
while document.body[j+1].strip() == '':
j = j + 1
document.body[i:j+1] = ['']
def addFrontMatterStyles(document):
" Use styles Begin/EndFrontmatter for elsarticle"
def insertFrontmatter(prefix, line):
above = line
while above > 0 and document.body[above-1].strip() == '':
above -= 1
below = line
while document.body[below].strip() == '':
below += 1
document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
'\\begin_inset Note Note',
'status open', '',
'\\begin_layout Plain Layout',
'Keep this empty!',
'\\end_layout', '',
'\\end_inset', '', '',
'\\end_layout', '']
if document.textclass == "elsarticle":
layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
first = -1
last = -1
for layout in layouts:
i = 0
while True:
i = find_token(document.body, '\\begin_layout ' + layout, i)
if i == -1:
break
k = find_end_of_layout(document.body, i)
if k == -1:
document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
                    i += 1
continue
if first == -1 or i < first:
first = i
if last == -1 or last <= k:
last = k+1
i = k+1
if first == -1:
return
insertFrontmatter('End', last)
insertFrontmatter('Begin', first)
def convert_lst_literalparam(document):
" Add param literal to include inset "
i = 0
while True:
i = find_token(document.body, '\\begin_inset CommandInset include', i)
if i == -1:
break
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
i += 1
continue
while i < j and document.body[i].strip() != '':
i += 1
document.body.insert(i, "literal \"true\"")
def revert_lst_literalparam(document):
" Remove param literal from include inset "
i = 0
while True:
i = find_token(document.body, '\\begin_inset CommandInset include', i)
if i == -1:
break
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
i += 1
continue
k = find_token(document.body, 'literal', i, j)
if k == -1:
i += 1
continue
del document.body[k]
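
# Illustrative round trip for the two routines above (hypothetical inset
# content, shown here only as a comment):
#
#   \begin_inset CommandInset include
#   LatexCommand lstinputlisting
#   filename "example.c"
#   literal "true"      <- added by convert_lst_literalparam, removed
#   \end_inset             again by revert_lst_literalparam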
def revert_paratype(document):
" Revert ParaType font definitions to LaTeX "
if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
preamble = ""
i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
i2 = find_token(document.header, "\\font_sans \"default\"", 0)
i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
sfval = get_value(document.header, "\\font_sf_scale", 0)
# cutoff " 100"
sfval = sfval[:-4]
sfoption = ""
if sfval != "100":
sfoption = "scaled=" + format(float(sfval) / 100, '.2f')
k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
ttval = get_value(document.header, "\\font_tt_scale", 0)
# cutoff " 100"
ttval = ttval[:-4]
ttoption = ""
if ttval != "100":
ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
        if i1 != -1 and i2 != -1 and i3 != -1:
            add_to_preamble(document, ["\\usepackage{paratype}"])
        else:
            if i1 != -1:
                add_to_preamble(document, ["\\usepackage{PTSerif}"])
                document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
            if j != -1:
                if sfoption != "":
                    add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
                else:
                    add_to_preamble(document, ["\\usepackage{PTSans}"])
                document.header[j] = document.header[j].replace("PTSans-TLF", "default")
            if k != -1:
                if ttoption != "":
                    add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
                else:
                    add_to_preamble(document, ["\\usepackage{PTMono}"])
                document.header[k] = document.header[k].replace("PTMono-TLF", "default")
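
# Worked example for the scale handling above (header values assumed for
# illustration): a header line "\font_sf_scale 90 100" yields sfval "90"
# once the trailing " 100" is cut off, so the sans font is reverted to
#   \usepackage[scaled=0.90]{PTSans}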
def revert_xcharter(document):
" Revert XCharter font definitions to LaTeX "
i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
if i == -1:
return
# replace unsupported font setting
document.header[i] = document.header[i].replace("xcharter", "default")
# no need for preamble code with system fonts
if get_bool_value(document.header, "\\use_non_tex_fonts"):
return
# transfer old style figures setting to package options
j = find_token(document.header, "\\font_osf true")
if j != -1:
options = "[osf]"
document.header[j] = "\\font_osf false"
else:
options = ""
    add_to_preamble(document, ["\\usepackage%s{XCharter}" % options])
def revert_lscape(document):
" Reverts the landscape environment (Landscape module) to TeX-code "
if not "landscape" in document.get_module_list():
return
i = 0
while True:
i = find_token(document.body, "\\begin_inset Flex Landscape", i)
if i == -1:
return
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed LyX document: Can't find end of Landscape inset")
i += 1
continue
if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
add_to_preamble(document, ["\\usepackage{afterpage}"])
else:
document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
add_to_preamble(document, ["\\usepackage{pdflscape}"])
# no need to reset i
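
# For reference, the ERT produced above brackets the former inset contents
# like this (floating variant shown):
#   \afterpage{\begin{landscape} ... \end{landscape}}
# The non-floating variant gets plain \begin{landscape} ... \end{landscape}
# and loads pdflscape instead of afterpage.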
def convert_fontenc(document):
" Convert default fontenc setting "
i = find_token(document.header, "\\fontencoding global", 0)
if i == -1:
return
document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
" Revert default fontenc setting "
i = find_token(document.header, "\\fontencoding auto", 0)
if i == -1:
return
document.header[i] = document.header[i].replace("auto", "global")
def revert_nospellcheck(document):
" Remove nospellcheck font info param "
i = 0
while True:
i = find_token(document.body, '\\nospellcheck', i)
if i == -1:
return
del document.body[i]
def revert_floatpclass(document):
" Remove float placement params 'document' and 'class' "
    i = find_token(document.header, "\\float_placement class", 0)
if i != -1:
del document.header[i]
i = 0
while True:
i = find_token(document.body, '\\begin_inset Float', i)
if i == -1:
break
j = find_end_of_inset(document.body, i)
k = find_token(document.body, 'placement class', i, i + 2)
if k == -1:
k = find_token(document.body, 'placement document', i, i + 2)
if k != -1:
del document.body[k]
i = j
continue
del document.body[k]
def revert_floatalignment(document):
" Remove float alignment params "
    i = find_token(document.header, "\\float_alignment", 0)
galignment = ""
if i != -1:
galignment = get_value(document.header, "\\float_alignment", i)
del document.header[i]
i = 0
while True:
i = find_token(document.body, '\\begin_inset Float', i)
if i == -1:
break
j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
            i += 1
            continue
k = find_token(document.body, 'alignment', i, i + 4)
if k == -1:
i = j
continue
alignment = get_value(document.body, "alignment", k)
if alignment == "document":
alignment = galignment
del document.body[k]
l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
if l == -1:
document.warning("Can't find float layout!")
i = j
continue
alcmd = []
if alignment == "left":
alcmd = put_cmd_in_ert("\\raggedright{}")
elif alignment == "center":
alcmd = put_cmd_in_ert("\\centering{}")
elif alignment == "right":
alcmd = put_cmd_in_ert("\\raggedleft{}")
if len(alcmd) > 0:
document.body[l+1:l+1] = alcmd
i = j
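
# Example for the alignment revert above (values assumed): a float with
# "alignment center" gets the ERT
#   \centering{}
# inserted right after its first Plain Layout line, while "document"
# falls back to the global \float_alignment header value.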
def revert_tuftecite(document):
" Revert \cite commands in tufte classes "
tufte = ["tufte-book", "tufte-handout"]
if document.textclass not in tufte:
return
i = 0
    while True:
i = find_token(document.body, "\\begin_inset CommandInset citation", i)
if i == -1:
break
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Can't find end of citation inset at line %d!!" %(i))
i += 1
continue
k = find_token(document.body, "LatexCommand", i, j)
if k == -1:
document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
i = j + 1
continue
cmd = get_value(document.body, "LatexCommand", k)
if cmd != "cite":
i = j + 1
continue
pre = get_quoted_value(document.body, "before", i, j)
post = get_quoted_value(document.body, "after", i, j)
key = get_quoted_value(document.body, "key", i, j)
if not key:
document.warning("Citation inset at line %d does not have a key!" %(i))
key = "???"
# Replace command with ERT
res = "\\cite"
if pre:
res += "[" + pre + "]"
if post:
res += "[" + post + "]"
elif pre:
res += "[]"
res += "{" + key + "}"
document.body[i:j+1] = put_cmd_in_ert([res])
i = j + 1
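
# Example of the ERT built above (hypothetical citation values): with
# before "see", after "p. 5" and key "knuth1984" the inset becomes
#   \cite[see][p. 5]{knuth1984}
# and with only a before text an empty post-note is kept so the bracket
# is not misread as the post-note:
#   \cite[see][]{knuth1984}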
def revert_stretchcolumn(document):
" We remove the column varwidth flags or everything else will become a mess. "
i = 0
while True:
i = find_token(document.body, "\\begin_inset Tabular", i)
if i == -1:
return
j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            i += 1
            continue
for k in range(i, j):
if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
document.body[k] = document.body[k].replace(' varwidth="true"', '')
i = i + 1
def revert_vcolumns(document):
" Revert standard columns with line breaks etc. "
i = 0
needvarwidth = False
needarray = False
try:
while True:
i = find_token(document.body, "\\begin_inset Tabular", i)
if i == -1:
return
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed LyX document: Could not find end of tabular.")
i += 1
continue
# Collect necessary column information
m = i + 1
nrows = int(document.body[i+1].split('"')[3])
ncols = int(document.body[i+1].split('"')[5])
col_info = []
for k in range(ncols):
m = find_token(document.body, "<column", m)
width = get_option_value(document.body[m], 'width')
varwidth = get_option_value(document.body[m], 'varwidth')
alignment = get_option_value(document.body[m], 'alignment')
special = get_option_value(document.body[m], 'special')
col_info.append([width, varwidth, alignment, special, m])
# Now parse cells
m = i + 1
lines = []
for row in range(nrows):
for col in range(ncols):
m = find_token(document.body, "<cell", m)
multicolumn = get_option_value(document.body[m], 'multicolumn')
multirow = get_option_value(document.body[m], 'multirow')
width = get_option_value(document.body[m], 'width')
rotate = get_option_value(document.body[m], 'rotate')
# Check for: linebreaks, multipars, non-standard environments
begcell = m
endcell = find_token(document.body, "</cell>", begcell)
vcand = False
if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
vcand = True
elif count_pars_in_inset(document.body, begcell + 2) > 1:
vcand = True
elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
vcand = True
if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
needvarwidth = True
alignment = col_info[col][2]
col_line = col_info[col][4]
vval = ""
if alignment == "center":
vval = ">{\\centering}"
elif alignment == "left":
vval = ">{\\raggedright}"
elif alignment == "right":
vval = ">{\\raggedleft}"
if vval != "":
needarray = True
vval += "V{\\linewidth}"
document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
# ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
# with newlines, and we do not want that)
while True:
endcell = find_token(document.body, "</cell>", begcell)
linebreak = False
nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
if nl == -1:
nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
if nl == -1:
break
linebreak = True
nle = find_end_of_inset(document.body, nl)
                        del document.body[nle:nle+1]
if linebreak:
document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
else:
document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
m += 1
i = j + 1
finally:
        if needarray:
            add_to_preamble(document, ["\\usepackage{array}"])
        if needvarwidth:
            add_to_preamble(document, ["\\usepackage{varwidth}"])
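
# Sketch of the effect above (hypothetical table): a left-aligned standard
# column whose cell contains a forced newline receives
#   special=">{\raggedright}V{\linewidth}"
# (hence the array/varwidth preamble code), and the Newline inset itself
# is turned into ERT "\\" (or "\linebreak{}" for linebreak insets).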
def revert_bibencoding(document):
" Revert bibliography encoding "
# Get cite engine
engine = "basic"
i = find_token(document.header, "\\cite_engine", 0)
if i == -1:
document.warning("Malformed document! Missing \\cite_engine")
else:
engine = get_value(document.header, "\\cite_engine", i)
# Check if biblatex
biblatex = False
if engine in ["biblatex", "biblatex-natbib"]:
biblatex = True
# Map lyx to latex encoding names
encodings = {
"utf8" : "utf8",
"utf8x" : "utf8x",
"armscii8" : "armscii8",
"iso8859-1" : "latin1",
"iso8859-2" : "latin2",
"iso8859-3" : "latin3",
"iso8859-4" : "latin4",
"iso8859-5" : "iso88595",
"iso8859-6" : "8859-6",
"iso8859-7" : "iso-8859-7",
"iso8859-8" : "8859-8",
"iso8859-9" : "latin5",
"iso8859-13" : "latin7",
"iso8859-15" : "latin9",
"iso8859-16" : "latin10",
"applemac" : "applemac",
"cp437" : "cp437",
"cp437de" : "cp437de",
"cp850" : "cp850",
"cp852" : "cp852",
"cp855" : "cp855",
"cp858" : "cp858",
"cp862" : "cp862",
"cp865" : "cp865",
"cp866" : "cp866",
"cp1250" : "cp1250",
"cp1251" : "cp1251",
"cp1252" : "cp1252",
"cp1255" : "cp1255",
"cp1256" : "cp1256",
"cp1257" : "cp1257",
"koi8-r" : "koi8-r",
"koi8-u" : "koi8-u",
"pt154" : "pt154",
"utf8-platex" : "utf8",
"ascii" : "ascii"
}
i = 0
bibresources = []
    while True:
i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
if i == -1:
break
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Can't find end of bibtex inset at line %d!!" %(i))
i += 1
continue
encoding = get_quoted_value(document.body, "encoding", i, j)
if not encoding:
i += 1
continue
# remove encoding line
k = find_token(document.body, "encoding", i, j)
if k != -1:
del document.body[k]
# Re-find inset end line
j = find_end_of_inset(document.body, i)
if biblatex:
biblio_options = ""
h = find_token(document.header, "\\biblio_options", 0)
if h != -1:
biblio_options = get_value(document.header, "\\biblio_options", h)
if not "bibencoding" in biblio_options:
document.header[h] += ",bibencoding=%s" % encodings[encoding]
else:
bs = find_token(document.header, "\\biblatex_bibstyle", 0)
if bs == -1:
# this should not happen
document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
else:
document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
else:
document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
i = j + 1
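
# Sketch of the two revert paths above (encoding assumed "iso8859-1", i.e.
# LaTeX name "latin1"): with biblatex, ",bibencoding=latin1" is appended to
# \biblio_options in the header; otherwise the bibtex inset is wrapped in
# ERT as
#   \bgroup\inputencoding{latin1} ... \egroup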
def convert_vcsinfo(document):
" Separate vcs Info inset from buffer Info inset. "
types = {
"vcs-revision" : "revision",
"vcs-tree-revision" : "tree-revision",
"vcs-author" : "author",
"vcs-time" : "time",
"vcs-date" : "date"
}
i = 0
while True:
i = find_token(document.body, "\\begin_inset Info", i)
if i == -1:
return
j = find_end_of_inset(document.body, i + 1)
if j == -1:
document.warning("Malformed LyX document: Could not find end of Info inset.")
i = i + 1
continue
tp = find_token(document.body, 'type', i, j)
tpv = get_quoted_value(document.body, "type", tp)
if tpv != "buffer":
i = i + 1
continue
arg = find_token(document.body, 'arg', i, j)
argv = get_quoted_value(document.body, "arg", arg)
        if argv not in types:
i = i + 1
continue
document.body[tp] = "type \"vcs\""
document.body[arg] = "arg \"" + types[argv] + "\""
i = i + 1
def revert_vcsinfo(document):
" Merge vcs Info inset to buffer Info inset. "
args = ["revision", "tree-revision", "author", "time", "date" ]
i = 0
while True:
i = find_token(document.body, "\\begin_inset Info", i)
if i == -1:
return
j = find_end_of_inset(document.body, i + 1)
if j == -1:
document.warning("Malformed LyX document: Could not find end of Info inset.")
i = i + 1
continue
tp = find_token(document.body, 'type', i, j)
tpv = get_quoted_value(document.body, "type", tp)
if tpv != "vcs":
i = i + 1
continue
arg = find_token(document.body, 'arg', i, j)
argv = get_quoted_value(document.body, "arg", arg)
if argv not in args:
document.warning("Malformed Info inset. Invalid vcs arg.")
i = i + 1
continue
document.body[tp] = "type \"buffer\""
document.body[arg] = "arg \"vcs-" + argv + "\""
i = i + 1
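
# Round-trip example for the two routines above: a 2.4-style inset with
#   type "vcs"    / arg "revision"
# is merged back into the 2.3-style buffer inset
#   type "buffer" / arg "vcs-revision"
# and convert_vcsinfo performs the opposite mapping.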
def revert_dateinfo(document):
" Revert date info insets to static text. "
# FIXME This currently only considers the main language and uses the system locale
# Ideally, it should honor context languages and switch the locale accordingly.
# The date formats for each language using strftime syntax:
# long, short, loclong, locmedium, locshort
dateformats = {
"afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
"albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
"amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"arabic_arabi" : ["%%d %%Y", "%d/%m/%Y", "%d %%Y", "%d/%m/%Y", "%d/%m/%Y"],
"arabic_arabtex" : ["%%d %%Y", "%d/%m/%Y", "%d %%Y", "%d/%m/%Y", "%d/%m/%Y"],
"armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %%Y", "%d %%Y", "%d/%m/%Y"],
"asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
"australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
"bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
"belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
"bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
"brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
"breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
"british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
"canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
"canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
"catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
"chinese-simplified" : ["%Y年%m月%d%A", "%Y/%m/%d", "%Y年%m月%d", "%Y-%m-%d", "%y-%m-%d"],
"chinese-traditional" : ["%Y年%m月%d%A", "%Y/%m/%d", "%Y年%m月%d", "%Y年%m月%d", "%y年%m月%d"],
"coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
"croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
"czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
"danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
"divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
"dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
"english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
"esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
"estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
"farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
"finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
"french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
"galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
"georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
"german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
"german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
"german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
"greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
"icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
"interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
"irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
"italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
"japanese" : ["%Y年%m月%d%A", "%Y/%m/%d", "%Y年%m月%d", "%Y/%m/%d", "%y/%m/%d"],
"japanese-cjk" : ["%Y年%m月%d%A", "%Y/%m/%d", "%Y年%m月%d", "%Y/%m/%d", "%y/%m/%d"],
"kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
"kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
"khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
"korean" : ["%Y년 %m월 %d%A", "%y. %m. %d.", "%Y년 %m월 %d", "%Y. %m. %d.", "%y. %m. %d."],
"kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
"lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
"latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
"latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
"lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
"lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
"macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
"magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
"marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
"mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
"naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
"newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
"norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
"nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
"occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
"polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
"polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
"romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
"romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
"russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
"samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
"sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
"scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
"serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
"slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
"slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
"spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
"spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
"swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
"syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
"telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
"thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
"tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
"turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
"turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
"ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
"uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
"urdu" : ["%%d %%Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
"vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
"welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
}
types = ["date", "fixdate", "moddate" ]
i = 0
i = find_token(document.header, "\\language", 0)
if i == -1:
# this should not happen
document.warning("Malformed LyX document! No \\language header found!")
return
lang = get_value(document.header, "\\language", i)
i = 0
while True:
i = find_token(document.body, "\\begin_inset Info", i)
if i == -1:
return
j = find_end_of_inset(document.body, i + 1)
if j == -1:
document.warning("Malformed LyX document: Could not find end of Info inset.")
i = i + 1
continue
tp = find_token(document.body, 'type', i, j)
tpv = get_quoted_value(document.body, "type", tp)
if tpv not in types:
i = i + 1
continue
arg = find_token(document.body, 'arg', i, j)
argv = get_quoted_value(document.body, "arg", arg)
isodate = ""
dte = date.today()
if tpv == "fixdate":
datecomps = argv.split('@')
if len(datecomps) > 1:
argv = datecomps[0]
isodate = datecomps[1]
                m = re.search(r'(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
if m:
dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
# FIXME if we had the path to the original document (not the one in the tmp dir),
# we could use the mtime.
# elif tpv == "moddate":
# dte = date.fromtimestamp(os.path.getmtime(document.dir))
result = ""
if argv == "ISO":
            result = dte.isoformat()
elif argv == "long":
result = dte.strftime(dateformats[lang][0])
elif argv == "short":
result = dte.strftime(dateformats[lang][1])
elif argv == "loclong":
result = dte.strftime(dateformats[lang][2])
elif argv == "locmedium":
result = dte.strftime(dateformats[lang][3])
elif argv == "locshort":
result = dte.strftime(dateformats[lang][4])
else:
fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
fmt = re.sub('[^\'%]d', '%d', fmt)
fmt = fmt.replace("'", "")
result = dte.strftime(fmt)
document.body[i : j+1] = result
i = i + 1
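
# Example of the Qt-to-strftime mapping above (format string assumed): an
# inset argument "dddd, dd.MM.yyyy" becomes "%A, %d.%m.%Y", which strftime
# renders as, e.g., "Monday, 06.08.2018".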
def revert_timeinfo(document):
" Revert time info insets to static text. "
# FIXME This currently only considers the main language and uses the system locale
# Ideally, it should honor context languages and switch the locale accordingly.
# Also, the time object is "naive", i.e., it does not know of timezones (%Z will
# be empty).
# The time formats for each language using strftime syntax:
# long, short
timeformats = {
"afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
"albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
"american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
"arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"armenian" : ["%H:%M:%S %Z", "%H:%M"],
"asturian" : ["%H:%M:%S %Z", "%H:%M"],
"australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"austrian" : ["%H:%M:%S %Z", "%H:%M"],
"bahasa" : ["%H.%M.%S %Z", "%H.%M"],
"bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"basque" : ["%H:%M:%S (%Z)", "%H:%M"],
"belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
"bosnian" : ["%H:%M:%S %Z", "%H:%M"],
"brazilian" : ["%H:%M:%S %Z", "%H:%M"],
"breton" : ["%H:%M:%S %Z", "%H:%M"],
"british" : ["%H:%M:%S %Z", "%H:%M"],
"bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
"canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"canadien" : ["%H:%M:%S %Z", "%H h %M"],
"catalan" : ["%H:%M:%S %Z", "%H:%M"],
"chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
"chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
"coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
"croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
"czech" : ["%H:%M:%S %Z", "%H:%M"],
"danish" : ["%H.%M.%S %Z", "%H.%M"],
"divehi" : ["%H:%M:%S %Z", "%H:%M"],
"dutch" : ["%H:%M:%S %Z", "%H:%M"],
"english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
"estonian" : ["%H:%M:%S %Z", "%H:%M"],
"farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
"finnish" : ["%H.%M.%S %Z", "%H.%M"],
"french" : ["%H:%M:%S %Z", "%H:%M"],
"friulan" : ["%H:%M:%S %Z", "%H:%M"],
"galician" : ["%H:%M:%S %Z", "%H:%M"],
"georgian" : ["%H:%M:%S %Z", "%H:%M"],
"german" : ["%H:%M:%S %Z", "%H:%M"],
"german-ch" : ["%H:%M:%S %Z", "%H:%M"],
"german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
"greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"hebrew" : ["%H:%M:%S %Z", "%H:%M"],
"hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"icelandic" : ["%H:%M:%S %Z", "%H:%M"],
"interlingua" : ["%H:%M:%S %Z", "%H:%M"],
"irish" : ["%H:%M:%S %Z", "%H:%M"],
"italian" : ["%H:%M:%S %Z", "%H:%M"],
"japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
"japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
"kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"kazakh" : ["%H:%M:%S %Z", "%H:%M"],
"khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
"kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
"lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
"latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
"latvian" : ["%H:%M:%S %Z", "%H:%M"],
"lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
"lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
"macedonian" : ["%H:%M:%S %Z", "%H:%M"],
"magyar" : ["%H:%M:%S %Z", "%H:%M"],
"marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"mongolian" : ["%H:%M:%S %Z", "%H:%M"],
"naustrian" : ["%H:%M:%S %Z", "%H:%M"],
"newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"ngerman" : ["%H:%M:%S %Z", "%H:%M"],
"norsk" : ["%H:%M:%S %Z", "%H:%M"],
"nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
"occitan" : ["%H:%M:%S %Z", "%H:%M"],
"piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
"polish" : ["%H:%M:%S %Z", "%H:%M"],
"polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"portuguese" : ["%H:%M:%S %Z", "%H:%M"],
"romanian" : ["%H:%M:%S %Z", "%H:%M"],
"romansh" : ["%H:%M:%S %Z", "%H:%M"],
"russian" : ["%H:%M:%S %Z", "%H:%M"],
"samin" : ["%H:%M:%S %Z", "%H:%M"],
"sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
"scottish" : ["%H:%M:%S %Z", "%H:%M"],
"serbian" : ["%H:%M:%S %Z", "%H:%M"],
"serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
"slovak" : ["%H:%M:%S %Z", "%H:%M"],
"slovene" : ["%H:%M:%S %Z", "%H:%M"],
"spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
"spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
"swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
"syriac" : ["%H:%M:%S %Z", "%H:%M"],
"tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
"telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
"tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"turkish" : ["%H:%M:%S %Z", "%H:%M"],
"turkmen" : ["%H:%M:%S %Z", "%H:%M"],
"ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
"uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
"urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
"vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
"welsh" : ["%H:%M:%S %Z", "%H:%M"]
}
types = ["time", "fixtime", "modtime" ]
i = 0
i = find_token(document.header, "\\language", 0)
if i == -1:
# this should not happen
document.warning("Malformed LyX document! No \\language header found!")
return
lang = get_value(document.header, "\\language", i)
i = 0
while True:
i = find_token(document.body, "\\begin_inset Info", i)
if i == -1:
return
j = find_end_of_inset(document.body, i + 1)
if j == -1:
document.warning("Malformed LyX document: Could not find end of Info inset.")
i = i + 1
continue
tp = find_token(document.body, 'type', i, j)
tpv = get_quoted_value(document.body, "type", tp)
if tpv not in types:
i = i + 1
continue
arg = find_token(document.body, 'arg', i, j)
argv = get_quoted_value(document.body, "arg", arg)
isotime = ""
dtme = datetime.now()
tme = dtme.time()
if tpv == "fixtime":
timecomps = argv.split('@')
if len(timecomps) > 1:
argv = timecomps[0]
isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search(r'(\d\d):(\d\d)', isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "modtime":
        #     tme = datetime.fromtimestamp(os.path.getmtime(document.dir)).time()
result = ""
if argv == "ISO":
result = tme.isoformat()
elif argv == "long":
result = tme.strftime(timeformats[lang][0])
elif argv == "short":
result = tme.strftime(timeformats[lang][1])
else:
fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
fmt = fmt.replace("'", "")
result = dte.strftime(fmt)
        # replace the whole inset with a single line of static text
        document.body[i : j+1] = [result]
i = i + 1
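
# Example of the Qt-to-strftime mapping above (format string assumed): an
# inset argument "HH:mm:ss" becomes "%H:%M:%S", rendered as, e.g.,
# "14:05:59". The single regex pass for "H" is what keeps the substitution
# from rewriting the "%H" it has just produced.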
def revert_namenoextinfo(document):
" Merge buffer Info inset type name-noext to name. "
i = 0
while True:
i = find_token(document.body, "\\begin_inset Info", i)
if i == -1:
return
j = find_end_of_inset(document.body, i + 1)
if j == -1:
document.warning("Malformed LyX document: Could not find end of Info inset.")
i = i + 1
continue
tp = find_token(document.body, 'type', i, j)
tpv = get_quoted_value(document.body, "type", tp)
if tpv != "buffer":
i = i + 1
continue
arg = find_token(document.body, 'arg', i, j)
argv = get_quoted_value(document.body, "arg", arg)
if argv != "name-noext":
i = i + 1
continue
document.body[arg] = "arg \"name\""
i = i + 1
##
# Conversion hub
#
supported_versions = ["2.4.0", "2.4"]
convert = [
[545, [convert_lst_literalparam]],
[546, []],
[547, []],
[548, []],
[549, []],
[550, [convert_fontenc]],
[551, []],
[552, []],
[553, []],
[554, []],
[555, []],
[556, []],
[557, [convert_vcsinfo]],
[558, [removeFrontMatterStyles]],
[559, []],
[560, []]
]
revert = [
           [559, [revert_timeinfo, revert_namenoextinfo]],
[558, [revert_dateinfo]],
[557, [addFrontMatterStyles]],
[556, [revert_vcsinfo]],
[555, [revert_bibencoding]],
[554, [revert_vcolumns]],
[553, [revert_stretchcolumn]],
[552, [revert_tuftecite]],
[551, [revert_floatpclass, revert_floatalignment]],
[550, [revert_nospellcheck]],
[549, [revert_fontenc]],
           [548, []],  # dummy format change
[547, [revert_lscape]],
[546, [revert_xcharter]],
[545, [revert_paratype]],
[544, [revert_lst_literalparam]]
]
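
# How the hub is used: lyx2lyx walks these tables one format step at a
# time, so converting a 2.3 file (format 544) to 2.4 applies the convert
# entries for 545..560 in order, and going back applies the revert entries
# from 559 down to 544, each one undoing exactly one step.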
if __name__ == "__main__":
pass