# This file is part of lyx2lyx
# -*- coding: utf-8 -*-
# Copyright (C) 2006 José Matos
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

""" Convert files to the file format generated by lyx 0.10"""


def regularise_header(document):
    """ Put each header entry on a separate line. """
    i = 0
    while i < len(document.header):
        line = document.header[i]
        if len(line.split('\\')) > 1:
            # Several backslash commands on one line: give each its own line.
            tmp = ['\\' + token.strip() for token in line.split('\\')][1:]
            document.header[i: i+1] = tmp
            i += len(tmp)
        i += 1


def find_next_space(line, j):
    """ Return the position of the next space or backslash, whichever comes
    first, starting from position j; if neither exists, return the position
    just past the end of the line. """
    space_pos = line.find(' ', j)
    if space_pos == -1:
        space_pos = len(line)

    bksl_pos = line.find('\\', j)
    if bksl_pos == -1:
        bksl_pos = len(line)

    return min(space_pos, bksl_pos)


def regularise_body(document):
    """ Place each token starting with a backslash on a separate line. """
    getline_tokens = ["added_space_bottom", "added_space_top", "align",
                      "layout", "fill_bottom", "fill_top",
                      "labelwidthstring", "pagebreak_top",
                      "pagebreak_bottom", "noindent"]
    noargs_tokens = ["backslash", "begin_deeper", "end_deeper", "end_float",
                     "end_inset", "hfill", "newline", "protected_separator"]
    onearg_tokens = ["bar", "begin_float", "family", "latex", "shape",
                     "size", "series", "cursor"]

    i = 0
    while i < len(document.body):
        line = document.body[i]
        j = 0
        new_block = []
        while j < len(line):
            k = line.find('\\', j)

            if k == -1:
                # No more backslashes: keep the remaining text as-is.
                new_block += [line[j:]]
                break

            if k != j:
                # Plain text before the next backslash token.
                #document.warning("j=%d\tk=%d\t#%s#%s#" % (j,k,line,line[j: k]))
                new_block += [line[j: k]]
                j = k

            k = find_next_space(line, j+1)
            token = line[j+1:k]

            # These tokens take the rest of the line
            if token in getline_tokens:
                #document.warning("getline_token:%s\tj=%d\t\t#%s#%s#" % (token,j,line,line[j:]))
                new_block += [line[j:]]
                break

            # These tokens take no arguments
            if token in noargs_tokens:
                new_block += [line[j:k]]
                j = k
                continue

            # These tokens take one argument
            if token in onearg_tokens:
                k = find_next_space(line, k + 1)
                new_block += [line[j:k]]
                j = k
                continue

            # Special treatment for insets
            if token in ["begin_inset"]:
                l = find_next_space(line, k + 1)
                inset = line[k+1: l]

                if inset == "Latex":
                    new_block += [line[j:l]]
                    j = l
                    continue

                if inset in ["LatexCommand", "LatexDel", "Label", "Figure",
                             "Formula"]:
                    new_block += [line[j:]]
                    break

                if inset == "Quotes":
                    l = find_next_space(line, l + 1)
                    new_block += [line[j:l]]
                    j = l
                    continue

                document.warning("unknown inset %s" % inset)
                assert False

            # We are inside a latex inset, pass the text verbatim
            new_block += [line[j:]]
            break

        document.body[i: i+1] = new_block
        i += len(new_block)


supported_versions = ["0.10.%d" % i for i in range(8)] + ["0.10"]
convert = [[210, [regularise_header, regularise_body]]]
revert = []


if __name__ == "__main__":
    pass
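
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original lyx2lyx module).
# _DemoDocument below is a hypothetical stand-in for the real lyx2lyx
# document object; it provides only the attributes the functions above
# actually touch (header, body and warning).  Running this file directly
# shows how the regularise_* passes split crammed LyX 0.10 lines into one
# token per line.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import sys

    class _DemoDocument:
        """Minimal, hypothetical document stand-in for demonstration."""

        def __init__(self, header, body):
            self.header = header
            self.body = body

        def warning(self, message):
            sys.stderr.write(message + "\n")

    demo = _DemoDocument(
        header=["\\lyxformat 210 \\textclass article"],
        body=["Some text \\newline more text \\hfill end"],
    )
    regularise_header(demo)
    regularise_body(demo)

    # Header entries now sit one per line:
    #   ['\\lyxformat 210', '\\textclass article']
    print(demo.header)
    # Body tokens that start with a backslash now occupy their own lines:
    #   ['Some text ', '\\newline', ' more text ', '\\hfill', ' end']
    print(demo.body)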