mirror of
https://git.lyx.org/repos/lyx.git
synced 2024-09-19 22:40:26 +00:00
generate_enconding_info.py parses lib/languages and generates lyx2lyx_lang.py
as a python dictionary, ready to be used by other python modules. git-svn-id: svn://svn.lyx.org/lyx/lyx-devel/trunk@14557 a592a061-630c-0410-9148-cb99ea01b6c8
This commit is contained in:
parent
de0c8aef7c
commit
9e832550ad
62
lib/lyx2lyx/generate_enconding_info.py
Normal file
62
lib/lyx2lyx/generate_enconding_info.py
Normal file
@ -0,0 +1,62 @@
|
||||
# This file is part of lyx2lyx
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2006 José Matos <jamatos@lyx.org>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
|
||||
""" This module parses lib/languages and prints it as a python
|
||||
dictionary, ready to use by other python modules"""
|
||||
|
||||
import pprint
|
||||
|
||||
def parse_line(line):
    """ Parse one line from lib/languages and return its fields as a list.

    Fields are whitespace-separated tokens; a field that starts with a
    double quote runs to the matching closing quote and may contain
    spaces (an unterminated quote consumes the rest of the line).

    line -- one line of lib/languages, without its trailing newline.
    Returns a list of field strings (empty for a blank line).
    """
    fields = []
    j = 0
    n = len(line)
    while j < n:
        # Skip whitespace before the next field.  Doing this at the top
        # of the loop also handles leading whitespace and blank lines,
        # which previously crashed with IndexError on split()[0].
        while j < n and line[j].isspace():
            j += 1
        if j >= n:
            break
        if line[j] != '"':
            # Unquoted field: everything up to the next whitespace.
            token = line[j:].split()[0]
            fields.append(token)
            # Advance by the token length only; trailing whitespace is
            # consumed by the skip loop on the next iteration.
            j += len(token)
        else:
            k = line.find('"', j + 1)
            if k == -1:
                # No closing quote: take the rest of the line.
                fields.append(line[j + 1:])
                break
            fields.append(line[j + 1:k])
            j = k + 1

    return fields
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Build a dict mapping each language's LyX name (first field) to its
    # remaining fields, reading lib/languages relative to lib/lyx2lyx/.
    # NOTE(review): Python 2 script (print statements).  The file handle
    # is never explicitly closed, and a blank non-comment line would make
    # tmp empty and raise IndexError at tmp[0] -- left unchanged here.
    lines = open("../languages", "rb")
    lang = {}
    for line in lines:
        # Skip comment lines; line[:-1] drops the trailing newline.
        if line[:1] != '#':
            tmp = parse_line(line[:-1])
            lang[tmp[0]] = tmp[1:]

    # Emit lyx2lyx_lang.py on stdout, ready to be redirected to a file.
    # (The trailing comma keeps pprint's output on the same line as
    # "lang = ".)
    print "# This file is generated by generate_incoding_info.py from lib/languages file."
    print "# Do not change this file directly."
    print
    print "lang = ",
    pprint.pprint(lang)
|
101
lib/lyx2lyx/lyx2lyx_lang.py
Normal file
101
lib/lyx2lyx/lyx2lyx_lang.py
Normal file
@ -0,0 +1,101 @@
|
||||
# This file is generated by generate_incoding_info.py from lib/languages file.
|
||||
# Do not change this file directly.
|
||||
|
||||
# Maps a LyX language name to its attribute list.  Each value appears to
# be [babel name, display name, RTL flag ('true'/'false'), default
# encoding, locale code, extra LaTeX preamble] -- inferred from the
# entries (e.g. arabic/hebrew carry 'true'); confirm against
# lib/languages.  Generated file: do not edit the data by hand.
lang = {'afrikaans': ['afrikaans', 'Afrikaans', 'false', 'iso8859-1', 'af_ZA', ''],
 'american': ['american', 'American', 'false', 'iso8859-1', 'en_US', ''],
 'arabic': ['arabic', 'Arabic', 'true', 'iso8859-6', 'ar_SA', ''],
 'austrian': ['austrian', 'Austrian', 'false', 'iso8859-1', 'de_AT', ''],
 'bahasa': ['bahasa', 'Bahasa', 'false', 'iso8859-1', 'in_ID', ''],
 'basque': ['basque', 'Basque', 'false', 'iso8859-1', 'eu_ES', ''],
 'belarusian': ['belarusian', 'Belarusian', 'false', 'cp1251', 'be_BY', ''],
 'brazil': ['brazil', 'Portuguese (Brazil)', 'false', 'iso8859-1', 'pt_BR', ''],
 'breton': ['breton', 'Breton', 'false', 'iso8859-1', 'br_FR', ''],
 'british': ['british', 'British', 'false', 'iso8859-1', 'en_GB', ''],
 'bulgarian': ['bulgarian', 'Bulgarian', 'false', 'cp1251', 'bg_BG', ''],
 'canadian': ['canadian', 'Canadian', 'false', 'iso8859-1', 'en_CA', ''],
 'canadien': ['canadien', 'French Canadian', 'false', 'iso8859-1', 'fr_CA', ''],
 'catalan': ['catalan', 'Catalan', 'false', 'iso8859-1', 'ca_ES', ''],
 'croatian': ['croatian', 'Croatian', 'false', 'iso8859-2', 'hr_HR', ''],
 'czech': ['czech', 'Czech', 'false', 'iso8859-2', 'cs_CZ', ''],
 'danish': ['danish', 'Danish', 'false', 'iso8859-1', 'da_DK', ''],
 'dutch': ['dutch', 'Dutch', 'false', 'iso8859-1', 'nl_NL', ''],
 'english': ['english', 'English', 'false', 'iso8859-1', 'en_US', ''],
 'esperanto': ['esperanto', 'Esperanto', 'false', 'iso8859-3', 'eo', ''],
 'estonian': ['estonian', 'Estonian', 'false', 'iso8859-1', 'et_EE', ''],
 'finnish': ['finnish', 'Finnish', 'false', 'iso8859-1', 'fi_FI', ''],
 'french': ['french',
            'French',
            'false',
            'iso8859-1',
            'fr_FR',
            '\\addto\\extrasfrench{\\providecommand{\\og}{\\leavevmode\\flqq~}\\providecommand{\\fg}{\\ifdim\\lastskip>\\z@\\unskip\\fi~\\frqq}}'],
 'galician': ['galician', 'Galician', 'false', 'iso8859-1', 'gl_ES', ''],
 'german': ['german', 'German', 'false', 'iso8859-1', 'de_DE', ''],
 'greek': ['greek', 'Greek', 'false', 'iso8859-7', 'el_GR', ''],
 'hebrew': ['hebrew', 'Hebrew', 'true', 'cp1255', 'he_IL', ''],
 'icelandic': ['icelandic', 'Icelandic', 'false', 'iso8859-1', 'is_IS', ''],
 'irish': ['irish', 'Irish', 'false', 'iso8859-1', 'ga_IE', ''],
 'italian': ['italian', 'Italian', 'false', 'iso8859-1', 'it_IT', ''],
 'kazakh': ['kazakh', 'Kazakh', 'false', 'pt154', 'kk_KZ', ''],
 'latvian': ['latvian', 'Latvian', 'false', 'iso8859-13', 'lv_LV', ''],
 'lithuanian': ['lithuanian', 'Lithuanian', 'false', 'iso8859-13', 'lt_LT', ''],
 'magyar': ['magyar', 'Magyar', 'false', 'iso8859-2', 'hu_HU', ''],
 'naustrian': ['naustrian', 'Austrian (new spelling)', 'false', 'iso8859-1', 'de_AT', ''],
 'ngerman': ['ngerman', 'German (new spelling)', 'false', 'iso8859-1', 'de_DE', ''],
 'norsk': ['norsk', 'Norsk', 'false', 'iso8859-1', 'no_NO', ''],
 'nynorsk': ['nynorsk', 'Nynorsk', 'false', 'iso8859-1', 'nn_NO', ''],
 'polish': ['polish', 'Polish', 'false', 'iso8859-2', 'pl_PL', ''],
 'portuges': ['portuges', 'Portugese', 'false', 'iso8859-1', 'pt_PT', ''],
 'romanian': ['romanian', 'Romanian', 'false', 'iso8859-2', 'ro_RO', ''],
 'russian': ['russian', 'Russian', 'false', 'koi8', 'ru_RU', ''],
 'scottish': ['scottish', 'Scottish', 'false', 'iso8859-1', 'gd_GB', ''],
 'serbian': ['croatian', 'Serbian', 'false', 'iso8859-5', 'sr_HR', ''],
 'serbocroatian': ['croatian', 'Serbo-Croatian', 'false', 'iso8859-2', 'sh_HR', ''],
 'slovak': ['slovak', 'Slovak', 'false', 'iso8859-2', 'sk_SK', ''],
 'slovene': ['slovene', 'Slovene', 'false', 'iso8859-2', 'sl_SI', ''],
 'spanish': ['spanish', 'Spanish', 'false', 'iso8859-1', 'es_ES', '\\deactivatetilden'],
 'swedish': ['swedish', 'Swedish', 'false', 'iso8859-1', 'sv_SE', ''],
 'thai': ['thai', 'Thai', 'false', 'tis620-0', 'th_TH', '\\usepackage{thswitch}'],
 'turkish': ['turkish', 'Turkish', 'false', 'iso8859-9', 'tr_TR', ''],
 'ukrainian': ['ukrainian', 'Ukrainian', 'false', 'koi8-u', 'uk_UA', ''],
 'welsh': ['welsh', 'Welsh', 'false', 'iso8859-1', 'cy_GB', '']}
|
Loading…
Reference in New Issue
Block a user