2003-08-19 10:04:35 +00:00
|
|
|
/**
|
2007-04-26 04:53:06 +00:00
|
|
|
* \file Parser.cpp
|
2003-08-19 10:04:35 +00:00
|
|
|
* This file is part of LyX, the document processor.
|
|
|
|
* Licence details can be found in the file COPYING.
|
|
|
|
*
|
2008-11-14 16:16:45 +00:00
|
|
|
* \author André Pönitz
|
2003-08-19 10:04:35 +00:00
|
|
|
*
|
2003-08-23 00:17:00 +00:00
|
|
|
* Full author contact details are available in file CREDITS.
|
2003-08-19 10:04:35 +00:00
|
|
|
*/
|
|
|
|
|
2003-04-17 06:22:07 +00:00
|
|
|
#include <config.h>
|
|
|
|
|
2007-04-26 04:53:06 +00:00
|
|
|
#include "Parser.h"
|
2003-04-17 06:22:07 +00:00
|
|
|
|
2003-02-12 21:07:47 +00:00
|
|
|
#include <iostream>
|
2003-02-12 07:53:03 +00:00
|
|
|
|
2007-12-12 10:16:00 +00:00
|
|
|
using namespace std;
|
2006-10-21 00:16:43 +00:00
|
|
|
|
|
|
|
namespace lyx {
|
|
|
|
|
2003-02-12 11:09:22 +00:00
|
|
|
namespace {
|
2003-02-12 07:53:03 +00:00
|
|
|
|
|
|
|
CatCode theCatcode[256];
|
|
|
|
|
|
|
|
void catInit()
|
|
|
|
{
|
2008-11-15 19:30:58 +00:00
|
|
|
static bool init_done = false;
|
|
|
|
if (init_done)
|
|
|
|
return;
|
|
|
|
init_done = true;
|
|
|
|
|
2003-02-12 07:53:03 +00:00
|
|
|
fill(theCatcode, theCatcode + 256, catOther);
|
|
|
|
fill(theCatcode + 'a', theCatcode + 'z' + 1, catLetter);
|
|
|
|
fill(theCatcode + 'A', theCatcode + 'Z' + 1, catLetter);
|
|
|
|
|
2003-07-18 08:46:00 +00:00
|
|
|
theCatcode[int('\\')] = catEscape;
|
|
|
|
theCatcode[int('{')] = catBegin;
|
|
|
|
theCatcode[int('}')] = catEnd;
|
|
|
|
theCatcode[int('$')] = catMath;
|
|
|
|
theCatcode[int('&')] = catAlign;
|
2004-11-08 08:24:43 +00:00
|
|
|
theCatcode[int('\n')] = catNewline;
|
2003-07-18 08:46:00 +00:00
|
|
|
theCatcode[int('#')] = catParameter;
|
|
|
|
theCatcode[int('^')] = catSuper;
|
|
|
|
theCatcode[int('_')] = catSub;
|
2004-11-08 08:24:43 +00:00
|
|
|
theCatcode[0x7f] = catIgnore;
|
2003-07-18 08:46:00 +00:00
|
|
|
theCatcode[int(' ')] = catSpace;
|
|
|
|
theCatcode[int('\t')] = catSpace;
|
2004-11-08 08:24:43 +00:00
|
|
|
theCatcode[int('\r')] = catNewline;
|
2003-07-18 08:46:00 +00:00
|
|
|
theCatcode[int('~')] = catActive;
|
|
|
|
theCatcode[int('%')] = catComment;
|
2003-03-03 17:49:26 +00:00
|
|
|
|
|
|
|
// This is wrong!
|
2003-07-18 08:46:00 +00:00
|
|
|
theCatcode[int('@')] = catLetter;
|
2003-02-12 07:53:03 +00:00
|
|
|
}
|
|
|
|
|
2004-11-08 08:24:43 +00:00
|
|
|
/*!
|
|
|
|
* Translate a line ending to '\n'.
|
|
|
|
* \p c must have catcode catNewline, and it must be the last character read
|
|
|
|
* from \p is.
|
|
|
|
*/
|
2008-11-16 17:02:00 +00:00
|
|
|
char getNewline(idocstream & is, char c)
|
2004-11-08 08:24:43 +00:00
|
|
|
{
|
|
|
|
// we have to handle 3 different line endings:
|
|
|
|
// - UNIX (\n)
|
|
|
|
// - MAC (\r)
|
|
|
|
// - DOS (\r\n)
|
|
|
|
if (c == '\r') {
|
|
|
|
// MAC or DOS
|
2008-11-16 17:02:00 +00:00
|
|
|
char_type wc;
|
|
|
|
if (is.get(wc) && wc != '\n') {
|
2004-11-08 08:24:43 +00:00
|
|
|
// MAC
|
2008-11-16 17:02:00 +00:00
|
|
|
is.putback(wc);
|
2004-11-08 08:24:43 +00:00
|
|
|
}
|
|
|
|
return '\n';
|
|
|
|
}
|
|
|
|
// UNIX
|
|
|
|
return c;
|
|
|
|
}
|
|
|
|
|
2008-11-16 17:02:00 +00:00
|
|
|
CatCode catcode(char_type c)
|
2003-02-12 11:09:22 +00:00
|
|
|
{
|
2008-11-16 17:02:00 +00:00
|
|
|
if (c < 256)
|
|
|
|
return theCatcode[(unsigned char)c];
|
|
|
|
return catOther;
|
2003-02-12 11:09:22 +00:00
|
|
|
}
|
|
|
|
|
2008-11-16 17:02:00 +00:00
|
|
|
}
|
2003-02-12 11:09:22 +00:00
|
|
|
|
2003-02-12 07:53:03 +00:00
|
|
|
|
|
|
|
//
|
|
|
|
// Token
|
|
|
|
//
|
|
|
|
|
|
|
|
// Debug representation of a token; the format depends on its catcode.
ostream & operator<<(ostream & os, Token const & t)
{
	switch (t.cat()) {
	case catComment:
		// Comments are shown with their '%' and line break restored.
		os << '%' << t.cs() << '\n';
		break;
	case catSpace:
	case catLetter:
		os << t.cs();
		break;
	case catEscape:
		os << '\\' << t.cs() << ' ';
		break;
	case catNewline:
		// Show how many line breaks the token spans.
		os << "[" << t.cs().size() << "\\n," << t.cat() << "]\n";
		break;
	default:
		os << '[' << t.cs() << ',' << t.cat() << ']';
	}
	return os;
}
|
|
|
|
|
|
|
|
|
2003-02-12 12:05:31 +00:00
|
|
|
// Return the token's bare text, without any TeX markup such as the
// leading '\' of a macro or the '%' of a comment.
string Token::asString() const
{
	return cs_;
}
|
|
|
|
|
|
|
|
|
|
|
|
// Return the token as it appeared in the input: comments regain their
// '%' and trailing newline, macros regain their backslash.
string Token::asInput() const
{
	switch (cat_) {
	case catComment:
		return '%' + cs_ + '\n';
	case catEscape:
		return '\\' + cs_;
	default:
		return cs_;
	}
}
|
|
|
|
|
2003-02-12 11:09:22 +00:00
|
|
|
|
2003-02-12 07:53:03 +00:00
|
|
|
//
|
|
|
|
// Parser
|
|
|
|
//
|
|
|
|
|
|
|
|
|
2008-11-16 17:02:00 +00:00
|
|
|
// Construct a parser reading from an externally owned stream.
// iss_ stays null, so the destructor deletes nothing.
Parser::Parser(idocstream & is)
	: lineno_(0), pos_(0), iss_(0), is_(is)
{
}
|
|
|
|
|
2003-04-23 15:14:43 +00:00
|
|
|
|
2003-04-16 12:52:49 +00:00
|
|
|
// Construct a parser reading from a string. An owned stream is created
// from the UTF-8 input and released again in ~Parser().
Parser::Parser(string const & s)
	: lineno_(0), pos_(0),
	  iss_(new idocstringstream(from_utf8(s))), is_(*iss_)
{
}
|
|
|
|
|
|
|
|
|
|
|
|
Parser::~Parser()
{
	// Non-null only when the string constructor was used.
	delete iss_;
}
|
|
|
|
|
2003-02-12 07:53:03 +00:00
|
|
|
|
|
|
|
// Append a token to the token buffer.
void Parser::push_back(Token const & t)
{
	tokens_.push_back(t);
}
|
|
|
|
|
|
|
|
|
2003-04-23 15:14:43 +00:00
|
|
|
// The token before the current one, or a default-constructed token
// when fewer than two tokens have been consumed.
Token const & Parser::prev_token() const
{
	static const Token dummy;
	if (pos_ > 1)
		return tokens_[pos_ - 2];
	return dummy;
}
|
|
|
|
|
|
|
|
|
|
|
|
// The most recently consumed token, or a default-constructed token
// when nothing has been consumed yet.
Token const & Parser::curr_token() const
{
	static const Token dummy;
	if (pos_ > 0)
		return tokens_[pos_ - 1];
	return dummy;
}
|
|
|
|
|
|
|
|
|
2008-11-15 20:30:45 +00:00
|
|
|
// Peek at the upcoming token without consuming it. May tokenize more
// input (via good()); returns a dummy token at end of input.
Token const & Parser::next_token()
{
	static const Token dummy;
	if (!good())
		return dummy;
	return tokens_[pos_];
}
|
|
|
|
|
|
|
|
|
2003-04-23 15:14:43 +00:00
|
|
|
// Consume and return the next token; returns a dummy token at end of
// input (without advancing).
Token const & Parser::get_token()
{
	static const Token dummy;
	if (!good())
		return dummy;
	//cerr << "looking at token " << tokens_[pos_] << " pos: " << pos_ << '\n';
	return tokens_[pos_++];
}
|
|
|
|
|
|
|
|
|
2008-11-15 20:30:45 +00:00
|
|
|
bool Parser::isParagraph()
|
2004-06-18 06:47:19 +00:00
|
|
|
{
|
|
|
|
// A new paragraph in TeX ist started
|
|
|
|
// - either by a newline, following any amount of whitespace
|
|
|
|
// characters (including zero), and another newline
|
|
|
|
// - or the token \par
|
|
|
|
if (curr_token().cat() == catNewline &&
|
|
|
|
(curr_token().cs().size() > 1 ||
|
|
|
|
(next_token().cat() == catSpace &&
|
|
|
|
pos_ < tokens_.size() - 1 &&
|
|
|
|
tokens_[pos_ + 1].cat() == catNewline)))
|
|
|
|
return true;
|
|
|
|
if (curr_token().cat() == catEscape && curr_token().cs() == "par")
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2003-11-05 10:14:13 +00:00
|
|
|
void Parser::skip_spaces(bool skip_comments)
|
2003-02-12 07:53:03 +00:00
|
|
|
{
|
2003-11-05 10:14:13 +00:00
|
|
|
// We just silently return if we have no more tokens.
|
|
|
|
// skip_spaces() should be callable at any time,
|
|
|
|
// the caller must check p::good() anyway.
|
|
|
|
while (good()) {
|
2004-06-18 06:47:19 +00:00
|
|
|
get_token();
|
|
|
|
if (isParagraph()) {
|
|
|
|
putback();
|
2003-11-05 10:14:13 +00:00
|
|
|
break;
|
2004-06-18 06:47:19 +00:00
|
|
|
}
|
|
|
|
if ( curr_token().cat() == catSpace ||
|
|
|
|
curr_token().cat() == catNewline ||
|
|
|
|
(curr_token().cat() == catComment && curr_token().cs().empty()))
|
|
|
|
continue;
|
|
|
|
if (skip_comments && curr_token().cat() == catComment)
|
|
|
|
cerr << " Ignoring comment: " << curr_token().asInput();
|
|
|
|
else {
|
|
|
|
putback();
|
|
|
|
break;
|
|
|
|
}
|
2003-11-05 10:14:13 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void Parser::unskip_spaces(bool skip_comments)
|
|
|
|
{
|
|
|
|
while (pos_ > 0) {
|
|
|
|
if ( curr_token().cat() == catSpace ||
|
|
|
|
(curr_token().cat() == catNewline && curr_token().cs().size() == 1))
|
|
|
|
putback();
|
|
|
|
else if (skip_comments && curr_token().cat() == catComment) {
|
|
|
|
// TODO: Get rid of this
|
|
|
|
cerr << "Unignoring comment: " << curr_token().asInput();
|
|
|
|
putback();
|
|
|
|
}
|
2003-03-03 17:49:26 +00:00
|
|
|
else
|
|
|
|
break;
|
|
|
|
}
|
2003-02-12 07:53:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Step one token back, so that the last consumed token is delivered
// again by the next get_token() call.
void Parser::putback()
{
	--pos_;
}
|
|
|
|
|
|
|
|
|
2008-11-15 20:30:45 +00:00
|
|
|
bool Parser::good()
|
2003-02-12 07:53:03 +00:00
|
|
|
{
|
2008-11-15 20:30:45 +00:00
|
|
|
if (pos_ < tokens_.size())
|
|
|
|
return true;
|
|
|
|
tokenize_one();
|
2003-02-12 07:53:03 +00:00
|
|
|
return pos_ < tokens_.size();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Consume the next token and return its character. Reports an error
// (but does not abort) when no more input is available; a dummy
// token's character is returned in that case.
char Parser::getChar()
{
	if (!good())
		error("The input stream is not well...");
	return get_token().character();
}
|
|
|
|
|
|
|
|
|
2005-01-06 13:22:20 +00:00
|
|
|
// Read an argument enclosed in \p left ... \p right. Returns a flag
// telling whether a delimited argument was found, plus its contents
// (delimiters stripped, comments dropped).
Parser::Arg Parser::getFullArg(char left, char right)
{
	skip_spaces(true);

	// This is needed if a partial file ends with a command without
	// arguments, e. g. \medskip
	if (!good())
		return make_pair(false, string());

	if (getChar() != left) {
		// Not a delimited argument; leave the token untouched.
		putback();
		return make_pair(false, string());
	}

	string result;
	for (char c = getChar(); c != right && good(); c = getChar()) {
		// Ignore comments
		if (curr_token().cat() == catComment) {
			if (!curr_token().cs().empty())
				cerr << "Ignoring comment: " << curr_token().asInput();
		} else
			result += curr_token().asInput();
	}

	return make_pair(true, result);
}
|
|
|
|
|
|
|
|
|
|
|
|
// Like getFullArg(), but discards the success flag; an empty string is
// returned both for a missing and for an empty argument.
string Parser::getArg(char left, char right)
{
	return getFullArg(left, right).second;
}
|
|
|
|
|
|
|
|
|
|
|
|
// Read an optional argument including its brackets; "" when there is
// no bracketed argument (an empty "[]" is returned as such).
string Parser::getFullOpt()
{
	Arg const arg = getFullArg('[', ']');
	return arg.first ? '[' + arg.second + ']' : string();
}
|
|
|
|
|
|
|
|
|
2003-02-28 13:37:43 +00:00
|
|
|
// Read an optional argument including its brackets. Note that, unlike
// getFullOpt(), an empty "[]" also yields "".
string Parser::getOpt()
{
	string const res = getArg('[', ']');
	if (res.empty())
		return string();
	return '[' + res + ']';
}
|
|
|
|
|
2008-04-18 12:26:21 +00:00
|
|
|
|
2008-04-17 00:22:16 +00:00
|
|
|
// Read a parenthesized argument including its parentheses; "" when
// there is no such argument.
string Parser::getFullParentheseArg()
{
	Arg const arg = getFullArg('(', ')');
	return arg.first ? '(' + arg.second + ')' : string();
}
|
2003-02-28 13:37:43 +00:00
|
|
|
|
2008-04-18 12:26:21 +00:00
|
|
|
|
2005-03-14 17:34:57 +00:00
|
|
|
// Copy the body of environment \p name verbatim, up to (and consuming)
// the matching \end{name}. Nested environments and brace groups are
// copied recursively; a mismatched \end is reported but still ends the
// environment.
string const Parser::verbatimEnvironment(string const & name)
{
	if (!good())
		return string();

	ostringstream os;
	Token t = get_token();
	while (good()) {
		if (t.cat() == catBegin) {
			// Copy a complete brace group verbatim.
			putback();
			os << '{' << verbatim_item() << '}';
		} else if (t.asInput() == "\\begin") {
			// Nested environment: recurse.
			string const env = getArg('{', '}');
			os << "\\begin{" << env << '}'
			   << verbatimEnvironment(env)
			   << "\\end{" << env << '}';
		} else if (t.asInput() == "\\end") {
			string const end = getArg('{', '}');
			if (end != name)
				cerr << "\\end{" << end
				     << "} does not match \\begin{" << name
				     << "}." << endl;
			return os.str();
		} else
			os << t.asInput();
		t = get_token();
	}
	cerr << "unexpected end of input" << endl;
	return os.str();
}
|
|
|
|
|
|
|
|
|
2008-11-15 15:09:59 +00:00
|
|
|
// Read one token from the input stream and append it to tokens_.
// At end of input nothing is appended, so good() then reports false.
void Parser::tokenize_one()
{
	catInit();
	char_type c;
	if (!is_.get(c))
		return;

	switch (catcode(c)) {
	case catSpace: {
		// A run of consecutive space characters forms one token.
		docstring s(1, c);
		while (is_.get(c) && catcode(c) == catSpace)
			s += c;
		// The loop read one character too far; give it back.
		// NOTE(review): if the loop stopped at EOF, c keeps its last
		// value and putback() on the failed stream has no effect.
		if (catcode(c) != catSpace)
			is_.putback(c);
		push_back(Token(s, catSpace));
		break;
	}

	case catNewline: {
		// Collapse consecutive line endings into one token, one
		// '\n' per line; the token length thus counts blank lines.
		++lineno_;
		docstring s(1, getNewline(is_, c));
		while (is_.get(c) && catcode(c) == catNewline) {
			++lineno_;
			s += getNewline(is_, c);
		}
		if (catcode(c) != catNewline)
			is_.putback(c);
		push_back(Token(s, catNewline));
		break;
	}

	case catComment: {
		// We don't treat "%\n" combinations here specially because
		// we want to preserve them in the preamble
		docstring s;
		while (is_.get(c) && catcode(c) != catNewline)
			s += c;
		// handle possible DOS line ending
		if (catcode(c) == catNewline)
			c = getNewline(is_, c);
		// Note: The '%' at the beginning and the '\n' at the end
		// of the comment are not stored.
		++lineno_;
		push_back(Token(s, catComment));
		break;
	}

	case catEscape: {
		// A control sequence: '\' followed by either a run of
		// letters or a single non-letter character.
		is_.get(c);
		if (!is_) {
			error("unexpected end of input");
		} else {
			docstring s(1, c);
			if (catcode(c) == catLetter) {
				// collect letters
				while (is_.get(c) && catcode(c) == catLetter)
					s += c;
				if (catcode(c) != catLetter)
					is_.putback(c);
			}
			push_back(Token(s, catEscape));
		}
		break;
	}

	case catIgnore: {
		// Ignored characters (e.g. 0x7f) produce no token at all.
		cerr << "ignoring a char: " << c << "\n";
		break;
	}

	default:
		// All other characters become single-character tokens.
		push_back(Token(docstring(1, c), catcode(c)));
	}
	//cerr << tokens_.back();
}
|
2003-02-12 07:53:03 +00:00
|
|
|
|
|
|
|
|
|
|
|
void Parser::dump() const
|
|
|
|
{
|
|
|
|
cerr << "\nTokens: ";
|
|
|
|
for (unsigned i = 0; i < tokens_.size(); ++i) {
|
|
|
|
if (i == pos_)
|
|
|
|
cerr << " <#> ";
|
|
|
|
cerr << tokens_[i];
|
|
|
|
}
|
|
|
|
cerr << " pos: " << pos_ << "\n";
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Report a parse error with the approximate line number and dump the
// token buffer for debugging. Parsing continues afterwards.
void Parser::error(string const & msg)
{
	cerr << "Line ~" << lineno_ << ": parse error: " << msg << endl;
	dump();
	//exit(1);
}
|
|
|
|
|
|
|
|
|
|
|
|
// Copy an optional argument "[...]" verbatim (without the brackets);
// "" when the next token is not '['. Brace groups inside the option
// are copied recursively.
string Parser::verbatimOption()
{
	string res;
	if (next_token().character() != '[')
		return res;
	get_token(); // discard the '['
	for (Token t = get_token(); t.character() != ']' && good(); t = get_token()) {
		if (t.cat() == catBegin) {
			putback();
			res += '{' + verbatim_item() + '}';
		} else
			res += t.asString();
	}
	return res;
}
|
|
|
|
|
|
|
|
|
2003-04-23 15:14:43 +00:00
|
|
|
// Copy one item verbatim: either a balanced brace group (contents
// without the outer braces, inner groups copied recursively) or a
// single token. Leading spaces are skipped first.
string Parser::verbatim_item()
{
	if (!good())
		error("stream bad");
	skip_spaces();
	if (next_token().cat() != catBegin)
		return get_token().asInput();

	get_token(); // skip the opening brace
	string res;
	for (Token t = get_token(); t.cat() != catEnd && good(); t = get_token()) {
		if (t.cat() == catBegin) {
			putback();
			res += '{' + verbatim_item() + '}';
		} else
			res += t.asInput();
	}
	return res;
}
|
2003-02-12 14:32:17 +00:00
|
|
|
|
2003-02-28 13:37:43 +00:00
|
|
|
|
2003-10-23 11:46:33 +00:00
|
|
|
// Rewind to the first token; the token buffer itself is kept, so
// already tokenized input is not re-read.
void Parser::reset()
{
	pos_ = 0;
}
|
|
|
|
|
|
|
|
|
2003-02-28 13:37:43 +00:00
|
|
|
// Override the category code of \p c. Note that the catcode table is
// a file-level global, so this affects all Parser instances.
void Parser::setCatCode(char c, CatCode cat)
{
	theCatcode[(unsigned char)c] = cat;
}
|
|
|
|
|
|
|
|
|
|
|
|
// Current category code of \p c (from the shared catcode table).
CatCode Parser::getCatCode(char c) const
{
	return theCatcode[(unsigned char)c];
}
|
2006-10-21 00:16:43 +00:00
|
|
|
|
|
|
|
|
|
|
|
} // namespace lyx
|