2007-08-14 16:50:51 +00:00
|
|
|
/**
|
2007-08-20 16:30:02 +00:00
|
|
|
* \file BiblioInfo.cpp
|
2007-08-14 16:50:51 +00:00
|
|
|
* This file is part of LyX, the document processor.
|
|
|
|
* Licence details can be found in the file COPYING.
|
|
|
|
*
|
|
|
|
* \author Angus Leeming
|
2008-11-14 15:58:50 +00:00
|
|
|
* \author Herbert Voß
|
2007-08-20 16:30:02 +00:00
|
|
|
* \author Richard Heck
|
2013-01-07 14:51:19 +00:00
|
|
|
* \author Julien Rioux
|
2017-01-09 16:54:56 +00:00
|
|
|
* \author Jürgen Spitzmüller
|
2007-08-14 16:50:51 +00:00
|
|
|
*
|
|
|
|
* Full author contact details are available in file CREDITS.
|
|
|
|
*/
|
|
|
|
|
2007-08-14 22:18:27 +00:00
|
|
|
#include <config.h>
|
|
|
|
|
2007-08-20 17:04:36 +00:00
|
|
|
#include "BiblioInfo.h"
|
2007-08-20 16:30:02 +00:00
|
|
|
#include "Buffer.h"
|
|
|
|
#include "BufferParams.h"
|
2007-08-15 08:57:58 +00:00
|
|
|
#include "buffer_funcs.h"
|
2017-01-07 16:12:08 +00:00
|
|
|
#include "Citation.h"
|
2009-01-16 23:42:16 +00:00
|
|
|
#include "Encoding.h"
|
2007-08-14 16:50:51 +00:00
|
|
|
#include "InsetIterator.h"
|
2010-03-29 20:21:30 +00:00
|
|
|
#include "Language.h"
|
2020-06-08 21:27:49 +00:00
|
|
|
#include "xml.h"
|
2007-08-14 16:50:51 +00:00
|
|
|
#include "Paragraph.h"
|
2010-03-29 18:37:25 +00:00
|
|
|
#include "TextClass.h"
|
2010-01-08 16:40:41 +00:00
|
|
|
#include "TocBackend.h"
|
2007-08-14 16:50:51 +00:00
|
|
|
|
2010-01-08 18:19:13 +00:00
|
|
|
#include "support/convert.h"
|
2010-03-27 12:58:26 +00:00
|
|
|
#include "support/debug.h"
|
2007-11-01 22:17:22 +00:00
|
|
|
#include "support/docstream.h"
|
2007-11-29 07:04:28 +00:00
|
|
|
#include "support/gettext.h"
|
2008-11-19 03:52:22 +00:00
|
|
|
#include "support/lassert.h"
|
2007-11-29 07:04:28 +00:00
|
|
|
#include "support/lstrings.h"
|
2010-06-29 17:09:40 +00:00
|
|
|
#include "support/regex.h"
|
2011-10-29 11:22:17 +00:00
|
|
|
#include "support/textutils.h"
|
2007-08-14 22:18:27 +00:00
|
|
|
|
Support for "qualified citation lists"
These are biblatex-specific multicite commands that allow for multiple
pre- and postnotes, as in:
\cites(pre)(post)[pre1][post1]{key1}[pre2][post2]{key2}...
with an optional general pre- and postnote, which applies to the whole
list (like [][] in normal cite commands) and an optional pre- and
postnotes for each item, so that pagination can actually be specified in
multi-cite references, as in:
(cf. Miller 2015, 2; furthermore Smith 2013, 23-23; Jenkins 2012, 103,
also refer to chapter 6 in this book)
See the biblatex manual, sec. 3.8.3., for details.
File format change.
2017-01-21 13:25:17 +00:00
|
|
|
#include <map>
|
2010-01-08 16:40:41 +00:00
|
|
|
#include <set>
|
|
|
|
|
2007-12-12 10:16:00 +00:00
|
|
|
using namespace std;
|
2007-12-12 18:57:56 +00:00
|
|
|
using namespace lyx::support;
|
2007-08-14 16:50:51 +00:00
|
|
|
|
2007-11-05 20:33:20 +00:00
|
|
|
|
2008-04-20 15:00:11 +00:00
|
|
|
namespace lyx {
|
2007-08-14 16:50:51 +00:00
|
|
|
|
2009-04-09 12:55:47 +00:00
|
|
|
namespace {
|
2009-01-17 00:16:31 +00:00
|
|
|
|
2017-03-19 10:41:33 +00:00
|
|
|
// Remove placeholders from names
|
|
|
|
docstring renormalize(docstring const & input)
|
|
|
|
{
|
|
|
|
docstring res = subst(input, from_ascii("$$space!"), from_ascii(" "));
|
|
|
|
return subst(res, from_ascii("$$comma!"), from_ascii(","));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-03-19 13:03:48 +00:00
|
|
|
// Split the surname into prefix ("von-part") and family name.
// Returns a pair (prefix, family name); the prefix is empty if the
// surname consists of a single token or no lowercase-led token occurs
// before the last one.
pair<docstring, docstring> parseSurname(docstring const & sname)
{
	// Split the surname into its tokens
	vector<docstring> pieces = getVectorFromString(sname, from_ascii(" "));
	if (pieces.size() < 2)
		// Single token: the whole string is the family name.
		return make_pair(docstring(), sname);

	// Now we look for pieces that begin with a lower case letter.
	// All except for the very last token constitute the "von-part".
	docstring prefix;
	vector<docstring>::const_iterator it = pieces.begin();
	vector<docstring>::const_iterator const en = pieces.end();
	bool first = true;
	for (; it != en; ++it) {
		if ((*it).empty())
			continue;
		// If this is the last piece, then what we now have is
		// the family name, notwithstanding the casing.
		if (it + 1 == en)
			break;
		char_type const c = (*it)[0];
		// If the piece starts with a upper case char, we assume
		// this is part of the surname.
		if (!isLower(c))
			break;
		// Nothing of the former, so add this piece to the prename
		// (separated by single spaces).
		if (!first)
			prefix += " ";
		else
			first = false;
		prefix += *it;
	}

	// Reconstruct the family name.
	// Note that if we left the loop with because it + 1 == en,
	// then this will still do the right thing, i.e., make surname
	// just be the last piece.
	docstring surname;
	first = true;
	for (; it != en; ++it) {
		if (!first)
			surname += " ";
		else
			first = false;
		surname += *it;
	}
	return make_pair(prefix, surname);
}
|
|
|
|
|
|
|
|
|
2017-03-19 11:42:18 +00:00
|
|
|
// The individual components of a personal name, as extracted
// from a bibliographic author/editor string by nameParts().
struct name_parts {
	// family name
	docstring surname;
	// given name(s)
	docstring prename;
	// name suffix (the "jr." part)
	docstring suffix;
	// name prefix (the "von" part)
	docstring prefix;
};
|
|
|
|
|
|
|
|
|
2017-03-19 13:03:48 +00:00
|
|
|
// gets the name parts (prename, surname, prefix, suffix) from an author-type string.
// Handles both the comma-separated BibTeX forms ("von Last, Jr, First") and the
// plain "First von Last" form. Text inside {...} groups is kept together.
name_parts nameParts(docstring const & iname)
{
	name_parts res;
	if (iname.empty())
		return res;

	// First we check for goupings (via {...}) and replace blanks and
	// commas inside groups with temporary placeholders
	docstring name;
	int gl = 0;
	docstring::const_iterator p = iname.begin();
	while (p != iname.end()) {
		// count grouping level
		if (*p == '{')
			++gl;
		else if (*p == '}')
			--gl;
		// generate string with probable placeholders
		if (*p == ' ' && gl > 0)
			name += from_ascii("$$space!");
		else if (*p == ',' && gl > 0)
			name += from_ascii("$$comma!");
		else
			name += *p;
		++p;
	}

	// Now we look for a comma, and take the last name to be everything
	// preceding the right-most one, so that we also get the name suffix
	// (aka "jr" part).
	vector<docstring> pieces = getVectorFromString(name);
	if (pieces.size() > 1) {
		// Whether we have a name suffix or not, the prename is
		// always last item
		res.prename = renormalize(pieces.back());
		// The family name, conversely, is always the first item.
		// However, it might contain a prefix (aka "von" part)
		docstring const sname = pieces.front();
		res.prefix = renormalize(parseSurname(sname).first);
		res.surname = renormalize(parseSurname(sname).second);
		// If we have three pieces (the maximum allowed by BibTeX),
		// the second one is the name suffix.
		if (pieces.size() > 2)
			res.suffix = renormalize(pieces.at(1));
		return res;
	}

	// OK, so now we want to look for the last name.
	// Split on spaces, to get various tokens.
	pieces = getVectorFromString(name, from_ascii(" "));
	// No space: Only a family name given
	if (pieces.size() < 2) {
		res.surname = renormalize(pieces.back());
		return res;
	}
	// If we get two pieces, assume "prename surname"
	if (pieces.size() == 2) {
		res.prename = renormalize(pieces.front());
		res.surname = renormalize(pieces.back());
		return res;
	}

	// More than 3 pieces: A name prefix (aka "von" part) might be included.
	// We look for the first piece that begins with a lower case letter
	// (which is the name prefix, if it is not the last token) or the last token.
	docstring prename;
	vector<docstring>::const_iterator it = pieces.begin();
	vector<docstring>::const_iterator const en = pieces.end();
	bool first = true;
	for (; it != en; ++it) {
		if ((*it).empty())
			continue;
		char_type const c = (*it)[0];
		// If the piece starts with a lower case char, we assume
		// this is the name prefix and thus prename is complete.
		if (isLower(c))
			break;
		// Same if this is the last piece, which is always the surname.
		if (it + 1 == en)
			break;
		// Nothing of the former, so add this piece to the prename
		if (!first)
			prename += " ";
		else
			first = false;
		prename += *it;
	}

	// Now reconstruct the family name and strip the prefix.
	// Note that if we left the loop because it + 1 == en,
	// then this will still do the right thing, i.e., make surname
	// just be the last piece.
	docstring surname;
	first = true;
	for (; it != en; ++it) {
		if (!first)
			surname += " ";
		else
			first = false;
		surname += *it;
	}
	res.prename = renormalize(prename);
	res.prefix = renormalize(parseSurname(surname).first);
	res.surname = renormalize(parseSurname(surname).second);
	return res;
}
|
|
|
|
|
|
|
|
|
2019-09-13 14:23:49 +00:00
|
|
|
// Re-constructs a display name from the name parts (prename, surname,
// prefix, suffix) of an author-type string according to a given scheme.
// The scheme may contain %surname%, %prename%, %prefix%, %suffix%
// placeholders as well as conditional clauses of the form
// {%part%[[...text with %part%...]]} which only expand if the part
// is non-empty.
docstring constructName(docstring const & name, string const & scheme)
{
	// Parse the name once; the previous code called nameParts()
	// four times, re-running the full parse for each part.
	name_parts const parts = nameParts(name);
	docstring const & prename = parts.prename;
	docstring const & surname = parts.surname;
	docstring const & prefix = parts.prefix;
	docstring const & suffix = parts.suffix;

	string res = scheme;
	// Conditional clauses for the three optional parts.
	static regex const reg1("(.*)(\\{%prename%\\[\\[)([^\\]]+)(\\]\\]\\})(.*)");
	static regex const reg2("(.*)(\\{%suffix%\\[\\[)([^\\]]+)(\\]\\]\\})(.*)");
	static regex const reg3("(.*)(\\{%prefix%\\[\\[)([^\\]]+)(\\]\\]\\})(.*)");
	smatch sub;
	// Changing the first parameter of regex_match() may corrupt the
	// second one. In this case we use the temporary string tmp.
	if (regex_match(scheme, sub, reg1)) {
		res = sub.str(1);
		if (!prename.empty())
			res += sub.str(3);
		res += sub.str(5);
	}
	if (regex_match(res, sub, reg2)) {
		string tmp = sub.str(1);
		if (!suffix.empty())
			tmp += sub.str(3);
		res = tmp + sub.str(5);
	}
	if (regex_match(res, sub, reg3)) {
		string tmp = sub.str(1);
		if (!prefix.empty())
			tmp += sub.str(3);
		res = tmp + sub.str(5);
	}
	// Finally substitute the unconditional placeholders.
	docstring result = from_ascii(res);
	result = subst(result, from_ascii("%prename%"), prename);
	result = subst(result, from_ascii("%surname%"), surname);
	result = subst(result, from_ascii("%prefix%"), prefix);
	result = subst(result, from_ascii("%suffix%"), suffix);
	return result;
}
|
|
|
|
|
|
|
|
|
2017-03-19 10:44:22 +00:00
|
|
|
// Split an author field into the individual author names.
// Only an " and " that occurs outside of a {...} group counts as a
// separator: groups such as {{Barnes and Noble, Inc.}} must be kept
// together as one single (corporate) name. We use temporary
// placeholders to tell the different " and " cases apart.
vector<docstring> const getAuthors(docstring const & author)
{
	// Literal ampersands are rather unusual in author names, but they
	// can happen (consider "C \& A Corp."), so protect them first.
	docstring work = subst(author, from_ascii("&"), from_ascii("$$amp!"));
	// Then turn each " and " into " & " so the separator can be
	// handled on a per-character level below.
	work = subst(work, from_ascii(" and "), from_ascii(" & "));

	// Traverse the string, tracking the {...} nesting depth, and
	// translate each "&" according to whether it sits inside a group.
	docstring converted;
	int depth = 0;
	for (docstring::const_iterator cit = work.begin(); cit != work.end(); ++cit) {
		char_type const c = *cit;
		// keep track of the grouping level
		if (c == '{')
			++depth;
		else if (c == '}')
			--depth;
		if (c == '&') {
			// Inside a group the "and" is literal text;
			// outside it marks a boundary between names.
			if (depth > 0)
				converted += from_ascii("and");
			else
				converted += from_ascii("$$namesep!");
		} else
			converted += c;
	}

	// Restore the protected literal ampersands.
	converted = subst(converted, from_ascii("$$amp!"), from_ascii("&"));

	// Finally split at the separator placeholders.
	return getVectorFromString(converted, from_ascii(" $$namesep! "));
}
|
|
|
|
|
|
|
|
|
2019-09-13 14:23:49 +00:00
|
|
|
// Whether the given author field names more than one person.
bool multipleAuthors(docstring const & author)
{
	vector<docstring> const all_authors = getAuthors(author);
	return all_authors.size() > 1;
}
|
2007-08-14 16:50:51 +00:00
|
|
|
|
2012-10-27 13:45:27 +00:00
|
|
|
|
2009-04-09 12:55:47 +00:00
|
|
|
// converts a string containing LaTeX commands into unicode
// for display.
// Implemented as a character-by-character scanner over `val`,
// consuming the input from the front and appending to `ret`.
docstring convertLaTeXCommands(docstring const & str)
{
	docstring val = str;
	docstring ret;

	bool scanning_cmd = false;   // currently skipping a command name
	bool scanning_math = false;  // currently inside $...$
	bool escaped = false; // used to catch \$, etc.
	while (!val.empty()) {
		char_type const ch = val[0];

		// if we're scanning math, we output everything until we
		// find an unescaped $, at which point we break out.
		if (scanning_math) {
			if (escaped)
				escaped = false;
			else if (ch == '\\')
				escaped = true;
			else if (ch == '$')
				scanning_math = false;
			ret += ch;
			val = val.substr(1);
			continue;
		}

		// if we're scanning a command name, then we just
		// discard characters until we hit something that
		// isn't alpha.
		if (scanning_cmd) {
			if (isAlphaASCII(ch)) {
				val = val.substr(1);
				escaped = false;
				continue;
			}
			// so we're done with this command.
			// now we fall through and check this character.
			scanning_cmd = false;
		}

		// was the last character a \? If so, then this is something like:
		// \\ or \$, so we'll just output it. That's probably not always right...
		if (escaped) {
			// exception: output \, as THIN SPACE
			if (ch == ',')
				ret.push_back(0x2009);
			else
				ret += ch;
			val = val.substr(1);
			escaped = false;
			continue;
		}

		// unescaped $ opens math mode
		if (ch == '$') {
			ret += ch;
			val = val.substr(1);
			scanning_math = true;
			continue;
		}

		// Change text mode accents in the form
		// {\v a} to \v{a} (see #9340).
		// FIXME: This is a sort of mini-tex2lyx.
		// Use the real tex2lyx instead!
		static lyx::regex const tma_reg("^\\{\\\\[bcCdfGhHkrtuUv]\\s\\w\\}");
		if (lyx::regex_search(to_utf8(val), tma_reg)) {
			val = val.substr(1);
			val.replace(2, 1, from_ascii("{"));
			continue;
		}

		// Apart from the above, we just ignore braces
		if (ch == '{' || ch == '}') {
			val = val.substr(1);
			continue;
		}

		// we're going to check things that look like commands, so if
		// this doesn't, just output it.
		if (ch != '\\') {
			ret += ch;
			val = val.substr(1);
			continue;
		}

		// ok, could be a command of some sort
		// let's see if it corresponds to some unicode
		// unicodesymbols has things in the form: \"{u},
		// whereas we may see things like: \"u. So we'll
		// look for that and change it, if necessary.
		// FIXME: This is a sort of mini-tex2lyx.
		// Use the real tex2lyx instead!
		static lyx::regex const reg("^\\\\\\W\\w");
		if (lyx::regex_search(to_utf8(val), reg)) {
			val.insert(3, from_ascii("}"));
			val.insert(2, from_ascii("{"));
		}
		bool termination;
		docstring rem;
		docstring const cnvtd = Encodings::fromLaTeXCommand(val,
			Encodings::TEXT_CMD, termination, rem);
		if (!cnvtd.empty()) {
			// it did, so we'll take that bit and proceed with what's left
			ret += cnvtd;
			val = rem;
			continue;
		}
		// it's a command of some sort, but not one we know:
		// skip its name by entering command-scanning mode.
		scanning_cmd = true;
		escaped = true;
		val = val.substr(1);
	}
	return ret;
}
|
|
|
|
|
2013-01-08 14:35:50 +00:00
|
|
|
|
|
|
|
// Escape '<' and '>' and remove richtext markers (e.g. {!this is richtext!}) from a string.
|
|
|
|
docstring processRichtext(docstring const & str, bool richtext)
|
|
|
|
{
|
|
|
|
docstring val = str;
|
|
|
|
docstring ret;
|
|
|
|
|
|
|
|
bool scanning_rich = false;
|
|
|
|
while (!val.empty()) {
|
|
|
|
char_type const ch = val[0];
|
|
|
|
if (ch == '{' && val.size() > 1 && val[1] == '!') {
|
|
|
|
// beginning of rich text
|
|
|
|
scanning_rich = true;
|
|
|
|
val = val.substr(2);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (scanning_rich && ch == '!' && val.size() > 1 && val[1] == '}') {
|
|
|
|
// end of rich text
|
|
|
|
scanning_rich = false;
|
|
|
|
val = val.substr(2);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (richtext) {
|
|
|
|
if (scanning_rich)
|
|
|
|
ret += ch;
|
|
|
|
else {
|
|
|
|
// we need to escape '<' and '>'
|
|
|
|
if (ch == '<')
|
|
|
|
ret += "<";
|
|
|
|
else if (ch == '>')
|
|
|
|
ret += ">";
|
|
|
|
else
|
|
|
|
ret += ch;
|
|
|
|
}
|
|
|
|
} else if (!scanning_rich /* && !richtext */)
|
|
|
|
ret += ch;
|
|
|
|
// else the character is discarded, which will happen only if
|
|
|
|
// richtext == false and we are scanning rich text
|
|
|
|
val = val.substr(1);
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-07-23 11:11:54 +00:00
|
|
|
} // namespace
|
2009-04-09 12:55:47 +00:00
|
|
|
|
|
|
|
|
|
|
|
//////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// BibTeXInfo
|
|
|
|
//
|
|
|
|
//////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
// Construct a BibTeX-style entry with the given citation key and
// entry type (e.g. "article"); the field map (info_) starts empty.
BibTeXInfo::BibTeXInfo(docstring const & key, docstring const & type)
	: is_bibtex_(true), bib_key_(key), num_bib_key_(0), entry_type_(type), info_(),
	  modifier_(0)
{}
|
|
|
|
|
|
|
|
|
2017-03-12 20:27:36 +00:00
|
|
|
|
2017-01-09 16:54:56 +00:00
|
|
|
// Return the formatted author list, falling back to the editor
// field when the entry has no author (e.g. for edited volumes).
docstring const BibTeXInfo::getAuthorOrEditorList(Buffer const * buf,
	bool full, bool forceshort) const
{
	docstring names = operator[]("author");
	if (names.empty())
		names = operator[]("editor");
	return getAuthorList(buf, names, full, forceshort);
}
|
|
|
|
|
|
|
|
|
2017-07-03 17:53:14 +00:00
|
|
|
// Format the given author string for display. `full` expands the whole
// list, `forceshort` truncates to the first author + "et al.",
// `allnames` prints full names rather than surnames only, and
// `beginning` selects the "Surname, Prename" vs "Prename Surname"
// order variants of the name schemes.
docstring const BibTeXInfo::getAuthorList(Buffer const * buf,
	docstring const & author, bool const full, bool const forceshort,
	bool const allnames, bool const beginning) const
{
	// Maxnames treshold depend on engine
	size_t maxnames = buf ?
		buf->params().documentClass().max_citenames() : 2;

	if (!is_bibtex_) {
		// Bibliography environment: try to extract "author (year)"
		// from the label.
		docstring const opt = label();
		if (opt.empty())
			return docstring();

		docstring authors;
		docstring const remainder = trim(split(opt, authors, '('));
		if (remainder.empty())
			// in this case, we didn't find a "(",
			// so we don't have author (year)
			return docstring();
		if (full) {
			// Natbib syntax is "Jones et al.(1990)Jones, Baker, and Williams"
			docstring const fullauthors = trim(rsplit(remainder, ')'));
			if (!fullauthors.empty())
				return fullauthors;
		}
		return authors;
	}

	if (author.empty())
		return author;

	// OK, we've got some names. Let's format them.
	// Try to split the author list
	vector<docstring> const authors = getAuthors(author);

	docstring retval;

	CiteEngineType const engine_type = buf ? buf->params().citeEngineType()
		: ENGINE_TYPE_DEFAULT;

	// These are defined in the styles; the literals are fallbacks
	// when no buffer is available.
	string const etal =
		buf ? buf->params().documentClass().getCiteMacro(engine_type, "B_etal")
		: " et al.";
	string const namesep =
		buf ? buf->params().documentClass().getCiteMacro(engine_type, "B_namesep")
		: ", ";
	string const lastnamesep =
		buf ? buf->params().documentClass().getCiteMacro(engine_type, "B_lastnamesep")
		: ", and ";
	string const pairnamesep =
		buf ? buf->params().documentClass().getCiteMacro(engine_type, "B_pairnamesep")
		: " and ";
	// Name schemes (see constructName() for the placeholder syntax).
	string firstnameform =
		buf ? buf->params().documentClass().getCiteMacro(engine_type, "!firstnameform")
		: "{%prefix%[[%prefix% ]]}%surname%{%suffix%[[, %suffix%]]}{%prename%[[, %prename%]]}";
	if (!beginning)
		firstnameform = buf ? buf->params().documentClass().getCiteMacro(engine_type, "!firstbynameform")
			: "%prename% {%prefix%[[%prefix% ]]}%surname%{%suffix%[[, %suffix%]]}";
	string othernameform = buf ? buf->params().documentClass().getCiteMacro(engine_type, "!othernameform")
		: "{%prefix%[[%prefix% ]]}%surname%{%suffix%[[, %suffix%]]}{%prename%[[, %prename%]]}";
	if (!beginning)
		othernameform = buf ? buf->params().documentClass().getCiteMacro(engine_type, "!otherbynameform")
			: "%prename% {%prefix%[[%prefix% ]]}%surname%{%suffix%[[, %suffix%]]}";
	string citenameform = buf ? buf->params().documentClass().getCiteMacro(engine_type, "!citenameform")
		: "{%prefix%[[%prefix% ]]}%surname%";

	// Shorten the list (with et al.) if forceshort is set
	// and the list can actually be shortened, else if maxcitenames
	// is passed and full is not set.
	bool shorten = forceshort && authors.size() > 1;
	vector<docstring>::const_iterator it = authors.begin();
	vector<docstring>::const_iterator en = authors.end();
	for (size_t i = 0; it != en; ++it, ++i) {
		if (i >= maxnames && !full) {
			shorten = true;
			break;
		}
		if (*it == "others") {
			// BibTeX convention for truncated author lists.
			retval += buf ? buf->B_(etal) : from_ascii(etal);
			break;
		}
		if (i > 0 && i == authors.size() - 1) {
			// separator before the final name
			if (authors.size() == 2)
				retval += buf ? buf->B_(pairnamesep) : from_ascii(pairnamesep);
			else
				retval += buf ? buf->B_(lastnamesep) : from_ascii(lastnamesep);
		} else if (i > 0)
			retval += buf ? buf->B_(namesep) : from_ascii(namesep);
		if (allnames)
			retval += (i == 0) ? constructName(*it, firstnameform)
				: constructName(*it, othernameform);
		else
			retval += constructName(*it, citenameform);
	}
	if (shorten) {
		// Only the first author, followed by "et al."
		if (allnames)
			retval = constructName(authors[0], firstnameform) + (buf ? buf->B_(etal) : from_ascii(etal));
		else
			retval = constructName(authors[0], citenameform) + (buf ? buf->B_(etal) : from_ascii(etal));
	}

	return convertLaTeXCommands(retval);
}
|
|
|
|
|
2007-11-05 20:33:20 +00:00
|
|
|
|
2007-08-20 16:30:02 +00:00
|
|
|
// Return the publication year of the entry: for BibTeX entries from
// the "year" field, falling back to parsing biblatex's "date" field;
// for bibliography-environment entries from the "(year)" part of the
// label. Returns an empty docstring if no year can be determined.
docstring const BibTeXInfo::getYear() const
{
	if (is_bibtex_) {
		// first try legacy year field
		docstring year = operator[]("year");
		if (!year.empty())
			return year;
		// now try biblatex's date field
		year = operator[]("date");
		// Format is [-]YYYY-MM-DD*/[-]YYYY-MM-DD*
		// We only want the years.
		static regex const yreg("[-]?([\\d]{4}).*");
		static regex const ereg(".*/[-]?([\\d]{4}).*");
		smatch sm;
		string const date = to_utf8(year);
		if (!regex_match(date, sm, yreg))
			// cannot parse year.
			return docstring();
		year = from_ascii(sm[1]);
		// check for an endyear
		if (regex_match(date, sm, ereg))
			// join start and end year with an en-dash (U+2013)
			year += char_type(0x2013) + from_ascii(sm[1]);
		return year;
	}

	// Bibliography environment: look for "author (year)" in the label.
	docstring const opt = label();
	if (opt.empty())
		return docstring();

	docstring authors;
	docstring tmp = split(opt, authors, '(');
	if (tmp.empty())
		// we don't have author (year)
		return docstring();
	docstring year;
	tmp = split(tmp, year, ')');
	return year;
}
|
|
|
|
|
|
|
|
|
2020-08-20 06:33:40 +00:00
|
|
|
// Extract the DOI, URL and local file locators from the entry's
// fields, normalizing the DOI to a https://doi.org/ link, resolving
// JabRef/KBibTeX-style "file"/"localfile" fields to file:/// URLs,
// and deriving a URL from biblatex eprint fields if none is given.
void BibTeXInfo::getLocators(docstring & doi, docstring & url, docstring & file) const
{
	if (is_bibtex_) {
		// get "doi" entry from citation record
		doi = operator[]("doi");
		if (!doi.empty() && !prefixIs(doi,from_ascii("http")))
			doi = "https://doi.org/" + doi;
		// get "url" entry from citation record
		url = operator[]("url");
		// get "file" entry from citation record
		file = operator[]("file");

		// Jabref case, field has a format:
		// Description:Location:Filetype;Description:Location:Filetype...
		// We will grab only first pdf
		if (!file.empty()) {
			docstring ret, filedest, tmp;
			ret = split(file, tmp, ':');
			tmp = split(ret, filedest, ':');
			//TODO howto deal with relative directories?
			FileName f(to_utf8(filedest));
			if (f.exists())
				file = "file:///" + filedest;
		}

		// kbibtex case, format:
		// file1.pdf;file2.pdf
		// We will grab only first pdf
		docstring kfile;
		if (file.empty())
			kfile = operator[]("localfile");
		if (!kfile.empty()) {
			docstring filedest, tmp;
			tmp = split(kfile, filedest, ';');
			//TODO howto deal with relative directories?
			FileName f(to_utf8(filedest));
			if (f.exists())
				file = "file:///" + filedest;
		}

		if (!url.empty())
			return;

		// try biblatex specific fields, see its manual
		// sec. 3.13.7 "Electronic Publishing Information"
		docstring eprinttype = operator[]("eprinttype");
		docstring eprint = operator[]("eprint");
		if (eprint.empty())
			return;

		// map known eprint repositories to their URL schemes
		if (eprinttype == "arxiv")
			url = "https://arxiv.org/abs/" + eprint;
		if (eprinttype == "jstor")
			url = "https://www.jstor.org/stable/" + eprint;
		if (eprinttype == "pubmed")
			url = "http://www.ncbi.nlm.nih.gov/pubmed/" + eprint;
		if (eprinttype == "hdl")
			url = "https://hdl.handle.net/" + eprint;
		if (eprinttype == "googlebooks")
			url = "http://books.google.com/books?id=" + eprint;

		return;
	}

	// Here can be handled the bibliography environment. All one could do
	// here is let LyX scan the entry for URL or HRef insets.
}
|
|
|
|
|
|
|
|
|
2010-03-27 12:58:26 +00:00
|
|
|
namespace {
|
2012-10-27 13:45:27 +00:00
|
|
|
|
2013-07-20 14:05:52 +00:00
|
|
|
docstring parseOptions(docstring const & format, string & optkey,
|
|
|
|
docstring & ifpart, docstring & elsepart);
|
2012-10-27 13:45:27 +00:00
|
|
|
|
|
|
|
// Calls parseOptions to deal with an embedded option, such as:
|
|
|
|
// {%number%[[, no.~%number%]]}
|
|
|
|
// which must appear at the start of format. ifelsepart gets the
|
|
|
|
// whole of the option, and we return what's left after the option.
|
|
|
|
// we return format if there is an error.
|
2013-07-20 14:05:52 +00:00
|
|
|
docstring parseEmbeddedOption(docstring const & format, docstring & ifelsepart)
|
2012-10-27 13:45:27 +00:00
|
|
|
{
|
|
|
|
LASSERT(format[0] == '{' && format[1] == '%', return format);
|
|
|
|
string optkey;
|
2013-07-20 14:05:52 +00:00
|
|
|
docstring ifpart;
|
|
|
|
docstring elsepart;
|
|
|
|
docstring const rest = parseOptions(format, optkey, ifpart, elsepart);
|
2012-10-27 13:45:27 +00:00
|
|
|
if (format == rest) { // parse error
|
|
|
|
LYXERR0("ERROR! Couldn't parse `" << format <<"'.");
|
|
|
|
return format;
|
2010-03-27 12:58:26 +00:00
|
|
|
}
|
2013-04-25 21:27:10 +00:00
|
|
|
LASSERT(rest.size() <= format.size(),
|
2013-07-20 14:05:52 +00:00
|
|
|
{ ifelsepart = docstring(); return format; });
|
2012-10-27 13:45:27 +00:00
|
|
|
ifelsepart = format.substr(0, format.size() - rest.size());
|
2013-04-25 21:27:10 +00:00
|
|
|
return rest;
|
2012-10-27 13:45:27 +00:00
|
|
|
}
|
2011-12-03 22:15:11 +00:00
|
|
|
|
|
|
|
|
2012-10-27 13:45:27 +00:00
|
|
|
// Gets a "clause" from a format string, where the clause is
|
|
|
|
// delimited by '[[' and ']]'. Returns what is left after the
|
|
|
|
// clause is removed, and returns format if there is an error.
|
2013-07-20 14:05:52 +00:00
|
|
|
docstring getClause(docstring const & format, docstring & clause)
|
2012-10-27 13:45:27 +00:00
|
|
|
{
|
2013-07-20 14:05:52 +00:00
|
|
|
docstring fmt = format;
|
2012-10-27 13:45:27 +00:00
|
|
|
// remove '[['
|
|
|
|
fmt = fmt.substr(2);
|
|
|
|
// we'll remove characters from the front of fmt as we
|
|
|
|
// deal with them
|
|
|
|
while (!fmt.empty()) {
|
|
|
|
if (fmt[0] == ']' && fmt.size() > 1 && fmt[1] == ']') {
|
|
|
|
// that's the end
|
|
|
|
fmt = fmt.substr(2);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
// check for an embedded option
|
|
|
|
if (fmt[0] == '{' && fmt.size() > 1 && fmt[1] == '%') {
|
2013-07-20 14:05:52 +00:00
|
|
|
docstring part;
|
|
|
|
docstring const rest = parseEmbeddedOption(fmt, part);
|
2012-10-27 13:45:27 +00:00
|
|
|
if (fmt == rest) {
|
|
|
|
LYXERR0("ERROR! Couldn't parse embedded option in `" << format <<"'.");
|
|
|
|
return format;
|
2010-03-27 12:58:26 +00:00
|
|
|
}
|
2012-10-27 13:45:27 +00:00
|
|
|
clause += part;
|
|
|
|
fmt = rest;
|
|
|
|
} else { // it's just a normal character
|
2010-03-27 12:58:26 +00:00
|
|
|
clause += fmt[0];
|
|
|
|
fmt = fmt.substr(1);
|
|
|
|
}
|
|
|
|
}
|
2012-10-27 13:45:27 +00:00
|
|
|
return fmt;
|
|
|
|
}
|
2010-03-27 12:58:26 +00:00
|
|
|
|
|
|
|
|
2012-10-27 13:45:27 +00:00
|
|
|
// parse an options string, which must appear at the start of the
// format parameter. puts the parsed bits in optkey, ifpart, and
// elsepart and returns what's left after the option is removed.
// if there's an error, it returns format itself.
docstring parseOptions(docstring const & format, string & optkey,
	docstring & ifpart, docstring & elsepart)
{
	// the caller must hand us a string starting with "{%"
	LASSERT(format[0] == '{' && format[1] == '%', return format);
	// strip '{%'
	docstring fmt = format.substr(2);
	size_t pos = fmt.find('%'); // end of key
	if (pos == string::npos) {
		LYXERR0("Error parsing `" << format <<"'. Can't find end of key.");
		return format;
	}
	optkey = to_utf8(fmt.substr(0, pos));
	fmt = fmt.substr(pos + 1);
	// [[format]] should be next
	if (fmt[0] != '[' || fmt[1] != '[') {
		LYXERR0("Error parsing `" << format <<"'. Can't find '[[' after key.");
		return format;
	}

	// getClause returns its input unchanged on failure
	docstring curfmt = fmt;
	fmt = getClause(curfmt, ifpart);
	if (fmt == curfmt) {
		LYXERR0("Error parsing `" << format <<"'. Couldn't get if clause.");
		return format;
	}

	if (fmt[0] == '}') // we're done, no else clause
		return fmt.substr(1);

	// else part should follow
	if (fmt[0] != '[' || fmt[1] != '[') {
		LYXERR0("Error parsing `" << format <<"'. Can't find else clause.");
		return format;
	}

	curfmt = fmt;
	fmt = getClause(curfmt, elsepart);
	// we should be done
	if (fmt == curfmt || fmt[0] != '}') {
		LYXERR0("Error parsing `" << format <<"'. Can't find end of option.");
		return format;
	}
	// strip the closing '}'
	return fmt.substr(1);
}
|
|
|
|
|
2012-10-27 13:45:27 +00:00
|
|
|
|
2017-07-23 11:11:54 +00:00
|
|
|
} // namespace
|
2010-03-27 12:58:26 +00:00
|
|
|
|
2014-05-23 15:36:12 +00:00
|
|
|
/* FIXME
|
|
|
|
Bug #9131 revealed an oddity in how we are generating citation information
|
2017-07-03 17:53:14 +00:00
|
|
|
when more than one key is given. We end up building a longer and longer format
|
2014-05-23 15:36:12 +00:00
|
|
|
string as we go, which we then have to re-parse, over and over and over again,
|
|
|
|
rather than generating the information for the individual keys and then putting
|
|
|
|
all of that together. We do that to deal with the way separators work, from what
|
|
|
|
I can tell, but it still feels like a hack. Fixing this would require quite a
|
|
|
|
bit of work, however.
|
|
|
|
*/
|
2013-07-20 14:05:52 +00:00
|
|
|
docstring BibTeXInfo::expandFormat(docstring const & format,
	BibTeXInfoList const & xrefs, int & counter, Buffer const & buf,
	CiteItem const & ci, bool next, bool second) const
{
	// Expand a cite-engine format string by substituting %key% values,
	// splicing in %!macro% definitions, translating %_text%/%B_text% bits
	// and evaluating {%key%[[if]][[else]]} options. `counter` is shared
	// across macro expansions to guard against runaway recursion.
	// incorrect use of macros could put us in an infinite loop
	static int const max_passes = 5000;
	// the use of overly large keys can lead to performance problems, due
	// to eventual attempts to convert LaTeX macros to unicode. See bug
	// #8944. By default, the size is limited to 128 (in CiteItem), but
	// for specific purposes (such as XHTML export), it needs to be enlarged
	// This is perhaps not the best solution, but it will have to do for now.
	size_t const max_keysize = ci.max_key_size;
	odocstringstream ret; // return value
	string key;
	bool scanning_key = false;  // true while between a pair of '%'
	bool scanning_rich = false; // true while inside a {!...!} rich text span

	CiteEngineType const engine_type = buf.params().citeEngineType();
	docstring fmt = format;
	// we'll remove characters from the front of fmt as we
	// deal with them
	while (!fmt.empty()) {
		if (counter > max_passes) {
			LYXERR0("Recursion limit reached while parsing `"
			        << format << "'.");
			return _("ERROR!");
		}

		char_type thischar = fmt[0];
		if (thischar == '%') {
			// beginning or end of key
			if (scanning_key) {
				// end of key
				scanning_key = false;
				// so we replace the key with its value, which may be empty
				if (key[0] == '!') {
					// macro: splice its definition into the stream we are
					// scanning and re-process it (counted against max_passes)
					string const val =
						buf.params().documentClass().getCiteMacro(engine_type, key);
					fmt = from_utf8(val) + fmt.substr(1);
					counter += 1;
					continue;
				} else if (prefixIs(key, "B_")) {
					// a translatable bit (to the Buffer language)
					string const val =
						buf.params().documentClass().getCiteMacro(engine_type, key);
					docstring const trans =
						translateIfPossible(from_utf8(val), buf.params().language->code());
					ret << trans;
				} else if (key[0] == '_') {
					// a translatable bit (to the GUI language)
					string const val =
						buf.params().documentClass().getCiteMacro(engine_type, key);
					docstring const trans =
						translateIfPossible(from_utf8(val));
					ret << trans;
				} else {
					// an ordinary field: look up its value (possibly in the
					// cross-referenced entries) and wrap it in a span marker
					// for XHTML styling, unless we are inside rich text.
					docstring const val =
						getValueForKey(key, buf, ci, xrefs, max_keysize);
					if (!scanning_rich)
						ret << from_ascii("{!<span class=\"bib-" + key + "\">!}");
					ret << val;
					if (!scanning_rich)
						ret << from_ascii("{!</span>!}");
				}
			} else {
				// beginning of key
				key.clear();
				scanning_key = true;
			}
		}
		else if (thischar == '{') {
			// beginning of option?
			if (scanning_key) {
				LYXERR0("ERROR: Found `{' when scanning key in `" << format << "'.");
				return _("ERROR!");
			}
			if (fmt.size() > 1) {
				if (fmt[1] == '%') {
					// it is the beginning of an optional format
					string optkey;
					docstring ifpart;
					docstring elsepart;
					docstring const newfmt =
						parseOptions(fmt, optkey, ifpart, elsepart);
					if (newfmt == fmt) // parse error
						return _("ERROR!");
					fmt = newfmt;
					docstring const val =
						getValueForKey(optkey, buf, ci, xrefs);
					if (optkey == "next" && next)
						ret << ifpart; // without expansion
					else if (optkey == "second" && second) {
						// "second" clause: expand with a fresh recursion counter
						int newcounter = 0;
						ret << expandFormat(ifpart, xrefs, newcounter, buf,
							ci, next);
					} else if (!val.empty()) {
						// the key has a value: expand the if clause
						int newcounter = 0;
						ret << expandFormat(ifpart, xrefs, newcounter, buf,
							ci, next);
					} else if (!elsepart.empty()) {
						// no value: expand the else clause, if any
						int newcounter = 0;
						ret << expandFormat(elsepart, xrefs, newcounter, buf,
							ci, next);
					}
					// fmt will have been shortened for us already
					continue;
				}
				if (fmt[1] == '!') {
					// beginning of rich text
					scanning_rich = true;
					fmt = fmt.substr(2);
					ret << from_ascii("{!");
					continue;
				}
			}
			// we are here if '{' was not followed by % or !.
			// So it's just a character.
			ret << thischar;
		}
		else if (scanning_rich && thischar == '!'
		         && fmt.size() > 1 && fmt[1] == '}') {
			// end of rich text
			scanning_rich = false;
			fmt = fmt.substr(2);
			ret << from_ascii("!}");
			continue;
		}
		else if (scanning_key)
			key += char(thischar);
		else {
			try {
				ret.put(thischar);
			} catch (EncodingException & /* e */) {
				// an uncodable character is simply dropped from the label
				LYXERR0("Uncodable character '" << docstring(1, thischar) << " in citation label!");
			}
		}
		fmt = fmt.substr(1);
	} // while loop
	if (scanning_key) {
		LYXERR0("Never found end of key in `" << format << "'!");
		return _("ERROR!");
	}
	if (scanning_rich) {
		LYXERR0("Never found end of rich text in `" << format << "'!");
		return _("ERROR!");
	}
	return ret.str();
}
|
|
|
|
|
|
|
|
|
2019-09-13 15:49:29 +00:00
|
|
|
docstring const & BibTeXInfo::getInfo(BibTeXInfoList const & xrefs,
	Buffer const & buf, CiteItem const & ci, docstring const & format_in) const
{
	// Return the formatted bibliography entry for this item, built from
	// the cite engine's format string (or format_in, if non-empty).
	// Results are cached in the mutable members info_ (plain text) and
	// info_richtext_ (rich text).
	bool const richtext = ci.richtext;

	// The cached plain-text entry still carries rich text markers; strip
	// them and convert LaTeX macros before handing it back.
	if (!richtext && !info_.empty()) {
		info_ = convertLaTeXCommands(processRichtext(info_, false));
		return info_;
	}
	if (richtext && !info_richtext_.empty())
		return info_richtext_;

	if (!is_bibtex_) {
		// not a BibTeX database entry: the display string was stored
		// under the "ref" pseudo-field.
		// NOTE(review): assumes "ref" is always present here; if find()
		// returned end(), it->second would be undefined — confirm with
		// the code that fills these entries.
		BibTeXInfo::const_iterator it = find(from_ascii("ref"));
		info_ = it->second;
		return info_;
	}

	CiteEngineType const engine_type = buf.params().citeEngineType();
	DocumentClass const & dc = buf.params().documentClass();
	// use the layout's format for this entry type unless one was given
	docstring const & format = format_in.empty()?
		from_utf8(dc.getCiteFormat(engine_type, to_utf8(entry_type_)))
		: format_in;
	int counter = 0;
	info_ = expandFormat(format, xrefs, counter, buf,
		ci, false, false);

	if (info_.empty()) {
		// this probably shouldn't happen
		return info_;
	}

	if (richtext) {
		info_richtext_ = convertLaTeXCommands(processRichtext(info_, true));
		return info_richtext_;
	}

	// plain text requested: strip rich text and convert LaTeX macros
	info_ = convertLaTeXCommands(processRichtext(info_, false));
	return info_;
}
|
|
|
|
|
|
|
|
|
Improve info display for biblatex databases, part II
In addition to the classic crossref, biblatex introduces xdata
references in order to source-out common data of entries. Entries
that have "xdata = {somekey}" just inherit all fields from the
respective @xdata entry, if the field is not already defined in
the entry itself (just like crossref, with the exception that @xdata
entries themselves are _never_ output on their own). @xdata entries can
themselves inherit to other @xdata entries (ad infinitum). So you can,
for instance, setup an xdata entry for a book series with series name
that inherits an xdata entry with information of the publisher
(publisher, address). Any book of that series would just need to refer
to the series xdata and add the number.
BiblioInfo now checks, in addition to crossrefs, for such xdata
references and inherits missing fields.
Nte that biblatex also introduces an "xref" field as an alternative to
crossref. We must not care about that, since the point of xref is that
it does not inherit fields from the target (just cites that one if a
given number of refs to it exist)
2016-09-18 10:44:12 +00:00
|
|
|
docstring const BibTeXInfo::getLabel(BibTeXInfoList const xrefs,
	Buffer const & buf, docstring const & format,
	CiteItem const & ci, bool next, bool second) const
{
	// Expand the citation label format for this entry.
	int counter = 0;
	docstring label = expandFormat(format, xrefs, counter, buf, ci, next, second);

	// Rich text markers and LaTeX macros are resolved only on the
	// outermost call, i.e. not while handling the "next" citation.
	if (!next && !label.empty())
		label = convertLaTeXCommands(processRichtext(label, ci.richtext));

	return label;
}
|
|
|
|
|
|
|
|
|
2009-04-09 12:55:47 +00:00
|
|
|
docstring const & BibTeXInfo::operator[](docstring const & field) const
{
	// Shared fallback returned when the field is not present.
	static docstring const empty_value = docstring();
	BibTeXInfo::const_iterator const cit = find(field);
	return (cit == end()) ? empty_value : cit->second;
}
|
2011-12-03 22:15:11 +00:00
|
|
|
|
|
|
|
|
2009-04-09 12:55:47 +00:00
|
|
|
docstring const & BibTeXInfo::operator[](string const & field) const
{
	// Widen the narrow key and delegate to the docstring overload.
	return (*this)[from_ascii(field)];
}
|
|
|
|
|
|
|
|
|
2013-03-27 19:46:32 +00:00
|
|
|
docstring BibTeXInfo::getValueForKey(string const & oldkey, Buffer const & buf,
	CiteItem const & ci, BibTeXInfoList const & xrefs, size_t maxsize) const
{
	// Resolve a format-string key (such as "author", "clean:title" or
	// "abbrvnames:editor") to its display value for this entry. Plain keys
	// are looked up in the entry itself and then in the cross-referenced
	// (crossref/xdata) entries; the remainder are special computed keys.
	// The result is truncated to maxsize characters.
	// anything less is pointless
	LASSERT(maxsize >= 16, maxsize = 16);
	string key = oldkey;
	bool cleanit = false;
	// a "clean:" prefix requests an XML-attribute-safe value (see below)
	if (prefixIs(oldkey, "clean:")) {
		key = oldkey.substr(6);
		cleanit = true;
	}

	docstring ret = operator[](key);
	if (ret.empty() && !xrefs.empty()) {
		// inherit the field from a cross-referenced entry, if possible
		// xr is a (reference to a) BibTeXInfo const *
		for (auto const & xr : xrefs) {
			if (xr && !(*xr)[key].empty()) {
				ret = (*xr)[key];
				break;
			}
		}
	}
	if (ret.empty()) {
		// some special keys
		// FIXME: dialog, textbefore and textafter have nothing to do with this
		if (key == "dialog" && ci.context == CiteItem::Dialog)
			ret = from_ascii("x"); // any non-empty string will do
		else if (key == "export" && ci.context == CiteItem::Export)
			ret = from_ascii("x"); // any non-empty string will do
		else if (key == "ifstar" && ci.Starred)
			ret = from_ascii("x"); // any non-empty string will do
		else if (key == "ifqualified" && ci.isQualified)
			ret = from_ascii("x"); // any non-empty string will do
		else if (key == "entrytype")
			ret = entry_type_;
		else if (prefixIs(key, "ifentrytype:")
			 && from_ascii(key.substr(12)) == entry_type_)
			ret = from_ascii("x"); // any non-empty string will do
		else if (key == "key")
			ret = bib_key_;
		else if (key == "label")
			ret = label_;
		else if (key == "modifier" && modifier_ != 0)
			ret = modifier_;
		else if (key == "numericallabel")
			ret = cite_number_;
		else if (prefixIs(key, "ifmultiple:")) {
			// Return whether we have multiple authors
			docstring const kind = operator[](from_ascii(key.substr(11)));
			if (multipleAuthors(kind))
				ret = from_ascii("x"); // any non-empty string will do
		}
		else if (prefixIs(key, "abbrvnames:")) {
			// Special key to provide abbreviated name list,
			// with respect to maxcitenames. Suitable for Bibliography
			// beginnings.
			docstring const kind = operator[](from_ascii(key.substr(11)));
			ret = getAuthorList(&buf, kind, false, false, true);
			if (ci.forceUpperCase && !ret.empty() && isLowerCase(ret[0]))
				ret[0] = uppercase(ret[0]);
		} else if (prefixIs(key, "fullnames:")) {
			// Return a full name list. Suitable for Bibliography
			// beginnings.
			docstring const kind = operator[](from_ascii(key.substr(10)));
			ret = getAuthorList(&buf, kind, true, false, true);
			if (ci.forceUpperCase && !ret.empty() && isLowerCase(ret[0]))
				ret[0] = uppercase(ret[0]);
		} else if (prefixIs(key, "forceabbrvnames:")) {
			// Special key to provide abbreviated name lists,
			// irrespective of maxcitenames. Suitable for Bibliography
			// beginnings.
			// Strip the whole "forceabbrvnames:" prefix (16 chars; the
			// previous offset 15 left a stray ':' in the field name, so
			// the lookup always failed).
			docstring const kind = operator[](from_ascii(key.substr(16)));
			ret = getAuthorList(&buf, kind, false, true, true);
			if (ci.forceUpperCase && !ret.empty() && isLowerCase(ret[0]))
				ret[0] = uppercase(ret[0]);
		} else if (prefixIs(key, "abbrvbynames:")) {
			// Special key to provide abbreviated name list,
			// with respect to maxcitenames. Suitable for further names
			// inside a bibliography item (such as "ed. by ...")
			// Strip the whole "abbrvbynames:" prefix (13 chars; the
			// previous offset 11 left "s:" in the field name).
			docstring const kind = operator[](from_ascii(key.substr(13)));
			ret = getAuthorList(&buf, kind, false, false, true, false);
			if (ci.forceUpperCase && !ret.empty() && isLowerCase(ret[0]))
				ret[0] = uppercase(ret[0]);
		} else if (prefixIs(key, "fullbynames:")) {
			// Return a full name list. Suitable for further names inside a
			// bibliography item (such as "ed. by ...")
			// Strip the whole "fullbynames:" prefix (12 chars; the
			// previous offset 10 left "s:" in the field name).
			docstring const kind = operator[](from_ascii(key.substr(12)));
			ret = getAuthorList(&buf, kind, true, false, true, false);
			if (ci.forceUpperCase && !ret.empty() && isLowerCase(ret[0]))
				ret[0] = uppercase(ret[0]);
		} else if (prefixIs(key, "forceabbrvbynames:")) {
			// Special key to provide abbreviated name lists,
			// irrespective of maxcitenames. Suitable for further names
			// inside a bibliography item (such as "ed. by ...")
			// Strip the whole "forceabbrvbynames:" prefix (18 chars; the
			// previous offset 15 left "es:" in the field name).
			docstring const kind = operator[](from_ascii(key.substr(18)));
			ret = getAuthorList(&buf, kind, false, true, true, false);
			if (ci.forceUpperCase && !ret.empty() && isLowerCase(ret[0]))
				ret[0] = uppercase(ret[0]);
		} else if (key == "abbrvciteauthor") {
			// Special key to provide abbreviated author or
			// editor names (suitable for citation labels),
			// with respect to maxcitenames.
			ret = getAuthorOrEditorList(&buf, false, false);
			if (ci.forceUpperCase && !ret.empty() && isLowerCase(ret[0]))
				ret[0] = uppercase(ret[0]);
		} else if (key == "fullciteauthor") {
			// Return a full author or editor list (for citation labels)
			ret = getAuthorOrEditorList(&buf, true, false);
			if (ci.forceUpperCase && !ret.empty() && isLowerCase(ret[0]))
				ret[0] = uppercase(ret[0]);
		} else if (key == "forceabbrvciteauthor") {
			// Special key to provide abbreviated author or
			// editor names (suitable for citation labels),
			// irrespective of maxcitenames.
			ret = getAuthorOrEditorList(&buf, false, true);
			if (ci.forceUpperCase && !ret.empty() && isLowerCase(ret[0]))
				ret[0] = uppercase(ret[0]);
		} else if (key == "bibentry") {
			// Special key to provide the full bibliography entry: see getInfo()
			CiteEngineType const engine_type = buf.params().citeEngineType();
			DocumentClass const & dc = buf.params().documentClass();
			docstring const & format =
				from_utf8(dc.getCiteFormat(engine_type, to_utf8(entry_type_), false));
			int counter = 0;
			ret = expandFormat(format, xrefs, counter, buf, ci, false, false);
		} else if (key == "textbefore")
			ret = ci.textBefore;
		else if (key == "textafter")
			ret = ci.textAfter;
		else if (key == "curpretext") {
			// the prenote attached to this very occurrence of the key in
			// a qualified citation list (biblatex \cites)
			vector<pair<docstring, docstring>> pres = ci.getPretexts();
			vector<pair<docstring, docstring>>::iterator it = pres.begin();
			int numkey = 1;
			// num_bib_key_ disambiguates repeated occurrences of the key
			for (; it != pres.end() ; ++it) {
				if ((*it).first == bib_key_ && numkey == num_bib_key_) {
					ret = (*it).second;
					pres.erase(it); // note: pres is a local copy
					break;
				}
				if ((*it).first == bib_key_)
					++numkey;
			}
		} else if (key == "curposttext") {
			// the postnote attached to this very occurrence of the key
			vector<pair<docstring, docstring>> posts = ci.getPosttexts();
			vector<pair<docstring, docstring>>::iterator it = posts.begin();
			int numkey = 1;
			for (; it != posts.end() ; ++it) {
				if ((*it).first == bib_key_ && numkey == num_bib_key_) {
					ret = (*it).second;
					posts.erase(it); // note: posts is a local copy
					break;
				}
				if ((*it).first == bib_key_)
					++numkey;
			}
		} else if (key == "year")
			ret = getYear();
	}

	// escape the value if it is destined for an XML attribute
	if (cleanit)
		ret = xml::cleanAttr(ret);

	// make sure it is not too big
	support::truncateWithEllipsis(ret, maxsize);
	return ret;
}
|
|
|
|
|
|
|
|
|
2007-11-05 20:33:20 +00:00
|
|
|
//////////////////////////////////////////////////////////////////////
|
2007-08-20 16:30:02 +00:00
|
|
|
//
|
|
|
|
// BiblioInfo
|
|
|
|
//
|
2007-11-05 20:33:20 +00:00
|
|
|
//////////////////////////////////////////////////////////////////////
|
2007-08-20 16:30:02 +00:00
|
|
|
|
2007-08-14 16:50:51 +00:00
|
|
|
namespace {
|
2012-10-27 13:45:27 +00:00
|
|
|
|
2007-12-12 19:28:07 +00:00
|
|
|
// A functor for use with sort, leading to case insensitive sorting
|
2020-05-04 23:41:18 +00:00
|
|
|
bool compareNoCase(const docstring & a, const docstring & b) {
|
|
|
|
return compare_no_case(a, b) < 0;
|
|
|
|
}
|
2012-10-27 13:45:27 +00:00
|
|
|
|
2017-07-23 11:11:54 +00:00
|
|
|
} // namespace
|
2007-08-20 16:30:02 +00:00
|
|
|
|
2007-08-14 16:50:51 +00:00
|
|
|
|
Improve info display for biblatex databases, part II
In addition to the classic crossref, biblatex introduces xdata
references in order to source-out common data of entries. Entries
that have "xdata = {somekey}" just inherit all fields from the
respective @xdata entry, if the field is not already defined in
the entry itself (just like crossref, with the exception that @xdata
entries themselves are _never_ output on their own). @xdata entries can
themselves inherit to other @xdata entries (ad infinitum). So you can,
for instance, setup an xdata entry for a book series with series name
that inherits an xdata entry with information of the publisher
(publisher, address). Any book of that series would just need to refer
to the series xdata and add the number.
BiblioInfo now checks, in addition to crossrefs, for such xdata
references and inherits missing fields.
Nte that biblatex also introduces an "xref" field as an alternative to
crossref. We must not care about that, since the point of xref is that
it does not inherit fields from the target (just cites that one if a
given number of refs to it exist)
2016-09-18 10:44:12 +00:00
|
|
|
// Collect the keys of all entries cross-referenced by 'data': the legacy
// BibTeX "crossref" target (only considered at top level, i.e. when
// 'nested' is false) and biblatex "xdata" keys. @xdata entries may
// themselves reference further xdata entries, hence the recursion with
// nested == true. Returned order matters: a key's own xrefs are pushed
// before the nested ones found through it.
vector<docstring> const BiblioInfo::getXRefs(BibTeXInfo const & data, bool const nested) const
{
	vector<docstring> result;
	// Only BibTeX entries can carry crossref/xdata fields.
	if (!data.isBibTeX())
		return result;
	// Legacy crossref field. This is not nestable.
	if (!nested && !data["crossref"].empty()) {
		docstring const xrefkey = data["crossref"];
		result.push_back(xrefkey);
		// However, check for nested xdatas
		BiblioInfo::const_iterator it = find(xrefkey);
		if (it != end()) {
			BibTeXInfo const & xref = it->second;
			vector<docstring> const nxdata = getXRefs(xref, true);
			if (!nxdata.empty())
				result.insert(result.end(), nxdata.begin(), nxdata.end());
		}
	}
	// Biblatex's xdata field. Infinitely nestable.
	// XData field can consist of a comma-separated list of keys
	vector<docstring> const xdatakeys = getVectorFromString(data["xdata"]);
	if (!xdatakeys.empty()) {
		for (auto const & xdatakey : xdatakeys) {
			result.push_back(xdatakey);
			// Recurse into the xdata entry, if we know it.
			BiblioInfo::const_iterator it = find(xdatakey);
			if (it != end()) {
				BibTeXInfo const & xdata = it->second;
				vector<docstring> const nxdata = getXRefs(xdata, true);
				if (!nxdata.empty())
					result.insert(result.end(), nxdata.begin(), nxdata.end());
			}
		}
	}
	return result;
}
|
|
|
|
|
|
|
|
|
2007-08-20 16:30:02 +00:00
|
|
|
// Return all bibliography keys known to this BiblioInfo, sorted
// case-insensitively (suitable for display in dialogs).
vector<docstring> const BiblioInfo::getKeys() const
{
	vector<docstring> keys;
	for (auto const & kv : *this)
		keys.push_back(kv.first);
	sort(keys.begin(), keys.end(), &compareNoCase);
	return keys;
}
|
|
|
|
|
|
|
|
|
2007-08-20 16:30:02 +00:00
|
|
|
// Return all field names seen in the loaded databases, sorted.
vector<docstring> const BiblioInfo::getFields() const
{
	vector<docstring> fields(field_names_.begin(), field_names_.end());
	sort(fields.begin(), fields.end());
	return fields;
}
|
2007-08-14 16:50:51 +00:00
|
|
|
|
2007-08-20 16:30:02 +00:00
|
|
|
|
|
|
|
// Return all entry types seen in the loaded databases, sorted.
vector<docstring> const BiblioInfo::getEntries() const
{
	vector<docstring> entries(entry_types_.begin(), entry_types_.end());
	sort(entries.begin(), entries.end());
	return entries;
}
|
2007-08-14 16:50:51 +00:00
|
|
|
|
|
|
|
|
2017-01-09 16:54:56 +00:00
|
|
|
// Look up 'key' and return its (non-full) author or editor list,
// or an empty docstring if the key is unknown.
docstring const BiblioInfo::getAuthorOrEditorList(docstring const & key, Buffer const & buf) const
{
	BiblioInfo::const_iterator const bit = find(key);
	if (bit == end())
		return docstring();
	return bit->second.getAuthorOrEditorList(&buf, false);
}
|
|
|
|
|
|
|
|
|
2010-01-11 16:11:55 +00:00
|
|
|
// Look up 'key' and return its citation number (as assigned by
// makeCitationLabels), or an empty docstring if the key is unknown.
docstring const BiblioInfo::getCiteNumber(docstring const & key) const
{
	BiblioInfo::const_iterator const bit = find(key);
	return (bit == end()) ? docstring() : bit->second.citeNumber();
}
|
|
|
|
|
2020-08-20 06:33:40 +00:00
|
|
|
// Fill doi/url/file with the locators of the entry 'key'.
// The output parameters are left untouched if the key is unknown.
void BiblioInfo::getLocators(docstring const & key, docstring & doi, docstring & url, docstring & file) const
{
	BiblioInfo::const_iterator const bit = find(key);
	if (bit != end())
		bit->second.getLocators(doi, url, file);
}
|
|
|
|
|
2010-01-11 16:11:55 +00:00
|
|
|
|
2013-01-14 14:25:26 +00:00
|
|
|
// Return the year of entry 'key', falling back to its cross-referenced
// entries (crossref/xdata) if the entry itself carries none. If
// 'use_modifier' is set, the disambiguation letter ("1984a") is appended.
docstring const BiblioInfo::getYear(docstring const & key, bool use_modifier) const
{
	BiblioInfo::const_iterator const bit = find(key);
	if (bit == end())
		return docstring();
	BibTeXInfo const & data = bit->second;
	docstring year = data.getYear();
	if (year.empty()) {
		// The entry has no year of its own; try the crossrefs.
		vector<docstring> const xrefs = getXRefs(data);
		if (xrefs.empty())
			// no luck
			return docstring();
		for (docstring const & xref : xrefs) {
			BiblioInfo::const_iterator const xit = find(xref);
			if (xit == end())
				continue;
			year = xit->second.getYear();
			if (!year.empty())
				// success!
				break;
		}
	}
	if (use_modifier && data.modifier() != 0)
		year += data.modifier();
	return year;
}
|
|
|
|
|
|
|
|
|
2013-01-14 14:25:26 +00:00
|
|
|
// Convenience overload: like getYear(key, use_modifier), but returns the
// translated string "No year" instead of an empty result.
docstring const BiblioInfo::getYear(docstring const & key, Buffer const & buf, bool use_modifier) const
{
	docstring const year = getYear(key, use_modifier);
	return year.empty() ? buf.B_("No year") : year;
}
|
|
|
|
|
|
|
|
|
2011-12-03 22:15:11 +00:00
|
|
|
// Return the formatted bibliography information for entry 'key',
// delegating to BibTeXInfo::getInfo with the pointers to all
// cross-referenced entries (for field inheritance).
docstring const BiblioInfo::getInfo(docstring const & key,
	Buffer const & buf, CiteItem const & ci, docstring const & format) const
{
	BiblioInfo::const_iterator const bit = find(key);
	if (bit == end())
		return docstring(_("Bibliography entry not found!"));
	BibTeXInfo const & data = bit->second;
	// Collect pointers to the entries this one cross-references.
	BibTeXInfoList xrefs;
	for (docstring const & xrefkey : getXRefs(data)) {
		BiblioInfo::const_iterator const xit = find(xrefkey);
		if (xit != end())
			xrefs.push_back(&(xit->second));
	}
	return data.getInfo(xrefs, buf, ci, format);
}
|
|
|
|
|
|
|
|
|
2014-05-23 14:59:12 +00:00
|
|
|
// Build the citation label for 'keys' using the cite format of 'style'
// for the current engine. The partially expanded format string is threaded
// through successive BibTeXInfo::getLabel calls, one per key.
// NOTE: 'keys' is deliberately taken by value — it may be shrunk below.
docstring const BiblioInfo::getLabel(vector<docstring> keys,
	Buffer const & buf, string const & style, CiteItem const & ci) const
{
	size_t max_size = ci.max_size;
	// shorter makes no sense
	LASSERT(max_size >= 16, max_size = 16);

	// we can't display more than 10 of these, anyway
	// but since we truncate in the middle,
	// we need to split into two halves
	// (keep the first 5 and the last 5 keys).
	bool const too_many_keys = keys.size() > 10;
	vector<docstring> lkeys;
	if (too_many_keys) {
		lkeys.insert(lkeys.end(), keys.end() - 5, keys.end());
		keys.resize(5);
		keys.insert(keys.end(), lkeys.begin(), lkeys.end());
	}

	CiteEngineType const engine_type = buf.params().citeEngineType();
	DocumentClass const & dc = buf.params().documentClass();
	docstring const & format = from_utf8(dc.getCiteFormat(engine_type, style, false, "cite"));
	docstring ret = format;
	vector<docstring>::const_iterator key = keys.begin();
	vector<docstring>::const_iterator ken = keys.end();
	// Keys already processed, used to count repetitions of the same key.
	vector<docstring> handled_keys;
	for (int i = 0; key != ken; ++key, ++i) {
		handled_keys.push_back(*key);
		// n = how many times this key has occurred so far (including now);
		// passed on as numKey to disambiguate repeated keys (used with
		// qualified citation lists, e.g. for per-key pre-/posttext).
		int n = 0;
		for (auto const & k : handled_keys) {
			if (k == *key)
				++n;
		}
		BiblioInfo::const_iterator it = find(*key);
		// Fall back to a stub entry carrying only the key, so unknown
		// keys still produce some output.
		BibTeXInfo empty_data;
		empty_data.key(*key);
		BibTeXInfo & data = empty_data;
		vector<BibTeXInfo const *> xrefptrs;
		if (it != end()) {
			data = it->second;
			// Collect cross-referenced entries for field inheritance.
			for (docstring const & xref : getXRefs(data)) {
				BiblioInfo::const_iterator const xrefit = find(xref);
				if (xrefit != end())
					xrefptrs.push_back(&(xrefit->second));
			}
		}
		data.numKey(n);
		// Last two arguments: "more keys follow" and "this is the second key".
		ret = data.getLabel(xrefptrs, buf, ret, ci, key + 1 != ken, i == 1);
	}

	// make sure the result is not too big; truncate in the middle
	support::truncateWithEllipsis(ret, max_size, true);

	return ret;
}
|
|
|
|
|
|
|
|
|
2010-02-13 15:22:49 +00:00
|
|
|
// Return whether the entry for 'key' stems from a BibTeX database.
// 'key' may be a comma-separated list; only the first key is checked.
bool BiblioInfo::isBibtex(docstring const & key) const
{
	docstring first_key;
	split(key, first_key, ',');
	BiblioInfo::const_iterator const bit = find(first_key);
	return bit != end() && bit->second.isBibTeX();
}
|
|
|
|
|
|
|
|
|
2017-06-10 12:29:07 +00:00
|
|
|
// For each citation style, compute the label produced for 'keys' and
// return the (style name, label) pairs.
BiblioInfo::CiteStringMap const BiblioInfo::getCiteStrings(
	vector<docstring> const & keys, vector<CitationStyle> const & styles,
	Buffer const & buf, CiteItem const & ci) const
{
	if (empty())
		return CiteStringMap();

	CiteStringMap csm(styles.size());
	for (size_t i = 0; i != csm.size(); ++i) {
		string const style = styles[i].name;
		csm[i] = make_pair(from_ascii(style), getLabel(keys, buf, style, ci));
	}

	return csm;
}
|
|
|
|
|
|
|
|
|
2008-04-25 20:03:03 +00:00
|
|
|
// Merge the entries, field names and entry types of 'info' into this
// BiblioInfo. Note that map/set range-insert keeps the existing element
// on a duplicate key, so entries already present here take precedence.
void BiblioInfo::mergeBiblioInfo(BiblioInfo const & info)
{
	bimap_.insert(info.begin(), info.end());
	field_names_.insert(info.field_names_.begin(), info.field_names_.end());
	entry_types_.insert(info.entry_types_.begin(), info.entry_types_.end());
}
|
|
|
|
|
|
|
|
|
2010-01-08 16:40:41 +00:00
|
|
|
namespace {
|
2012-10-27 13:45:27 +00:00
|
|
|
|
|
|
|
// used in xhtml to sort a list of BibTeXInfo objects
|
|
|
|
// used in xhtml to sort a list of BibTeXInfo objects:
// lexicographically by author/editor list, then year, then title.
bool lSorter(BibTeXInfo const * lhs, BibTeXInfo const * rhs)
{
	docstring const lauth = lhs->getAuthorOrEditorList();
	docstring const rauth = rhs->getAuthorOrEditorList();
	if (lauth != rauth)
		return lauth < rauth;
	docstring const lyear = lhs->getYear();
	docstring const ryear = rhs->getYear();
	if (lyear != ryear)
		return lyear < ryear;
	docstring const ltitl = lhs->operator[]("title");
	docstring const rtitl = rhs->operator[]("title");
	return ltitl < rtitl;
}
|
|
|
|
|
2017-07-23 11:11:54 +00:00
|
|
|
} // namespace
|
2010-01-08 16:40:41 +00:00
|
|
|
|
|
|
|
|
|
|
|
// Rebuild cited_entries_: the keys of all BibTeX entries actually cited
// in 'buf', sorted by author/year/title (via lSorter).
void BiblioInfo::collectCitedEntries(Buffer const & buf)
{
	cited_entries_.clear();
	// We are going to collect all the citation keys used in the document,
	// getting them from the TOC.
	// FIXME We may want to collect these differently, in the first case,
	// so that we might have them in order of appearance.
	set<docstring> citekeys;
	Toc const & toc = *buf.tocBackend().toc("citation");
	for (auto const & item : toc) {
		if (item.str().empty())
			continue;
		// A citation inset may carry a comma-separated key list.
		vector<docstring> const keys = getVectorFromString(item.str());
		citekeys.insert(keys.begin(), keys.end());
	}
	if (citekeys.empty())
		return;

	// We have a set of the keys used in this document.
	// Convert it to a list of the BibTeXInfo objects used here...
	vector<BibTeXInfo const *> used;
	for (auto const & ck : citekeys) {
		BiblioInfo::const_iterator const bt = find(ck);
		if (bt == end() || !bt->second.isBibTeX())
			continue;
		used.push_back(&(bt->second));
	}
	// ...and sort it.
	sort(used.begin(), used.end(), lSorter);

	// Now record the keys in sorted order.
	for (auto const & entry : used)
		cited_entries_.push_back(entry->key());
}
|
|
|
|
|
|
|
|
|
2010-01-08 18:19:13 +00:00
|
|
|
// Assign citation labels to all cited entries: either running numbers
// (numerical engines) or "Author Year" with a disambiguating letter
// appended to the year when author list and year collide ("1984a").
// Relies on cited_entries_ being sorted by author/year (collectCitedEntries),
// so colliding entries are adjacent.
void BiblioInfo::makeCitationLabels(Buffer const & buf)
{
	collectCitedEntries(buf);
	CiteEngineType const engine_type = buf.params().citeEngineType();
	bool const numbers = (engine_type & ENGINE_TYPE_NUMERICAL);

	int keynumber = 0;
	char modifier = 0;
	// used to remember the last one we saw
	// we'll be comparing entries to see if we need to add
	// modifiers, like "1984a"
	map<docstring, BibTeXInfo>::iterator last = bimap_.end();

	// First pass: add letters to years (or assign cite numbers).
	for (auto const & ce : cited_entries_) {
		map<docstring, BibTeXInfo>::iterator const biit = bimap_.find(ce);
		// this shouldn't happen, but...
		if (biit == bimap_.end())
			// ...fail gracefully, anyway.
			continue;
		BibTeXInfo & entry = biit->second;
		if (numbers) {
			docstring const num = convert<docstring>(++keynumber);
			entry.setCiteNumber(num);
		} else {
			// The first test here is checking whether this is the first
			// time through the loop. If so, then we do not have anything
			// with which to compare.
			if (last != bimap_.end()
			    && entry.getAuthorOrEditorList() == last->second.getAuthorOrEditorList()
			    // we access the year via getYear() so as to get it from the xref,
			    // if we need to do so
			    && getYear(entry.key()) == getYear(last->second.key())) {
				if (modifier == 0) {
					// so the last one should have been 'a'
					last->second.setModifier('a');
					modifier = 'b';
				} else if (modifier == 'z')
					// after 'z', continue with uppercase letters
					modifier = 'A';
				else
					modifier++;
			} else {
				// no collision with the previous entry: reset
				modifier = 0;
			}
			entry.setModifier(modifier);
			// remember the last one
			last = biit;
		}
	}
	// Second pass: set the labels
	for (auto const & ce : cited_entries_) {
		map<docstring, BibTeXInfo>::iterator const biit = bimap_.find(ce);
		// this shouldn't happen, but...
		if (biit == bimap_.end())
			// ...fail gracefully, anyway.
			continue;
		BibTeXInfo & entry = biit->second;
		if (numbers) {
			entry.label(entry.citeNumber());
		} else {
			docstring const auth = entry.getAuthorOrEditorList(&buf, false);
			// we do it this way so as to access the xref, if necessary
			// note that this also gives us the modifier
			docstring const year = getYear(ce, buf, true);
			if (!auth.empty() && !year.empty())
				entry.label(auth + ' ' + year);
			else
				// fall back to the raw key
				entry.label(entry.key());
		}
	}
}
|
|
|
|
|
|
|
|
|
2007-11-05 20:33:20 +00:00
|
|
|
//////////////////////////////////////////////////////////////////////
|
2007-08-20 16:30:02 +00:00
|
|
|
//
|
|
|
|
// CitationStyle
|
|
|
|
//
|
2007-11-05 20:33:20 +00:00
|
|
|
//////////////////////////////////////////////////////////////////////
|
2007-08-20 16:30:02 +00:00
|
|
|
|
2007-08-14 16:50:51 +00:00
|
|
|
|
2017-01-03 12:11:11 +00:00
|
|
|
// Parse a citation LaTeX command (e.g. "Citet*") into a CitationStyle:
// a capital first letter means forceUpperCase, a trailing '*' means the
// starred variant. The returned style name is the (lowercased, unstarred)
// command, mapped through the engine's cite alias table if one exists.
// Note that the markers are detected on the original 'command' but
// stripped from the possibly aliased 'cmd'.
CitationStyle citationStyleFromString(string const & command,
	BufferParams const & params)
{
	CitationStyle cs;
	// empty command: return the default style
	// (also guards the command[0]/command[n] accesses below)
	if (command.empty())
		return cs;

	string const alias = params.getCiteAlias(command);
	string cmd = alias.empty() ? command : alias;
	if (isUpperCase(command[0])) {
		cs.forceUpperCase = true;
		cmd[0] = lowercase(cmd[0]);
	}

	size_t const n = command.size() - 1;
	if (command[n] == '*') {
		cs.hasStarredVersion = true;
		// the alias may already be unstarred, hence the extra check
		if (suffixIs(cmd, '*'))
			cmd = cmd.substr(0, cmd.size() - 1);
	}

	cs.name = cmd;
	return cs;
}
|
|
|
|
|
|
|
|
|
2017-01-03 12:11:11 +00:00
|
|
|
// Reassemble the command string for a CitationStyle: the LaTeX command
// or the style name, with capitalization and star markers re-applied.
string citationStyleToString(const CitationStyle & cs, bool const latex)
{
	string result = latex ? cs.cmd : cs.name;
	// A capitalized variant is marked by uppercasing the first letter...
	if (cs.forceUpperCase)
		result[0] = uppercase(result[0]);
	// ...and the starred variant by a trailing asterisk.
	if (cs.hasStarredVersion)
		result.push_back('*');
	return result;
}
|
|
|
|
|
2020-06-08 21:27:49 +00:00
|
|
|
|
|
|
|
// Emit a DocBook <authorgroup> for the given BibTeX author string onto
// 'xs'. Always returns an empty docstring: all output goes through the
// stream; the return type merely matches the caller's expectations.
docstring authorsToDocBookAuthorGroup(docstring const & authorsString, XMLStream & xs, Buffer const & buf)
{
	// This function closely mimics getAuthorList, but produces DocBook instead of text.
	// It has been greatly simplified, as the complete list of authors is always produced. No separators are required,
	// as the output has a database-like shape.
	// constructName has also been merged within, as it becomes really simple and leads to no copy-paste.

	if (authorsString.empty()) {
		return docstring();
	}

	// Split the input list of authors into individual authors.
	vector<docstring> const authors = getAuthors(authorsString);

	// Retrieve the "et al." variation.
	string const etal = buf.params().documentClass().getCiteMacro(buf.params().citeEngineType(), "_etal");

	// Output the list of authors.
	xs << xml::StartTag("authorgroup");
	auto it = authors.cbegin();
	auto en = authors.cend();
	for (size_t i = 0; it != en; ++it, ++i) {
		xs << xml::StartTag("author");
		xs << xml::CR();
		xs << xml::StartTag("personname");
		xs << xml::CR();
		docstring name = *it;

		// All authors go in a <personname>. If more structure is known, use it; otherwise (just "et al."), print it as such.
		if (name == "others") {
			// BibTeX's "and others" marker: emit the localized "et al." macro.
			xs << buf.B_(etal);
		} else {
			// Decompose the name and emit each part in its DocBook element.
			name_parts parts = nameParts(name);
			if (! parts.prefix.empty()) {
				xs << xml::StartTag("honorific");
				xs << parts.prefix;
				xs << xml::EndTag("honorific");
				xs << xml::CR();
			}
			if (! parts.prename.empty()) {
				xs << xml::StartTag("firstname");
				xs << parts.prename;
				xs << xml::EndTag("firstname");
				xs << xml::CR();
			}
			if (! parts.surname.empty()) {
				xs << xml::StartTag("surname");
				xs << parts.surname;
				xs << xml::EndTag("surname");
				xs << xml::CR();
			}
			if (! parts.suffix.empty()) {
				xs << xml::StartTag("othername", "role=\"suffix\"");
				xs << parts.suffix;
				xs << xml::EndTag("othername");
				xs << xml::CR();
			}
		}

		xs << xml::EndTag("personname");
		xs << xml::CR();
		xs << xml::EndTag("author");
		xs << xml::CR();

		// Could add an affiliation after <personname>, but not stored in BibTeX.
	}
	xs << xml::EndTag("authorgroup");
	xs << xml::CR();

	return docstring();
}
|
|
|
|
|
2007-08-14 16:50:51 +00:00
|
|
|
} // namespace lyx
|