/* cpython/Parser/tokenizer.c */
/* Tokenizer implementation */
#include "Python.h"
#include "pgenheaders.h"
#include <ctype.h>
#include <assert.h>
#include "tokenizer.h"
#include "errcode.h"
#ifndef PGEN
#include "unicodeobject.h"
#include "stringobject.h"
#include "fileobject.h"
#include "codecs.h"
#include "abstract.h"
#endif /* PGEN */
extern char *PyOS_Readline(FILE *, FILE *, char *);
/* Return malloc'ed string including trailing \n;
empty malloc'ed string for EOF;
NULL if interrupted */
/* Don't ever change this -- it would break the portability of Python code */
#define TABSIZE 8
/* Convert a possibly signed character to a nonnegative int */
/* XXX This assumes characters are 8 bits wide */
#ifdef __CHAR_UNSIGNED__
#define Py_CHARMASK(c) (c)
#else
#define Py_CHARMASK(c) ((c) & 0xff)
#endif
/* Forward */
static struct tok_state *tok_new(void);
static int tok_nextc(struct tok_state *tok);
static void tok_backup(struct tok_state *tok, int c);
/* Token names */
/* Token names, indexed by token type.
   This table must match the #defines in token.h! */
char *_PyParser_TokenNames[] = {
    "ENDMARKER",
    "NAME",
    "NUMBER",
    "STRING",
    "NEWLINE",
    "INDENT",
    "DEDENT",
    "LPAR",
    "RPAR",
    "LSQB",
    "RSQB",
    "COLON",
    "COMMA",
    "SEMI",
    "PLUS",
    "MINUS",
    "STAR",
    "SLASH",
    "VBAR",
    "AMPER",
    "LESS",
    "GREATER",
    "EQUAL",
    "DOT",
    "PERCENT",
    "LBRACE",
    "RBRACE",
    "EQEQUAL",
    "NOTEQUAL",
    "LESSEQUAL",
    "GREATEREQUAL",
    "TILDE",
    "CIRCUMFLEX",
    "LEFTSHIFT",
    "RIGHTSHIFT",
    "DOUBLESTAR",
    "PLUSEQUAL",
    "MINEQUAL",
    "STAREQUAL",
    "SLASHEQUAL",
    "PERCENTEQUAL",
    "AMPEREQUAL",
    "VBAREQUAL",
    "CIRCUMFLEXEQUAL",
    "LEFTSHIFTEQUAL",
    "RIGHTSHIFTEQUAL",
    "DOUBLESTAREQUAL",
    "DOUBLESLASH",
    "DOUBLESLASHEQUAL",
    "AT",
    "OP",
    "<ERRORTOKEN>",
    "<N_TOKENS>"
};
/* Create and initialize a new tok_state structure */
static struct tok_state *
tok_new(void)
1990-10-14 09:07:46 -03:00
{
struct tok_state *tok = (struct tok_state *)PyMem_MALLOC(
sizeof(struct tok_state));
1990-10-14 09:07:46 -03:00
if (tok == NULL)
return NULL;
tok->buf = tok->cur = tok->end = tok->inp = tok->start = NULL;
1990-10-14 09:07:46 -03:00
tok->done = E_OK;
tok->fp = NULL;
tok->tabsize = TABSIZE;
tok->indent = 0;
tok->indstack[0] = 0;
tok->atbol = 1;
tok->pendin = 0;
tok->prompt = tok->nextprompt = NULL;
tok->lineno = 0;
tok->level = 0;
tok->filename = NULL;
tok->altwarning = 1;
tok->alterror = 1;
tok->alttabsize = 1;
tok->altindstack[0] = 0;
tok->decoding_state = 0;
tok->decoding_erred = 0;
tok->read_coding_spec = 0;
tok->encoding = NULL;
tok->cont_line = 0;
#ifndef PGEN
tok->decoding_readline = NULL;
tok->decoding_buffer = NULL;
#endif
1990-10-14 09:07:46 -03:00
return tok;
}
#ifdef PGEN
/* PGEN build: no encoding handling -- just read a raw line from tok->fp. */
static char *
decoding_fgets(char *s, int size, struct tok_state *tok)
{
    return fgets(s, size, tok->fp);
}
/* PGEN build: EOF test is just the stdio end-of-file flag. */
static int
decoding_feof(struct tok_state *tok)
{
    return feof(tok->fp);
}
/* PGEN build: input strings are used as-is; no decoding is performed. */
static const char *
decode_str(const char *str, struct tok_state *tok)
{
    return str;
}
#else /* PGEN */
/* Record that decoding failed and release the line buffer if the
   tokenizer owns it (it owns buf only when reading from a file --
   the same test PyTokenizer_Free uses).  Always returns NULL so a
   caller can propagate the failure as if it were EOF. */
static char *
error_ret(struct tok_state *tok)   /* XXX */
{
    int owns_buf = (tok->fp != NULL && tok->buf != NULL);

    tok->decoding_erred = 1;
    if (owns_buf)
        PyMem_FREE(tok->buf);
    tok->buf = NULL;
    return NULL;                   /* as if it were EOF */
}
static char *
2006-02-15 13:27:45 -04:00
new_string(const char *s, Py_ssize_t len)
{
char* result = (char *)PyMem_MALLOC(len + 1);
if (result != NULL) {
memcpy(result, s, len);
result[len] = '\0';
}
return result;
}
/* Normalize an encoding name: return the literal "utf-8" or
   "iso-8859-1" for their common aliases, otherwise return S itself.
   Only the first 12 characters are considered; '_' is mapped to '-'
   and letters are lowercased for the comparison. */
static char *
get_normal_name(char *s)        /* for utf-8 and latin-1 */
{
    char buf[13];
    int i;
    for (i = 0; i < 12; i++) {
        int c = s[i];
        if (c == '\0')
            break;
        else if (c == '_')
            buf[i] = '-';
        else
            /* Cast to unsigned char: passing a negative char (other
               than EOF) to tolower() is undefined behavior. */
            buf[i] = tolower((unsigned char)c);
    }
    buf[i] = '\0';
    if (strcmp(buf, "utf-8") == 0 ||
        strncmp(buf, "utf-8-", 6) == 0)
        return "utf-8";
    else if (strcmp(buf, "latin-1") == 0 ||
             strcmp(buf, "iso-8859-1") == 0 ||
             strcmp(buf, "iso-latin-1") == 0 ||
             strncmp(buf, "latin-1-", 8) == 0 ||
             strncmp(buf, "iso-8859-1-", 11) == 0 ||
             strncmp(buf, "iso-latin-1-", 12) == 0)
        return "iso-8859-1";
    else
        return s;
}
/* Return the coding spec in S, or NULL if none is found. */
static char *
2006-02-15 13:27:45 -04:00
get_coding_spec(const char *s, Py_ssize_t size)
{
2006-02-15 13:27:45 -04:00
Py_ssize_t i;
/* Coding spec must be in a comment, and that comment must be
* the only statement on the source code line. */
for (i = 0; i < size - 6; i++) {
if (s[i] == '#')
break;
if (s[i] != ' ' && s[i] != '\t' && s[i] != '\014')
return NULL;
}
for (; i < size - 6; i++) { /* XXX inefficient search */
const char* t = s + i;
if (strncmp(t, "coding", 6) == 0) {
const char* begin = NULL;
t += 6;
if (t[0] != ':' && t[0] != '=')
continue;
do {
t++;
} while (t[0] == '\x20' || t[0] == '\t');
begin = t;
while (isalnum(Py_CHARMASK(t[0])) ||
t[0] == '-' || t[0] == '_' || t[0] == '.')
t++;
if (begin < t) {
char* r = new_string(begin, t - begin);
char* q = get_normal_name(r);
if (r != q) {
PyMem_FREE(r);
r = new_string(q, strlen(q));
}
return r;
}
}
}
return NULL;
}
/* Check whether the line contains a coding spec. If it does,
invoke the set_readline function for the new encoding.
This function receives the tok_state and the new encoding.
Return 1 on success, 0 on failure. */
static int
2006-02-15 13:27:45 -04:00
check_coding_spec(const char* line, Py_ssize_t size, struct tok_state *tok,
int set_readline(struct tok_state *, const char *))
{
2002-09-03 12:39:58 -03:00
char * cs;
int r = 1;
2002-09-03 12:39:58 -03:00
if (tok->cont_line)
/* It's a continuation line, so it can't be a coding spec. */
return 1;
2002-09-03 12:39:58 -03:00
cs = get_coding_spec(line, size);
if (cs != NULL) {
tok->read_coding_spec = 1;
if (tok->encoding == NULL) {
assert(tok->decoding_state == 1); /* raw */
if (strcmp(cs, "utf-8") == 0 ||
strcmp(cs, "iso-8859-1") == 0) {
tok->encoding = cs;
} else {
#ifdef Py_USING_UNICODE
r = set_readline(tok, cs);
if (r) {
tok->encoding = cs;
tok->decoding_state = -1;
}
else
PyMem_FREE(cs);
#else
/* Without Unicode support, we cannot
process the coding spec. Since there
won't be any Unicode literals, that
won't matter. */
PyMem_FREE(cs);
#endif
}
} else { /* then, compare cs with BOM */
r = (strcmp(tok->encoding, cs) == 0);
PyMem_FREE(cs);
}
}
if (!r) {
cs = tok->encoding;
if (!cs)
cs = "with BOM";
PyErr_Format(PyExc_SyntaxError, "encoding problem: %s", cs);
}
return r;
}
/* See whether the file starts with a BOM. If it does,
invoke the set_readline function with the new encoding.
Return 1 on success, 0 on failure. */
static int
check_bom(int get_char(struct tok_state *),
          void unget_char(int, struct tok_state *),
          int set_readline(struct tok_state *, const char *),
          struct tok_state *tok)
{
    int ch = get_char(tok);
    /* Assume raw bytes until a BOM proves otherwise. */
    tok->decoding_state = 1;
    if (ch == EOF) {
        return 1;
    } else if (ch == 0xEF) {
        /* Possible UTF-8 BOM: EF BB BF. */
        ch = get_char(tok); if (ch != 0xBB) goto NON_BOM;
        ch = get_char(tok); if (ch != 0xBF) goto NON_BOM;
#if 0
    /* Disable support for UTF-16 BOMs until a decision
       is made whether this needs to be supported.  */
    } else if (ch == 0xFE) {
        ch = get_char(tok); if (ch != 0xFF) goto NON_BOM;
        if (!set_readline(tok, "utf-16-be")) return 0;
        tok->decoding_state = -1;
    } else if (ch == 0xFF) {
        ch = get_char(tok); if (ch != 0xFE) goto NON_BOM;
        if (!set_readline(tok, "utf-16-le")) return 0;
        tok->decoding_state = -1;
#endif
    } else {
        /* No BOM: push the byte back and read it normally. */
        unget_char(ch, tok);
        return 1;
    }
    /* A UTF-8 BOM was consumed: record the encoding, replacing any
       previously recorded one. */
    if (tok->encoding != NULL)
        PyMem_FREE(tok->encoding);
    tok->encoding = new_string("utf-8", 5);     /* resulting is in utf-8 */
    return 1;
NON_BOM:
    /* any token beginning with '\xEF', '\xFE', '\xFF' is a bad token */
    unget_char(0xFF, tok);      /* XXX this will cause a syntax error */
    return 1;
}
/* Read a line of text from TOK into S, using the stream in TOK.
Return NULL on failure, else S.
On entry, tok->decoding_buffer will be one of:
1) NULL: need to call tok->decoding_readline to get a new line
2) PyUnicodeObject *: decoding_feof has called tok->decoding_readline and
stored the result in tok->decoding_buffer
3) PyStringObject *: previous call to fp_readl did not have enough room
(in the s buffer) to copy entire contents of the line read
by tok->decoding_readline. tok->decoding_buffer has the overflow.
In this case, fp_readl is called in a loop (with an expanded buffer)
until the buffer ends with a '\n' (or until the end of the file is
reached): see tok_nextc and its calls to decoding_fgets.
*/
/* Read one decoded line (re-encoded to UTF-8) into S, at most SIZE-1
   bytes plus a NUL.  See the comment block above for the three states
   of tok->decoding_buffer.  Returns S, or NULL on EOF/error. */
static char *
fp_readl(char *s, int size, struct tok_state *tok)
{
#ifndef Py_USING_UNICODE
    /* In a non-Unicode build, this should never be called. */
    Py_FatalError("fp_readl should not be called in this build.");
    return NULL; /* Keep compiler happy (not reachable) */
#else
    PyObject* utf8 = NULL;
    PyObject* buf = tok->decoding_buffer;
    char *str;
    Py_ssize_t utf8len;

    /* Ask for one less byte so we can terminate it */
    assert(size > 0);
    size--;

    if (buf == NULL) {
        /* State 1: fetch a fresh line via the codec's readline. */
        buf = PyObject_CallObject(tok->decoding_readline, NULL);
        if (buf == NULL)
            return error_ret(tok);
    } else {
        /* States 2/3: a pending line (unicode) or overflow (str). */
        tok->decoding_buffer = NULL;
        if (PyString_CheckExact(buf))
            utf8 = buf;         /* overflow bytes are already UTF-8 */
    }
    if (utf8 == NULL) {
        utf8 = PyUnicode_AsUTF8String(buf);
        Py_DECREF(buf);
        if (utf8 == NULL)
            return error_ret(tok);
    }
    str = PyString_AsString(utf8);
    utf8len = PyString_GET_SIZE(utf8);
    if (utf8len > size) {
        /* Not enough room: stash the tail for the next call. */
        tok->decoding_buffer = PyString_FromStringAndSize(str+size, utf8len-size);
        if (tok->decoding_buffer == NULL) {
            Py_DECREF(utf8);
            return error_ret(tok);
        }
        utf8len = size;
    }
    memcpy(s, str, utf8len);
    s[utf8len] = '\0';
    Py_DECREF(utf8);
    if (utf8len == 0) return NULL; /* EOF */
    return s;
#endif
}
/* Set the readline function for TOK to a StreamReader's
readline function. The StreamReader is named ENC.
This function is called from check_bom and check_coding_spec.
ENC is usually identical to the future value of tok->encoding,
except for the (currently unsupported) case of UTF-16.
Return 1 on success, 0 on failure. */
static int
fp_setreadl(struct tok_state *tok, const char* enc)
{
    PyObject *reader, *stream, *readline;

    /* XXX: constify filename argument. */
    stream = PyFile_FromFile(tok->fp, (char*)tok->filename, "rb", NULL);
    if (stream == NULL)
        return 0;
    /* The stream reader keeps its own reference to the stream. */
    reader = PyCodec_StreamReader(enc, stream, NULL);
    Py_DECREF(stream);
    if (reader == NULL)
        return 0;
    /* Only the bound readline method is kept; the reader itself can go. */
    readline = PyObject_GetAttrString(reader, "readline");
    Py_DECREF(reader);
    if (readline == NULL)
        return 0;
    tok->decoding_readline = readline;
    return 1;
}
/* Fetch the next raw byte from TOK's stdio stream. */
static int
fp_getc(struct tok_state *tok)
{
    return getc(tok->fp);
}
/* Unfetch the last byte back into TOK.  (One byte of pushback is
   guaranteed by the C library's ungetc.) */
static void fp_ungetc(int c, struct tok_state *tok) {
    ungetc(c, tok->fp);
}
/* Read a line of input from TOK. Determine encoding
if necessary. */
static char *
decoding_fgets(char *s, int size, struct tok_state *tok)
{
2002-08-07 12:18:57 -03:00
char *line = NULL;
int badchar = 0;
2002-08-04 15:28:44 -03:00
for (;;) {
if (tok->decoding_state < 0) {
/* We already have a codec associated with
this input. */
line = fp_readl(s, size, tok);
break;
} else if (tok->decoding_state > 0) {
/* We want a 'raw' read. */
line = Py_UniversalNewlineFgets(s, size,
tok->fp, NULL);
break;
} else {
/* We have not yet determined the encoding.
If an encoding is found, use the file-pointer
reader functions from now on. */
if (!check_bom(fp_getc, fp_ungetc, fp_setreadl, tok))
return error_ret(tok);
assert(tok->decoding_state != 0);
}
2002-08-04 15:28:44 -03:00
}
if (line != NULL && tok->lineno < 2 && !tok->read_coding_spec) {
if (!check_coding_spec(line, strlen(line), tok, fp_setreadl)) {
return error_ret(tok);
}
}
#ifndef PGEN
/* The default encoding is ASCII, so make sure we don't have any
non-ASCII bytes in it. */
if (line && !tok->encoding) {
unsigned char *c;
for (c = (unsigned char *)line; *c; c++)
if (*c > 127) {
badchar = *c;
break;
}
}
if (badchar) {
char buf[500];
/* Need to add 1 to the line number, since this line
has not been counted, yet. */
sprintf(buf,
"Non-ASCII character '\\x%.2x' "
"in file %.200s on line %i, "
"but no encoding declared; "
"see http://www.python.org/peps/pep-0263.html for details",
badchar, tok->filename, tok->lineno + 1);
PyErr_SetString(PyExc_SyntaxError, buf);
return error_ret(tok);
}
#endif
return line;
}
/* Report whether TOK's input is exhausted.  For raw reads this is just
   feof(); for codec reads we must actually try readline() and cache any
   line it returns in tok->decoding_buffer for the next fp_readl call. */
static int
decoding_feof(struct tok_state *tok)
{
    PyObject *pending;

    if (tok->decoding_state >= 0)
        return feof(tok->fp);

    pending = tok->decoding_buffer;
    if (pending == NULL) {
        pending = PyObject_CallObject(tok->decoding_readline, NULL);
        if (pending == NULL) {
            error_ret(tok);
            return 1;
        }
        tok->decoding_buffer = pending;
    }
    return PyObject_Length(pending) == 0;
}
/* Fetch the next byte from TOK's in-memory string buffer. */
static int
buf_getc(struct tok_state *tok)
{
    int ch = Py_CHARMASK(*tok->str);
    tok->str++;
    return ch;
}
/* Unfetch a byte from TOK's in-memory string buffer. */
static void
buf_ungetc(int c, struct tok_state *tok)
{
    --tok->str;
    /* The buffer may live in a read-only segment, so verify the byte
       rather than writing it back. */
    assert(Py_CHARMASK(*tok->str) == c);
}
/* "Set the readline function" for the string-based tokenizer: there is
   no stream to wrap, so simply record the encoding name; decode_str
   performs the actual translation. */
static int
buf_setreadl(struct tok_state *tok, const char* enc)
{
    tok->enc = enc;
    return 1;
}
/* Return a UTF-8 encoding Python string object from the
C byte string STR, which is encoded with ENC. */
#ifdef Py_USING_UNICODE
static PyObject *
translate_into_utf8(const char* str, const char* enc) {
    PyObject *result;
    PyObject *decoded = PyUnicode_Decode(str, strlen(str), enc, NULL);
    if (decoded == NULL)
        return NULL;
    /* Re-encode the unicode object as UTF-8 bytes; drop the temporary. */
    result = PyUnicode_AsUTF8String(decoded);
    Py_DECREF(decoded);
    return result;
}
#endif
/* Decode a byte string STR for use as the buffer of TOK.
Look for encoding declarations inside STR, and record them
inside TOK. */
static const char *
decode_str(const char *str, struct tok_state *tok)
{
    PyObject* utf8 = NULL;
    const char *s;
    int lineno = 0;
    tok->enc = NULL;
    tok->str = str;
    /* Consume a UTF-8 BOM if present (buf_* callbacks advance tok->str). */
    if (!check_bom(buf_getc, buf_ungetc, buf_setreadl, tok))
        return error_ret(tok);
    str = tok->str;             /* string after BOM if any */
    assert(str);
#ifdef Py_USING_UNICODE
    /* tok->enc was set by check_bom via buf_setreadl. */
    if (tok->enc != NULL) {
        utf8 = translate_into_utf8(str, tok->enc);
        if (utf8 == NULL)
            return error_ret(tok);
        str = PyString_AsString(utf8);
    }
#endif
    /* Find the end of the second line: a coding spec may only appear
       on the first two lines, so only that span is scanned. */
    for (s = str;; s++) {
        if (*s == '\0') break;
        else if (*s == '\n') {
            lineno++;
            if (lineno == 2) break;
        }
    }
    tok->enc = NULL;
    /* buf_setreadl records a declared coding spec in tok->enc. */
    if (!check_coding_spec(str, s - str, tok, buf_setreadl))
        return error_ret(tok);
#ifdef Py_USING_UNICODE
    if (tok->enc != NULL) {
        /* A coding spec cannot coexist with a BOM-derived utf8 buffer
           here: check_coding_spec verified they agree, leaving utf8
           NULL on this path. */
        assert(utf8 == NULL);
        utf8 = translate_into_utf8(str, tok->enc);
        if (utf8 == NULL) {
            PyErr_Format(PyExc_SyntaxError,
                "unknown encoding: %s", tok->enc);
            return error_ret(tok);
        }
        str = PyString_AsString(utf8);
    }
#endif
    assert(tok->decoding_buffer == NULL);
    /* CAUTION: the returned pointer aliases utf8's internal buffer;
       storing utf8 in tok->decoding_buffer keeps it alive until
       PyTokenizer_Free DECREFs it. */
    tok->decoding_buffer = utf8; /* CAUTION */
    return str;
}
#endif /* PGEN */
/* Set up tokenizer for string */
struct tok_state *
PyTokenizer_FromString(const char *str)
1990-10-14 09:07:46 -03:00
{
struct tok_state *tok = tok_new();
if (tok == NULL)
return NULL;
str = (char *)decode_str(str, tok);
if (str == NULL) {
PyTokenizer_Free(tok);
return NULL;
}
/* XXX: constify members. */
tok->buf = tok->cur = tok->end = tok->inp = (char*)str;
1990-10-14 09:07:46 -03:00
return tok;
}
/* Set up tokenizer for file */
1990-10-14 09:07:46 -03:00
struct tok_state *
PyTokenizer_FromFile(FILE *fp, char *ps1, char *ps2)
1990-10-14 09:07:46 -03:00
{
struct tok_state *tok = tok_new();
if (tok == NULL)
return NULL;
if ((tok->buf = (char *)PyMem_MALLOC(BUFSIZ)) == NULL) {
PyTokenizer_Free(tok);
1990-10-14 09:07:46 -03:00
return NULL;
}
tok->cur = tok->inp = tok->buf;
tok->end = tok->buf + BUFSIZ;
tok->fp = fp;
tok->prompt = ps1;
tok->nextprompt = ps2;
return tok;
}
/* Free a tok_state structure */
void
PyTokenizer_Free(struct tok_state *tok)
1990-10-14 09:07:46 -03:00
{
if (tok->encoding != NULL)
PyMem_FREE(tok->encoding);
#ifndef PGEN
Py_XDECREF(tok->decoding_readline);
Py_XDECREF(tok->decoding_buffer);
#endif
1990-10-14 09:07:46 -03:00
if (tok->fp != NULL && tok->buf != NULL)
PyMem_FREE(tok->buf);
PyMem_FREE(tok);
1990-10-14 09:07:46 -03:00
}
#if !defined(PGEN) && defined(Py_USING_UNICODE)
/* Re-encode an interactive input line *INP, read from stdin, from
   sys.stdin's declared encoding into UTF-8, replacing *INP in place
   (the old buffer is PyMem_FREE'd, the new one PyMem-allocated).
   Also records the encoding in tok->encoding.
   Returns 0 on success or when no conversion applies (stdin redirected,
   no file encoding, or a decode error -- then the input is left as-is),
   and -1 on memory error with tok->done set to E_NOMEM. */
static int
tok_stdin_decode(struct tok_state *tok, char **inp)
{
    PyObject *enc, *sysstdin, *decoded, *utf8;
    const char *encoding;
    char *converted;
    /* Only convert when sys.stdin really is the process's stdin. */
    if (PySys_GetFile((char *)"stdin", NULL) != stdin)
        return 0;
    sysstdin = PySys_GetObject("stdin");
    if (sysstdin == NULL || !PyFile_Check(sysstdin))
        return 0;
    enc = ((PyFileObject *)sysstdin)->f_encoding;
    if (enc == NULL || !PyString_Check(enc))
        return 0;
    /* Hold a reference: library calls below may release the GIL or
       otherwise invalidate the borrowed f_encoding. */
    Py_INCREF(enc);
    encoding = PyString_AsString(enc);
    decoded = PyUnicode_Decode(*inp, strlen(*inp), encoding, NULL);
    if (decoded == NULL)
        goto error_clear;
    utf8 = PyUnicode_AsEncodedString(decoded, "utf-8", NULL);
    Py_DECREF(decoded);
    if (utf8 == NULL)
        goto error_clear;
    assert(PyString_Check(utf8));
    converted = new_string(PyString_AS_STRING(utf8),
                           PyString_GET_SIZE(utf8));
    Py_DECREF(utf8);
    if (converted == NULL)
        goto error_nomem;
    PyMem_FREE(*inp);
    *inp = converted;
    if (tok->encoding != NULL)
        PyMem_FREE(tok->encoding);
    tok->encoding = new_string(encoding, strlen(encoding));
    if (tok->encoding == NULL)
        goto error_nomem;
    Py_DECREF(enc);
    return 0;
error_nomem:
    Py_DECREF(enc);
    tok->done = E_NOMEM;
    return -1;
error_clear:
    /* Fallback to iso-8859-1: for backward compatibility */
    Py_DECREF(enc);
    PyErr_Clear();
    return 0;
}
#endif
/* Get next char, updating state; error code goes into tok->done */
static int
tok_nextc(register struct tok_state *tok)
1990-10-14 09:07:46 -03:00
{
for (;;) {
if (tok->cur != tok->inp) {
return Py_CHARMASK(*tok->cur++); /* Fast path */
}
if (tok->done != E_OK)
return EOF;
1990-10-14 09:07:46 -03:00
if (tok->fp == NULL) {
char *end = strchr(tok->inp, '\n');
if (end != NULL)
end++;
else {
end = strchr(tok->inp, '\0');
if (end == tok->inp) {
tok->done = E_EOF;
return EOF;
}
}
if (tok->start == NULL)
tok->buf = tok->cur;
tok->line_start = tok->cur;
tok->lineno++;
tok->inp = end;
return Py_CHARMASK(*tok->cur++);
1990-10-14 09:07:46 -03:00
}
if (tok->prompt != NULL) {
char *newtok = PyOS_Readline(stdin, stdout, tok->prompt);
1990-10-14 09:07:46 -03:00
if (tok->nextprompt != NULL)
tok->prompt = tok->nextprompt;
if (newtok == NULL)
tok->done = E_INTR;
else if (*newtok == '\0') {
PyMem_FREE(newtok);
1990-10-14 09:07:46 -03:00
tok->done = E_EOF;
}
#if !defined(PGEN) && defined(Py_USING_UNICODE)
else if (tok_stdin_decode(tok, &newtok) != 0)
PyMem_FREE(newtok);
#endif
else if (tok->start != NULL) {
size_t start = tok->start - tok->buf;
size_t oldlen = tok->cur - tok->buf;
size_t newlen = oldlen + strlen(newtok);
char *buf = tok->buf;
buf = (char *)PyMem_REALLOC(buf, newlen+1);
tok->lineno++;
if (buf == NULL) {
PyMem_FREE(tok->buf);
tok->buf = NULL;
PyMem_FREE(newtok);
tok->done = E_NOMEM;
return EOF;
}
tok->buf = buf;
tok->cur = tok->buf + oldlen;
tok->line_start = tok->cur;
strcpy(tok->buf + oldlen, newtok);
PyMem_FREE(newtok);
tok->inp = tok->buf + newlen;
tok->end = tok->inp + 1;
tok->start = tok->buf + start;
}
1990-10-14 09:07:46 -03:00
else {
tok->lineno++;
if (tok->buf != NULL)
PyMem_FREE(tok->buf);
tok->buf = newtok;
tok->line_start = tok->buf;
tok->cur = tok->buf;
tok->line_start = tok->buf;
tok->inp = strchr(tok->buf, '\0');
tok->end = tok->inp + 1;
1990-10-14 09:07:46 -03:00
}
}
else {
int done = 0;
2006-02-15 13:27:45 -04:00
Py_ssize_t cur = 0;
char *pt;
if (tok->start == NULL) {
if (tok->buf == NULL) {
tok->buf = (char *)
PyMem_MALLOC(BUFSIZ);
if (tok->buf == NULL) {
tok->done = E_NOMEM;
return EOF;
}
tok->end = tok->buf + BUFSIZ;
}
if (decoding_fgets(tok->buf, (int)(tok->end - tok->buf),
tok) == NULL) {
tok->done = E_EOF;
done = 1;
}
else {
tok->done = E_OK;
tok->inp = strchr(tok->buf, '\0');
done = tok->inp[-1] == '\n';
}
}
else {
cur = tok->cur - tok->buf;
if (decoding_feof(tok)) {
1995-01-17 12:12:13 -04:00
tok->done = E_EOF;
done = 1;
}
else
tok->done = E_OK;
}
tok->lineno++;
/* Read until '\n' or EOF */
while (!done) {
2006-02-15 13:27:45 -04:00
Py_ssize_t curstart = tok->start == NULL ? -1 :
tok->start - tok->buf;
Py_ssize_t curvalid = tok->inp - tok->buf;
Py_ssize_t newsize = curvalid + BUFSIZ;
char *newbuf = tok->buf;
newbuf = (char *)PyMem_REALLOC(newbuf,
newsize);
if (newbuf == NULL) {
tok->done = E_NOMEM;
tok->cur = tok->inp;
return EOF;
}
tok->buf = newbuf;
tok->inp = tok->buf + curvalid;
tok->end = tok->buf + newsize;
tok->start = curstart < 0 ? NULL :
tok->buf + curstart;
if (decoding_fgets(tok->inp,
(int)(tok->end - tok->inp),
tok) == NULL) {
/* Break out early on decoding
errors, as tok->buf will be NULL
*/
if (tok->decoding_erred)
return EOF;
/* Last line does not end in \n,
fake one */
strcpy(tok->inp, "\n");
}
tok->inp = strchr(tok->inp, '\0');
done = tok->inp[-1] == '\n';
}
Partially merge trunk into p3yk. The removal of Mac/Tools is confusing svn merge in bad ways, so I'll have to merge that extra-carefully (probably manually.) Merged revisions 46495-46605 via svnmerge from svn+ssh://pythondev@svn.python.org/python/trunk ........ r46495 | tim.peters | 2006-05-28 03:52:38 +0200 (Sun, 28 May 2006) | 2 lines Added missing svn:eol-style property to text files. ........ r46497 | tim.peters | 2006-05-28 12:41:29 +0200 (Sun, 28 May 2006) | 3 lines PyErr_Display(), PyErr_WriteUnraisable(): Coverity found a cut-and-paste bug in both: `className` was referenced before being checked for NULL. ........ r46499 | fredrik.lundh | 2006-05-28 14:06:46 +0200 (Sun, 28 May 2006) | 5 lines needforspeed: added Py_MEMCPY macro (currently tuned for Visual C only), and use it for string copy operations. this gives a 20% speedup on some string benchmarks. ........ r46501 | michael.hudson | 2006-05-28 17:51:40 +0200 (Sun, 28 May 2006) | 26 lines Quality control, meet exceptions.c. Fix a number of problems with the need for speed code: One is doing this sort of thing: Py_DECREF(self->field); self->field = newval; Py_INCREF(self->field); without being very sure that self->field doesn't start with a value that has a __del__, because that almost certainly can lead to segfaults. As self->args is constrained to be an exact tuple we may as well exploit this fact consistently. This leads to quite a lot of simplification (and, hey, probably better performance). Add some error checking in places lacking it. Fix some rather strange indentation in the Unicode code. Delete some trailing whitespace. More to come, I haven't fixed all the reference leaks yet... ........ r46502 | george.yoshida | 2006-05-28 18:39:09 +0200 (Sun, 28 May 2006) | 3 lines Patch #1080727: add "encoding" parameter to doctest.DocFileSuite Contributed by Bjorn Tillenius. ........ 
r46503 | martin.v.loewis | 2006-05-28 18:57:38 +0200 (Sun, 28 May 2006) | 4 lines Rest of patch #1490384: Commit icon source, remove claim that Erik von Blokland is the author of the installer picture. ........ r46504 | michael.hudson | 2006-05-28 19:40:29 +0200 (Sun, 28 May 2006) | 16 lines Quality control, meet exceptions.c, round two. Make some functions that should have been static static. Fix a bunch of refleaks by fixing the definition of MiddlingExtendsException. Remove all the __new__ implementations apart from BaseException_new. Rewrite most code that needs it to cope with NULL fields (such code could get excercised anyway, the __new__-removal just makes it more likely). This involved editing the code for WindowsError, which I can't test. This fixes all the refleaks in at least the start of a regrtest -R :: run. ........ r46505 | marc-andre.lemburg | 2006-05-28 19:46:58 +0200 (Sun, 28 May 2006) | 10 lines Initial version of systimes - a module to provide platform dependent performance measurements. The module is currently just a proof-of-concept implementation, but will integrated into pybench once it is stable enough. License: pybench license. Author: Marc-Andre Lemburg. ........ r46507 | armin.rigo | 2006-05-28 21:13:17 +0200 (Sun, 28 May 2006) | 15 lines ("Forward-port" of r46506) Remove various dependencies on dictionary order in the standard library tests, and one (clearly an oversight, potentially critical) in the standard library itself - base64.py. Remaining open issues: * test_extcall is an output test, messy to make robust * tarfile.py has a potential bug here, but I'm not familiar enough with this code. Filed in as SF bug #1496501. * urllib2.HTTPPasswordMgr() returns a random result if there is more than one matching root path. I'm asking python-dev for clarification... ........ r46508 | georg.brandl | 2006-05-28 22:11:45 +0200 (Sun, 28 May 2006) | 4 lines The empty string is a valid import path. (fixes #1496539) ........ 
r46509 | georg.brandl | 2006-05-28 22:23:12 +0200 (Sun, 28 May 2006) | 3 lines Patch #1496206: urllib2 PasswordMgr ./. default ports ........ r46510 | georg.brandl | 2006-05-28 22:57:09 +0200 (Sun, 28 May 2006) | 3 lines Fix refleaks in UnicodeError get and set methods. ........ r46511 | michael.hudson | 2006-05-28 23:19:03 +0200 (Sun, 28 May 2006) | 3 lines use the UnicodeError traversal and clearing functions in UnicodeError subclasses. ........ r46512 | thomas.wouters | 2006-05-28 23:32:12 +0200 (Sun, 28 May 2006) | 4 lines Make last patch valid C89 so Windows compilers can deal with it. ........ r46513 | georg.brandl | 2006-05-28 23:42:54 +0200 (Sun, 28 May 2006) | 3 lines Fix ref-antileak in _struct.c which eventually lead to deallocating None. ........ r46514 | georg.brandl | 2006-05-28 23:57:35 +0200 (Sun, 28 May 2006) | 4 lines Correct None refcount issue in Mac modules. (Are they still used?) ........ r46515 | armin.rigo | 2006-05-29 00:07:08 +0200 (Mon, 29 May 2006) | 3 lines A clearer error message when passing -R to regrtest.py with release builds of Python. ........ r46516 | georg.brandl | 2006-05-29 00:14:04 +0200 (Mon, 29 May 2006) | 3 lines Fix C function calling conventions in _sre module. ........ r46517 | georg.brandl | 2006-05-29 00:34:51 +0200 (Mon, 29 May 2006) | 3 lines Convert audioop over to METH_VARARGS. ........ r46518 | georg.brandl | 2006-05-29 00:38:57 +0200 (Mon, 29 May 2006) | 3 lines METH_NOARGS functions do get called with two args. ........ r46519 | georg.brandl | 2006-05-29 11:46:51 +0200 (Mon, 29 May 2006) | 4 lines Fix refleak in socketmodule. Replace bogus Py_BuildValue calls. Fix refleak in exceptions. ........ r46520 | nick.coghlan | 2006-05-29 14:43:05 +0200 (Mon, 29 May 2006) | 7 lines Apply modified version of Collin Winter's patch #1478788 Renames functional extension module to _functools and adds a Python functools module so that utility functions like update_wrapper can be added easily. ........ 
r46522 | georg.brandl | 2006-05-29 15:53:16 +0200 (Mon, 29 May 2006) | 3 lines Convert fmmodule to METH_VARARGS. ........ r46523 | georg.brandl | 2006-05-29 16:13:21 +0200 (Mon, 29 May 2006) | 3 lines Fix #1494605. ........ r46524 | georg.brandl | 2006-05-29 16:28:05 +0200 (Mon, 29 May 2006) | 3 lines Handle PyMem_Malloc failure in pystrtod.c. Closes #1494671. ........ r46525 | georg.brandl | 2006-05-29 16:33:55 +0200 (Mon, 29 May 2006) | 3 lines Fix compiler warning. ........ r46526 | georg.brandl | 2006-05-29 16:39:00 +0200 (Mon, 29 May 2006) | 3 lines Fix #1494787 (pyclbr counts whitespace as superclass name) ........ r46527 | bob.ippolito | 2006-05-29 17:47:29 +0200 (Mon, 29 May 2006) | 1 line simplify the struct code a bit (no functional changes) ........ r46528 | armin.rigo | 2006-05-29 19:59:47 +0200 (Mon, 29 May 2006) | 2 lines Silence a warning. ........ r46529 | georg.brandl | 2006-05-29 21:39:45 +0200 (Mon, 29 May 2006) | 3 lines Correct some value converting strangenesses. ........ r46530 | nick.coghlan | 2006-05-29 22:27:44 +0200 (Mon, 29 May 2006) | 1 line When adding a module like functools, it helps to let SVN know about the file. ........ r46531 | georg.brandl | 2006-05-29 22:52:54 +0200 (Mon, 29 May 2006) | 4 lines Patches #1497027 and #972322: try HTTP digest auth first, and watch out for handler name collisions. ........ r46532 | georg.brandl | 2006-05-29 22:57:01 +0200 (Mon, 29 May 2006) | 3 lines Add News entry for last commit. ........ r46533 | georg.brandl | 2006-05-29 23:04:52 +0200 (Mon, 29 May 2006) | 4 lines Make use of METH_O and METH_NOARGS where possible. Use Py_UnpackTuple instead of PyArg_ParseTuple where possible. ........ r46534 | georg.brandl | 2006-05-29 23:58:42 +0200 (Mon, 29 May 2006) | 3 lines Convert more modules to METH_VARARGS. ........ r46535 | georg.brandl | 2006-05-30 00:00:30 +0200 (Tue, 30 May 2006) | 3 lines Whoops. ........ 
r46536 | fredrik.lundh | 2006-05-30 00:42:07 +0200 (Tue, 30 May 2006) | 4 lines fixed "abc".count("", 100) == -96 error (hopefully, nobody's relying on the current behaviour ;-) ........ r46537 | bob.ippolito | 2006-05-30 00:55:48 +0200 (Tue, 30 May 2006) | 1 line struct: modulo math plus warning on all endian-explicit formats for compatibility with older struct usage (ugly) ........ r46539 | bob.ippolito | 2006-05-30 02:26:01 +0200 (Tue, 30 May 2006) | 1 line Add a length check to aifc to ensure it doesn't write a bogus file ........ r46540 | tim.peters | 2006-05-30 04:25:25 +0200 (Tue, 30 May 2006) | 10 lines deprecated_err(): Stop bizarre warning messages when the tests are run in the order: test_genexps (or any other doctest-based test) test_struct test_doctest The `warnings` module needs an advertised way to save/restore its internal filter list. ........ r46541 | tim.peters | 2006-05-30 04:26:46 +0200 (Tue, 30 May 2006) | 2 lines Whitespace normalization. ........ r46542 | tim.peters | 2006-05-30 04:30:30 +0200 (Tue, 30 May 2006) | 2 lines Set a binary svn:mime-type property on this UTF-8 encoded file. ........ r46543 | neal.norwitz | 2006-05-30 05:18:50 +0200 (Tue, 30 May 2006) | 1 line Simplify further by using AddStringConstant ........ r46544 | tim.peters | 2006-05-30 06:16:25 +0200 (Tue, 30 May 2006) | 6 lines Convert relevant dict internals to Py_ssize_t. I don't have a box with nearly enough RAM, or an OS, that could get close to tickling this, though (requires a dict w/ at least 2**31 entries). ........ r46545 | neal.norwitz | 2006-05-30 06:19:21 +0200 (Tue, 30 May 2006) | 1 line Remove stray | in comment ........ r46546 | neal.norwitz | 2006-05-30 06:25:05 +0200 (Tue, 30 May 2006) | 1 line Use Py_SAFE_DOWNCAST for safety. Fix format strings. Remove 2 more stray | in comment ........ 
r46547 | neal.norwitz | 2006-05-30 06:43:23 +0200 (Tue, 30 May 2006) | 1 line No DOWNCAST is required since sizeof(Py_ssize_t) >= sizeof(int) and Py_ReprEntr returns an int ........ r46548 | tim.peters | 2006-05-30 07:04:59 +0200 (Tue, 30 May 2006) | 3 lines dict_print(): Explicitly narrow the return value from a (possibly) wider variable. ........ r46549 | tim.peters | 2006-05-30 07:23:59 +0200 (Tue, 30 May 2006) | 5 lines dict_print(): So that Neal & I don't spend the rest of our lives taking turns rewriting code that works ;-), get rid of casting illusions by declaring a new variable with the obvious type. ........ r46550 | georg.brandl | 2006-05-30 09:04:55 +0200 (Tue, 30 May 2006) | 3 lines Restore exception pickle support. #1497319. ........ r46551 | georg.brandl | 2006-05-30 09:13:29 +0200 (Tue, 30 May 2006) | 3 lines Add a test case for exception pickling. args is never NULL. ........ r46552 | neal.norwitz | 2006-05-30 09:21:10 +0200 (Tue, 30 May 2006) | 1 line Don't fail if the (sub)pkgname already exist. ........ r46553 | georg.brandl | 2006-05-30 09:34:45 +0200 (Tue, 30 May 2006) | 3 lines Disallow keyword args for exceptions. ........ r46554 | neal.norwitz | 2006-05-30 09:36:54 +0200 (Tue, 30 May 2006) | 5 lines I'm impatient. I think this will fix a few more problems with the buildbots. I'm not sure this is the best approach, but I can't think of anything better. If this creates problems, feel free to revert, but I think it's safe and should make things a little better. ........ r46555 | georg.brandl | 2006-05-30 10:17:00 +0200 (Tue, 30 May 2006) | 4 lines Do the check for no keyword arguments in __init__ so that subclasses of Exception can be supplied keyword args ........ r46556 | georg.brandl | 2006-05-30 10:47:19 +0200 (Tue, 30 May 2006) | 3 lines Convert test_exceptions to unittest. ........ r46557 | andrew.kuchling | 2006-05-30 14:52:01 +0200 (Tue, 30 May 2006) | 1 line Add SoC name, and reorganize this section a bit ........ 
r46559 | tim.peters | 2006-05-30 17:53:34 +0200 (Tue, 30 May 2006) | 11 lines PyLong_FromString(): Continued fraction analysis (explained in a new comment) suggests there are almost certainly large input integers in all non-binary input bases for which one Python digit too few is initally allocated to hold the final result. Instead of assert-failing when that happens, allocate more space. Alas, I estimate it would take a few days to find a specific such case, so this isn't backed up by a new test (not to mention that such a case may take hours to run, since conversion time is quadratic in the number of digits, and preliminary attempts suggested that the smallest such inputs contain at least a million digits). ........ r46560 | fredrik.lundh | 2006-05-30 19:11:48 +0200 (Tue, 30 May 2006) | 3 lines changed find/rfind to return -1 for matches outside the source string ........ r46561 | bob.ippolito | 2006-05-30 19:37:54 +0200 (Tue, 30 May 2006) | 1 line Change wrapping terminology to overflow masking ........ r46562 | fredrik.lundh | 2006-05-30 19:39:58 +0200 (Tue, 30 May 2006) | 3 lines changed count to return 0 for slices outside the source string ........ r46568 | tim.peters | 2006-05-31 01:28:02 +0200 (Wed, 31 May 2006) | 2 lines Whitespace normalization. ........ r46569 | brett.cannon | 2006-05-31 04:19:54 +0200 (Wed, 31 May 2006) | 5 lines Clarify wording on default values for strptime(); defaults are used when better values cannot be inferred. Closes bug #1496315. ........ r46572 | neal.norwitz | 2006-05-31 09:43:27 +0200 (Wed, 31 May 2006) | 1 line Calculate smallest properly (it was off by one) and use proper ssize_t types for Win64 ........ r46573 | neal.norwitz | 2006-05-31 10:01:08 +0200 (Wed, 31 May 2006) | 1 line Revert last checkin, it is better to do make distclean ........ 
r46574 | neal.norwitz | 2006-05-31 11:02:44 +0200 (Wed, 31 May 2006) | 3 lines On 64-bit platforms running test_struct after test_tarfile would fail since the deprecation warning wouldn't be raised. ........ r46575 | thomas.heller | 2006-05-31 13:37:58 +0200 (Wed, 31 May 2006) | 3 lines PyTuple_Pack is not available in Python 2.3, but ctypes must stay compatible with that. ........ r46576 | andrew.kuchling | 2006-05-31 15:18:56 +0200 (Wed, 31 May 2006) | 1 line 'functional' module was renamed to 'functools' ........ r46577 | kristjan.jonsson | 2006-05-31 15:35:41 +0200 (Wed, 31 May 2006) | 1 line Fixup the PCBuild8 project directory. exceptions.c have moved to Objects, and the functionalmodule.c has been replaced with _functoolsmodule.c. Other minor changes to .vcproj files and .sln to fix compilation ........ r46578 | andrew.kuchling | 2006-05-31 16:08:48 +0200 (Wed, 31 May 2006) | 15 lines [Bug #1473048] SimpleXMLRPCServer and DocXMLRPCServer don't look at the path of the HTTP request at all; you can POST or GET from / or /RPC2 or /blahblahblah with the same results. Security scanners that look for /cgi-bin/phf will therefore report lots of vulnerabilities. Fix: add a .rpc_paths attribute to the SimpleXMLRPCServer class, and report a 404 error if the path isn't on the allowed list. Possibly-controversial aspect of this change: the default makes only '/' and '/RPC2' legal. Maybe this will break people's applications (though I doubt it). We could just set the default to an empty tuple, which would exactly match the current behaviour. ........ r46579 | andrew.kuchling | 2006-05-31 16:12:47 +0200 (Wed, 31 May 2006) | 1 line Mention SimpleXMLRPCServer change ........ r46580 | tim.peters | 2006-05-31 16:28:07 +0200 (Wed, 31 May 2006) | 2 lines Trimmed trailing whitespace. ........ r46581 | tim.peters | 2006-05-31 17:33:22 +0200 (Wed, 31 May 2006) | 4 lines _range_error(): Speed and simplify (there's no real need for loops here). 
Assert that size_t is actually big enough, and that f->size is at least one. Wrap a long line. ........ r46582 | tim.peters | 2006-05-31 17:34:37 +0200 (Wed, 31 May 2006) | 2 lines Repaired error in new comment. ........ r46584 | neal.norwitz | 2006-06-01 07:32:49 +0200 (Thu, 01 Jun 2006) | 4 lines Remove ; at end of macro. There was a compiler recently that warned about extra semi-colons. It may have been the HP C compiler. This file will trigger a bunch of those warnings now. ........ r46585 | georg.brandl | 2006-06-01 08:39:19 +0200 (Thu, 01 Jun 2006) | 3 lines Correctly unpickle 2.4 exceptions via __setstate__ (patch #1498571) ........ r46586 | georg.brandl | 2006-06-01 10:27:32 +0200 (Thu, 01 Jun 2006) | 3 lines Correctly allocate complex types with tp_alloc. (bug #1498638) ........ r46587 | georg.brandl | 2006-06-01 14:30:46 +0200 (Thu, 01 Jun 2006) | 2 lines Correctly dispatch Faults in loads (patch #1498627) ........ r46588 | georg.brandl | 2006-06-01 15:00:49 +0200 (Thu, 01 Jun 2006) | 3 lines Some code style tweaks, and remove apply. ........ r46589 | armin.rigo | 2006-06-01 15:19:12 +0200 (Thu, 01 Jun 2006) | 5 lines [ 1497053 ] Let dicts propagate the exceptions in user __eq__(). [ 1456209 ] dictresize() vulnerability ( <- backport candidate ). ........ r46590 | tim.peters | 2006-06-01 15:41:46 +0200 (Thu, 01 Jun 2006) | 2 lines Whitespace normalization. ........ r46591 | tim.peters | 2006-06-01 15:49:23 +0200 (Thu, 01 Jun 2006) | 2 lines Record bugs 1275608 and 1456209 as being fixed. ........ r46592 | tim.peters | 2006-06-01 15:56:26 +0200 (Thu, 01 Jun 2006) | 5 lines Re-enable a new empty-string test added during the NFS sprint, but disabled then because str and unicode strings gave different results. The implementations were repaired later during the sprint, but the new test remained disabled. ........ 
r46594 | tim.peters | 2006-06-01 17:50:44 +0200 (Thu, 01 Jun 2006) | 7 lines Armin committed his patch while I was reviewing it (I'm sure he didn't know this), so merged in some changes I made during review. Nothing material apart from changing a new `mask` local from int to Py_ssize_t. Mostly this is repairing comments that were made incorrect, and adding new comments. Also a few minor code rewrites for clarity or helpful succinctness. ........ r46599 | neal.norwitz | 2006-06-02 06:45:53 +0200 (Fri, 02 Jun 2006) | 1 line Convert docstrings to comments so regrtest -v prints method names ........ r46600 | neal.norwitz | 2006-06-02 06:50:49 +0200 (Fri, 02 Jun 2006) | 2 lines Fix memory leak found by valgrind. ........ r46601 | neal.norwitz | 2006-06-02 06:54:52 +0200 (Fri, 02 Jun 2006) | 1 line More memory leaks from valgrind ........ r46602 | neal.norwitz | 2006-06-02 08:23:00 +0200 (Fri, 02 Jun 2006) | 11 lines Patch #1357836: Prevent an invalid memory read from test_coding in case the done flag is set. In that case, the loop isn't entered. I wonder if rather than setting the done flag in the cases before the loop, if they should just exit early. This code looks like it should be refactored. Backport candidate (also the early break above if decoding_fgets fails) ........ r46603 | martin.blais | 2006-06-02 15:03:43 +0200 (Fri, 02 Jun 2006) | 1 line Fixed struct test to not use unittest. ........ r46605 | tim.peters | 2006-06-03 01:22:51 +0200 (Sat, 03 Jun 2006) | 10 lines pprint functions used to sort a dict (by key) if and only if the output required more than one line. "Small" dicts got displayed in seemingly random order (the hash-induced order produced by dict.__repr__). None of this was documented. Now pprint functions always sort dicts by key, and the docs promise it. This was proposed and agreed to during the PyCon 2006 core sprint -- I just didn't have time for it before now. ........
2006-06-08 11:42:34 -03:00
if (tok->buf != NULL) {
tok->cur = tok->buf + cur;
tok->line_start = tok->cur;
/* replace "\r\n" with "\n" */
/* For Mac leave the \r, giving syntax error */
pt = tok->inp - 2;
if (pt >= tok->buf && *pt == '\r') {
*pt++ = '\n';
*pt = '\0';
tok->inp = pt;
}
}
1990-10-14 09:07:46 -03:00
}
if (tok->done != E_OK) {
if (tok->prompt != NULL)
PySys_WriteStderr("\n");
tok->cur = tok->inp;
1990-10-14 09:07:46 -03:00
return EOF;
}
}
/*NOTREACHED*/
1990-10-14 09:07:46 -03:00
}
/* Back-up one character */
static void
tok_backup(register struct tok_state *tok, register int c)
1990-10-14 09:07:46 -03:00
{
if (c != EOF) {
if (--tok->cur < tok->buf)
1997-04-29 18:03:06 -03:00
Py_FatalError("tok_backup: begin of buffer");
1990-10-14 09:07:46 -03:00
if (*tok->cur != c)
*tok->cur = c;
}
}
/* Return the token corresponding to a single character */
int
PyToken_OneChar(int c)
1990-10-14 09:07:46 -03:00
{
switch (c) {
case '(': return LPAR;
case ')': return RPAR;
case '[': return LSQB;
case ']': return RSQB;
case ':': return COLON;
case ',': return COMMA;
case ';': return SEMI;
case '+': return PLUS;
case '-': return MINUS;
case '*': return STAR;
case '/': return SLASH;
case '|': return VBAR;
case '&': return AMPER;
case '<': return LESS;
case '>': return GREATER;
case '=': return EQUAL;
case '.': return DOT;
case '%': return PERCENT;
case '{': return LBRACE;
case '}': return RBRACE;
case '^': return CIRCUMFLEX;
case '~': return TILDE;
case '@': return AT;
1990-10-14 09:07:46 -03:00
default: return OP;
}
}
int
PyToken_TwoChars(int c1, int c2)
{
switch (c1) {
case '=':
switch (c2) {
case '=': return EQEQUAL;
}
break;
case '!':
switch (c2) {
case '=': return NOTEQUAL;
}
break;
case '<':
switch (c2) {
case '=': return LESSEQUAL;
case '<': return LEFTSHIFT;
}
break;
case '>':
switch (c2) {
case '=': return GREATEREQUAL;
case '>': return RIGHTSHIFT;
}
break;
case '+':
switch (c2) {
case '=': return PLUSEQUAL;
}
break;
case '-':
switch (c2) {
case '=': return MINEQUAL;
}
break;
1996-01-11 21:31:58 -04:00
case '*':
switch (c2) {
case '*': return DOUBLESTAR;
case '=': return STAREQUAL;
}
break;
case '/':
switch (c2) {
case '/': return DOUBLESLASH;
case '=': return SLASHEQUAL;
}
break;
case '|':
switch (c2) {
case '=': return VBAREQUAL;
}
break;
case '%':
switch (c2) {
case '=': return PERCENTEQUAL;
}
break;
case '&':
switch (c2) {
case '=': return AMPEREQUAL;
}
break;
case '^':
switch (c2) {
case '=': return CIRCUMFLEXEQUAL;
1996-01-11 21:31:58 -04:00
}
break;
}
return OP;
}
/* Return the token corresponding to a three-character operator,
   or OP when the triple is not a recognized token. */

int
PyToken_ThreeChars(int c1, int c2, int c3)
{
	/* The only three-character tokens are the augmented assignments
	   of the doubled operators: <<= >>= **= //= */
	if (c1 == c2 && c3 == '=') {
		switch (c1) {
		case '<': return LEFTSHIFTEQUAL;
		case '>': return RIGHTSHIFTEQUAL;
		case '*': return DOUBLESTAREQUAL;
		case '/': return DOUBLESLASHEQUAL;
		}
	}
	return OP;
}
/* Report inconsistent use of tabs and spaces in indentation.
   Returns nonzero (after recording E_TABSPACE and consuming the rest
   of the line) when the tokenizer is configured to treat this as an
   error; otherwise emits at most one warning and returns zero. */

static int
indenterror(struct tok_state *tok)
{
	if (!tok->alterror) {
		if (tok->altwarning) {
			PySys_WriteStderr("%s: inconsistent use of tabs and spaces "
					  "in indentation\n", tok->filename);
			/* Warn only once per file. */
			tok->altwarning = 0;
		}
		return 0;
	}
	tok->done = E_TABSPACE;
	tok->cur = tok->inp;
	return 1;
}
1990-10-14 09:07:46 -03:00
/* Get next token, after space stripping etc. */

/* Core tokenizer loop: scan the next token from `tok`, returning its
   token type and setting *p_start / *p_end to the token's text span
   inside the tokenizer's buffer.  Returns ERRORTOKEN on lexical errors
   (with tok->done set to the E_* code) and ENDMARKER at clean EOF.
   NOTE(review): control flow relies on gotos, including jumps INTO the
   middle of the number-scanning branch (`fraction`/`exponent`/
   `imaginary`) — do not restructure casually. */
static int
tok_get(register struct tok_state *tok, char **p_start, char **p_end)
{
	register int c;
	int blankline;

	*p_start = *p_end = NULL;
  nextline:
	tok->start = NULL;
	blankline = 0;

	/* Get indentation level */
	if (tok->atbol) {
		/* `col` counts columns with tabs expanded to tok->tabsize;
		   `altcol` uses the alternate tab size so tab/space
		   inconsistencies can be detected by comparing the two. */
		register int col = 0;
		register int altcol = 0;
		tok->atbol = 0;
		for (;;) {
			c = tok_nextc(tok);
			if (c == ' ')
				col++, altcol++;
			else if (c == '\t') {
				col = (col/tok->tabsize + 1) * tok->tabsize;
				altcol = (altcol/tok->alttabsize + 1)
					* tok->alttabsize;
			}
			else if (c == '\014') /* Control-L (formfeed) */
				col = altcol = 0; /* For Emacs users */
			else
				break;
		}
		tok_backup(tok, c);
		if (c == '#' || c == '\n') {
			/* Lines with only whitespace and/or comments
			   shouldn't affect the indentation and are
			   not passed to the parser as NEWLINE tokens,
			   except *totally* empty lines in interactive
			   mode, which signal the end of a command group. */
			if (col == 0 && c == '\n' && tok->prompt != NULL)
				blankline = 0; /* Let it through */
			else
				blankline = 1; /* Ignore completely */
			/* We can't jump back right here since we still
			   may need to skip to the end of a comment */
		}
		/* Indentation is only significant outside bracketed
		   expressions (tok->level == 0). */
		if (!blankline && tok->level == 0) {
			if (col == tok->indstack[tok->indent]) {
				/* No change */
				if (altcol != tok->altindstack[tok->indent]) {
					if (indenterror(tok))
						return ERRORTOKEN;
				}
			}
			else if (col > tok->indstack[tok->indent]) {
				/* Indent -- always one */
				if (tok->indent+1 >= MAXINDENT) {
					tok->done = E_TOODEEP;
					tok->cur = tok->inp;
					return ERRORTOKEN;
				}
				if (altcol <= tok->altindstack[tok->indent]) {
					if (indenterror(tok))
						return ERRORTOKEN;
				}
				tok->pendin++;
				tok->indstack[++tok->indent] = col;
				tok->altindstack[tok->indent] = altcol;
			}
			else /* col < tok->indstack[tok->indent] */ {
				/* Dedent -- any number, must be consistent */
				while (tok->indent > 0 &&
					col < tok->indstack[tok->indent]) {
					tok->pendin--;
					tok->indent--;
				}
				if (col != tok->indstack[tok->indent]) {
					tok->done = E_DEDENT;
					tok->cur = tok->inp;
					return ERRORTOKEN;
				}
				if (altcol != tok->altindstack[tok->indent]) {
					if (indenterror(tok))
						return ERRORTOKEN;
				}
			}
		}
	}

	tok->start = tok->cur;

	/* Return pending indents/dedents */
	if (tok->pendin != 0) {
		if (tok->pendin < 0) {
			tok->pendin++;
			return DEDENT;
		}
		else {
			tok->pendin--;
			return INDENT;
		}
	}

 again:
	tok->start = NULL;
	/* Skip spaces */
	do {
		c = tok_nextc(tok);
	} while (c == ' ' || c == '\t' || c == '\014');

	/* Set start of current token */
	tok->start = tok->cur - 1;

	/* Skip comment */
	if (c == '#')
		while (c != EOF && c != '\n')
			c = tok_nextc(tok);

	/* Check for EOF and errors now */
	if (c == EOF) {
		return tok->done == E_EOF ? ENDMARKER : ERRORTOKEN;
	}

	/* Identifier (most frequent token!) */
	if (isalpha(c) || c == '_') {
		/* Process r"", u"" and ur"" */
		switch (c) {
		case 'r':
		case 'R':
			c = tok_nextc(tok);
			if (c == '"' || c == '\'')
				goto letter_quote;
			break;
		case 'u':
		case 'U':
			c = tok_nextc(tok);
			if (c == 'r' || c == 'R')
				c = tok_nextc(tok);
			if (c == '"' || c == '\'')
				goto letter_quote;
			break;
		}
		while (isalnum(c) || c == '_') {
			c = tok_nextc(tok);
		}
		tok_backup(tok, c);
		*p_start = tok->start;
		*p_end = tok->cur;
		return NAME;
	}

	/* Newline */
	if (c == '\n') {
		tok->atbol = 1;
		/* Inside brackets or on a blank line a newline is not a token. */
		if (blankline || tok->level > 0)
			goto nextline;
		*p_start = tok->start;
		*p_end = tok->cur - 1; /* Leave '\n' out of the string */
		tok->cont_line = 0;
		return NEWLINE;
	}

	/* Period or number starting with period? */
	if (c == '.') {
		c = tok_nextc(tok);
		if (isdigit(c)) {
			goto fraction;
		}
		else {
			tok_backup(tok, c);
			*p_start = tok->start;
			*p_end = tok->cur;
			return DOT;
		}
	}

	/* Number */
	if (isdigit(c)) {
		if (c == '0') {
			/* Hex or octal -- maybe. */
			c = tok_nextc(tok);
			if (c == '.')
				goto fraction;
#ifndef WITHOUT_COMPLEX
			if (c == 'j' || c == 'J')
				goto imaginary;
#endif
			if (c == 'x' || c == 'X') {
				/* Hex */
				do {
					c = tok_nextc(tok);
				} while (isxdigit(c));
			}
			else {
				/* found_decimal flags digits 8/9 after a
				   leading 0: only legal if the literal turns
				   out to be a float/imaginary, not an octal. */
				int found_decimal = 0;
				/* Octal; c is first char of it */
				/* There's no 'isoctdigit' macro, sigh */
				while ('0' <= c && c < '8') {
					c = tok_nextc(tok);
				}
				if (isdigit(c)) {
					found_decimal = 1;
					do {
						c = tok_nextc(tok);
					} while (isdigit(c));
				}
				if (c == '.')
					goto fraction;
				else if (c == 'e' || c == 'E')
					goto exponent;
#ifndef WITHOUT_COMPLEX
				else if (c == 'j' || c == 'J')
					goto imaginary;
#endif
				else if (found_decimal) {
					tok->done = E_TOKEN;
					tok_backup(tok, c);
					return ERRORTOKEN;
				}
			}
			if (c == 'l' || c == 'L')
				c = tok_nextc(tok);
		}
		else {
			/* Decimal */
			do {
				c = tok_nextc(tok);
			} while (isdigit(c));
			if (c == 'l' || c == 'L')
				c = tok_nextc(tok);
			else {
				/* Accept floating point numbers. */
				if (c == '.') {
		fraction:
					/* Fraction */
					do {
						c = tok_nextc(tok);
					} while (isdigit(c));
				}
				if (c == 'e' || c == 'E') {
		exponent:
					/* Exponent part */
					c = tok_nextc(tok);
					if (c == '+' || c == '-')
						c = tok_nextc(tok);
					if (!isdigit(c)) {
						tok->done = E_TOKEN;
						tok_backup(tok, c);
						return ERRORTOKEN;
					}
					do {
						c = tok_nextc(tok);
					} while (isdigit(c));
				}
#ifndef WITHOUT_COMPLEX
				if (c == 'j' || c == 'J')
					/* Imaginary part */
		imaginary:
					c = tok_nextc(tok);
#endif
			}
		}
		tok_backup(tok, c);
		*p_start = tok->start;
		*p_end = tok->cur;
		return NUMBER;
	}

  letter_quote:
	/* String */
	if (c == '\'' || c == '"') {
		/* quote2 = offset of the would-be second quote character;
		   used to detect the start of a triple-quoted string. */
		Py_ssize_t quote2 = tok->cur - tok->start + 1;
		int quote = c;
		int triple = 0;
		int tripcount = 0; /* consecutive closing quotes seen */
		for (;;) {
			c = tok_nextc(tok);
			if (c == '\n') {
				if (!triple) {
					tok->done = E_EOLS;
					tok_backup(tok, c);
					return ERRORTOKEN;
				}
				tripcount = 0;
				tok->cont_line = 1; /* multiline string. */
			}
			else if (c == EOF) {
				if (triple)
					tok->done = E_EOFS;
				else
					tok->done = E_EOLS;
				tok->cur = tok->inp;
				return ERRORTOKEN;
			}
			else if (c == quote) {
				tripcount++;
				if (tok->cur - tok->start == quote2) {
					/* Second quote right after the first:
					   either an empty string or the opening
					   of a triple-quoted string. */
					c = tok_nextc(tok);
					if (c == quote) {
						triple = 1;
						tripcount = 0;
						continue;
					}
					tok_backup(tok, c);
				}
				if (!triple || tripcount == 3)
					break;
			}
			else if (c == '\\') {
				/* Escaped character: consume it blindly. */
				tripcount = 0;
				c = tok_nextc(tok);
				if (c == EOF) {
					tok->done = E_EOLS;
					tok->cur = tok->inp;
					return ERRORTOKEN;
				}
			}
			else
				tripcount = 0;
		}
		*p_start = tok->start;
		*p_end = tok->cur;
		return STRING;
	}

	/* Line continuation */
	if (c == '\\') {
		c = tok_nextc(tok);
		if (c != '\n') {
			tok->done = E_LINECONT;
			tok->cur = tok->inp;
			return ERRORTOKEN;
		}
		tok->cont_line = 1;
		goto again; /* Read next line */
	}

	/* Check for two-character token */
	{
		int c2 = tok_nextc(tok);
		int token = PyToken_TwoChars(c, c2);
		if (token != OP) {
			/* A two-char match may extend to three chars. */
			int c3 = tok_nextc(tok);
			int token3 = PyToken_ThreeChars(c, c2, c3);
			if (token3 != OP) {
				token = token3;
			} else {
				tok_backup(tok, c3);
			}
			*p_start = tok->start;
			*p_end = tok->cur;
			return token;
		}
		tok_backup(tok, c2);
	}

	/* Keep track of parentheses nesting level */
	switch (c) {
	case '(':
	case '[':
	case '{':
		tok->level++;
		break;
	case ')':
	case ']':
	case '}':
		tok->level--;
		break;
	}

	/* Punctuation character */
	*p_start = tok->start;
	*p_end = tok->cur;
	return PyToken_OneChar(c);
}
/* Public entry point: fetch the next token from `tok`.  Wraps the
   internal tok_get() and converts any pending source-decoding failure
   into a generic ERRORTOKEN with tok->done set to E_DECODE. */

int
PyTokenizer_Get(struct tok_state *tok, char **p_start, char **p_end)
{
	int token = tok_get(tok, p_start, p_end);
	if (!tok->decoding_erred)
		return token;
	tok->done = E_DECODE;
	return ERRORTOKEN;
}
1990-10-14 09:07:46 -03:00
1996-12-30 12:17:54 -04:00
#ifdef Py_DEBUG

/* Debugging helper: print a token's name and, for token types that
   carry text (NAME/NUMBER/STRING/OP), the text span in parentheses. */
void
tok_dump(int type, char *start, char *end)
{
	printf("%s", _PyParser_TokenNames[type]);
	switch (type) {
	case NAME:
	case NUMBER:
	case STRING:
	case OP:
		printf("(%.*s)", (int)(end - start), start);
		break;
	}
}

#endif