Fixed bug #1915: Python compiles with --enable-unicode=no again. However several extension methods and modules do not work without unicode support.

This commit is contained in:
Christian Heimes 2008-01-23 14:20:41 +00:00
parent 2c63442586
commit d2f4cb8cca
4 changed files with 11 additions and 4 deletions

View File

@@ -60,7 +60,7 @@ def normalize_encoding(encoding):
"""
# Make sure we have an 8-bit string, because .translate() works
# differently for Unicode strings.
if type(encoding) is types.UnicodeType:
if hasattr(types, "UnicodeType") and type(encoding) is types.UnicodeType:
# Note that .encode('latin-1') does *not* use the codec
# registry, so this call doesn't recurse. (See unicodeobject.c
# PyUnicode_AsEncodedString() for details)

View File

@@ -824,7 +824,11 @@ except NameError:
(True, False) = (1, 0)
def isbasestring(x):
return isinstance(x, types.StringType) or isinstance(x, types.UnicodeType)
try:
return isinstance(x, basestring)
except:
return isinstance(x, types.StringType) or isinstance(x, types.UnicodeType)
class Values:

View File

@@ -12,6 +12,10 @@ What's New in Python 2.5.2c1?
Core and builtins
-----------------
- Bug #1915: Python compiles with --enable-unicode=no again. However
several extension methods and modules do not work without unicode
support.
- Issue #1678380: distinction between 0.0 and -0.0 was lost during constant
folding optimization. This was a regression from Python 2.4.

View File

@@ -1537,7 +1537,7 @@ PyTokenizer_Get(struct tok_state *tok, char **p_start, char **p_end)
there, as it must be empty for PGEN, and we can check for PGEN only
in this file. */
#ifdef PGEN
#if defined(PGEN) || !defined(Py_USING_UNICODE)
char*
PyTokenizer_RestoreEncoding(struct tok_state* tok, int len, int* offset)
{
@@ -1557,7 +1557,6 @@ dec_utf8(const char *enc, const char *text, size_t len) {
}
return ret;
}
char *
PyTokenizer_RestoreEncoding(struct tok_state* tok, int len, int *offset)
{