merge json library with simplejson 2.0.9 (issue 4136)

This commit is contained in:
Bob Ippolito 2009-03-17 23:19:00 +00:00
parent 277859d591
commit d914e3f861
13 changed files with 2430 additions and 582 deletions

View File

@ -1,11 +1,13 @@
r"""A simple, fast, extensible JSON encoder and decoder r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format. interchange format.
json exposes an API familiar to uses of the standard library :mod:`json` exposes an API familiar to users of the standard library
marshal and pickle modules. :mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies:: Encoding basic Python object hierarchies::
@ -32,23 +34,28 @@ Compact encoding::
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':')) >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]' '[1,2,3,{"4":5,"6":7}]'
Pretty printing (using repr() because of extraneous whitespace in the output):: Pretty printing::
>>> import json >>> import json
>>> print repr(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)) >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
'{\n "4": 5, \n "6": 7\n}' >>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON:: Decoding JSON::
>>> import json >>> import json
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
[u'foo', {u'bar': [u'baz', None, 1.0, 2]}] >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
>>> json.loads('"\\"foo\\bar"') True
u'"foo\x08ar' >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO >>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]') >>> io = StringIO('["streaming API"]')
>>> json.load(io) >>> json.load(io)[0] == 'streaming API'
[u'streaming API'] True
Specializing JSON object decoding:: Specializing JSON object decoding::
@ -61,43 +68,36 @@ Specializing JSON object decoding::
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}', >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex) ... object_hook=as_complex)
(1+2j) (1+2j)
>>> import decimal >>> from decimal import Decimal
>>> json.loads('1.1', parse_float=decimal.Decimal) >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
Decimal('1.1') True
Extending JSONEncoder:: Specializing JSON object encoding::
>>> import json >>> import json
>>> class ComplexEncoder(json.JSONEncoder): >>> def encode_complex(obj):
... def default(self, obj): ... if isinstance(obj, complex):
... if isinstance(obj, complex): ... return [obj.real, obj.imag]
... return [obj.real, obj.imag] ... raise TypeError(repr(o) + " is not JSON serializable")
... return json.JSONEncoder.default(self, obj)
... ...
>>> dumps(2 + 1j, cls=ComplexEncoder) >>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]' '[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j) >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]' '[2.0, 1.0]'
>>> list(ComplexEncoder().iterencode(2 + 1j))
['[', '2.0', ', ', '1.0', ']']
Using json.tool from the shell to validate and Using json.tool from the shell to validate and pretty-print::
pretty-print::
$ echo '{"json":"obj"}' | python -mjson.tool $ echo '{"json":"obj"}' | python -m json.tool
{ {
"json": "obj" "json": "obj"
} }
$ echo '{ 1.2:3.4}' | python -mjson.tool $ echo '{ 1.2:3.4}' | python -m json.tool
Expecting property name: line 1 column 2 (char 2) Expecting property name: line 1 column 2 (char 2)
Note that the JSON produced by this module's default settings
is a subset of YAML, so it may be used as a serializer for that as well.
""" """
__version__ = '2.0.9'
__version__ = '1.9'
__all__ = [ __all__ = [
'dump', 'dumps', 'load', 'loads', 'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder', 'JSONDecoder', 'JSONEncoder',
@ -125,28 +125,29 @@ def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object). ``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``. will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the some chunks written to ``fp`` If ``ensure_ascii`` is false, then the some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error. to cause an error.
If ``check_circular`` is ``False``, then the circular reference check If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse). result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object If ``indent`` is a non-negative integer, then JSON array elements and
members will be pretty-printed with that indent level. An indent level object members will be pretty-printed with that indent level. An indent
of 0 will only insert newlines. ``None`` is the most compact representation. level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators. then it will be used instead of the default ``(', ', ': ')`` separators.
@ -163,8 +164,8 @@ def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
""" """
# cached encoder # cached encoder
if (skipkeys is False and ensure_ascii is True and if (not skipkeys and ensure_ascii and
check_circular is True and allow_nan is True and check_circular and allow_nan and
cls is None and indent is None and separators is None and cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw): encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj) iterable = _default_encoder.iterencode(obj)
@ -186,19 +187,19 @@ def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
encoding='utf-8', default=None, **kw): encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``. """Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types If ``skipkeys`` is false then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``. will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode`` ``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``. coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse). result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
@ -223,8 +224,8 @@ def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
""" """
# cached encoder # cached encoder
if (skipkeys is False and ensure_ascii is True and if (not skipkeys and ensure_ascii and
check_circular is True and allow_nan is True and check_circular and allow_nan and
cls is None and indent is None and separators is None and cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw): encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj) return _default_encoder.encode(obj)
@ -242,8 +243,8 @@ _default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw): parse_int=None, parse_constant=None, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
containing a JSON document) to a Python object. a JSON document) to a Python object.
If the contents of ``fp`` is encoded with an ASCII based encoding other If the contents of ``fp`` is encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must

View File

@ -1,10 +1,10 @@
"""Implementation of JSONDecoder """Implementation of JSONDecoder
""" """
import re import re
import sys import sys
import struct
from json.scanner import Scanner, pattern from json.scanner import make_scanner
try: try:
from _json import scanstring as c_scanstring from _json import scanstring as c_scanstring
except ImportError: except ImportError:
@ -14,7 +14,14 @@ __all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
NaN, PosInf, NegInf = float('nan'), float('inf'), float('-inf') def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos): def linecol(doc, pos):
@ -27,49 +34,26 @@ def linecol(doc, pos):
def errmsg(msg, doc, pos, end=None): def errmsg(msg, doc, pos, end=None):
# Note that this function is called from _json
lineno, colno = linecol(doc, pos) lineno, colno = linecol(doc, pos)
if end is None: if end is None:
fmt = '{0}: line {1} column {2} (char {3})' fmt = '{0}: line {1} column {2} (char {3})'
return fmt.format(msg, lineno, colno, pos) return fmt.format(msg, lineno, colno, pos)
#fmt = '%s: line %d column %d (char %d)'
#return fmt % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end) endlineno, endcolno = linecol(doc, end)
fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})' fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end) return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
#fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
#return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = { _CONSTANTS = {
'-Infinity': NegInf, '-Infinity': NegInf,
'Infinity': PosInf, 'Infinity': PosInf,
'NaN': NaN, 'NaN': NaN,
'true': True,
'false': False,
'null': None,
} }
def JSONConstant(match, context, c=_CONSTANTS):
s = match.group(0)
fn = getattr(context, 'parse_constant', None)
if fn is None:
rval = c[s]
else:
rval = fn(s)
return rval, None
pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant)
def JSONNumber(match, context):
match = JSONNumber.regex.match(match.string, *match.span())
integer, frac, exp = match.groups()
if frac or exp:
fn = getattr(context, 'parse_float', None) or float
res = fn(integer + (frac or '') + (exp or ''))
else:
fn = getattr(context, 'parse_int', None) or int
res = fn(integer)
return res, None
pattern(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?')(JSONNumber)
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS) STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = { BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/', '"': u'"', '\\': u'\\', '/': u'/',
@ -78,8 +62,16 @@ BACKSLASH = {
DEFAULT_ENCODING = "utf-8" DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
_b=BACKSLASH, _m=STRINGCHUNK.match):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match): Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
if encoding is None: if encoding is None:
encoding = DEFAULT_ENCODING encoding = DEFAULT_ENCODING
chunks = [] chunks = []
@ -92,14 +84,18 @@ def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHU
errmsg("Unterminated string starting at", s, begin)) errmsg("Unterminated string starting at", s, begin))
end = chunk.end() end = chunk.end()
content, terminator = chunk.groups() content, terminator = chunk.groups()
# Content is contains zero or more unescaped string characters
if content: if content:
if not isinstance(content, unicode): if not isinstance(content, unicode):
content = unicode(content, encoding) content = unicode(content, encoding)
_append(content) _append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"': if terminator == '"':
break break
elif terminator != '\\': elif terminator != '\\':
if strict: if strict:
#msg = "Invalid control character %r at" % (terminator,)
msg = "Invalid control character {0!r} at".format(terminator) msg = "Invalid control character {0!r} at".format(terminator)
raise ValueError(errmsg(msg, s, end)) raise ValueError(errmsg(msg, s, end))
else: else:
@ -110,136 +106,157 @@ def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHU
except IndexError: except IndexError:
raise ValueError( raise ValueError(
errmsg("Unterminated string starting at", s, begin)) errmsg("Unterminated string starting at", s, begin))
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u': if esc != 'u':
try: try:
m = _b[esc] char = _b[esc]
except KeyError: except KeyError:
msg = "Invalid \\escape: {0!r}".format(esc) msg = "Invalid \\escape: " + repr(esc)
raise ValueError(errmsg(msg, s, end)) raise ValueError(errmsg(msg, s, end))
end += 1 end += 1
else: else:
# Unicode escape sequence
esc = s[end + 1:end + 5] esc = s[end + 1:end + 5]
next_end = end + 5 next_end = end + 5
msg = "Invalid \\uXXXX escape" if len(esc) != 4:
try: msg = "Invalid \\uXXXX escape"
if len(esc) != 4:
raise ValueError
uni = int(esc, 16)
if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
if not s[end + 5:end + 7] == '\\u':
raise ValueError
esc2 = s[end + 7:end + 11]
if len(esc2) != 4:
raise ValueError
uni2 = int(esc2, 16)
uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
next_end += 6
m = unichr(uni)
except ValueError:
raise ValueError(errmsg(msg, s, end)) raise ValueError(errmsg(msg, s, end))
uni = int(esc, 16)
# Check for surrogate pair on UCS-4 systems
if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
if not s[end + 5:end + 7] == '\\u':
raise ValueError(errmsg(msg, s, end))
esc2 = s[end + 7:end + 11]
if len(esc2) != 4:
raise ValueError(errmsg(msg, s, end))
uni2 = int(esc2, 16)
uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
next_end += 6
char = unichr(uni)
end = next_end end = next_end
_append(m) # Append the unescaped character
_append(char)
return u''.join(chunks), end return u''.join(chunks), end
# Use speedup # Use speedup if available
if c_scanstring is not None: scanstring = c_scanstring or py_scanstring
scanstring = c_scanstring
else:
scanstring = py_scanstring
def JSONString(match, context): WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
encoding = getattr(context, 'encoding', None) WHITESPACE_STR = ' \t\n\r'
strict = getattr(context, 'strict', True)
return scanstring(match.string, match.end(), encoding, strict)
pattern(r'"')(JSONString)
def JSONObject((s, end), encoding, strict, scan_once, object_hook,
WHITESPACE = re.compile(r'\s*', FLAGS) _w=WHITESPACE.match, _ws=WHITESPACE_STR):
def JSONObject(match, context, _w=WHITESPACE.match):
pairs = {} pairs = {}
s = match.string # Use a slice to prevent IndexError from being raised, the following
end = _w(s, match.end()).end() # check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1] nextchar = s[end:end + 1]
# Trivial empty object # Normally we expect nextchar == '"'
if nextchar == '}':
return pairs, end + 1
if nextchar != '"': if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end)) if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
return pairs, end + 1
elif nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1 end += 1
encoding = getattr(context, 'encoding', None)
strict = getattr(context, 'strict', True)
iterscan = JSONScanner.iterscan
while True: while True:
key, end = scanstring(s, end, encoding, strict) key, end = scanstring(s, end, encoding, strict)
end = _w(s, end).end()
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':': if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end)) end = _w(s, end).end()
end = _w(s, end + 1).end() if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end += 1
try: try:
value, end = iterscan(s, idx=end, context=context).next() if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
value, end = scan_once(s, end)
except StopIteration: except StopIteration:
raise ValueError(errmsg("Expecting object", s, end)) raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value pairs[key] = value
end = _w(s, end).end()
nextchar = s[end:end + 1] try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1 end += 1
if nextchar == '}': if nextchar == '}':
break break
if nextchar != ',': elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1)) raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
end = _w(s, end).end()
nextchar = s[end:end + 1] try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1 end += 1
if nextchar != '"': if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1)) raise ValueError(errmsg("Expecting property name", s, end - 1))
object_hook = getattr(context, 'object_hook', None)
if object_hook is not None: if object_hook is not None:
pairs = object_hook(pairs) pairs = object_hook(pairs)
return pairs, end return pairs, end
pattern(r'{')(JSONObject)
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
def JSONArray(match, context, _w=WHITESPACE.match):
values = [] values = []
s = match.string
end = _w(s, match.end()).end()
# Look-ahead for trivial empty array
nextchar = s[end:end + 1] nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']': if nextchar == ']':
return values, end + 1 return values, end + 1
iterscan = JSONScanner.iterscan _append = values.append
while True: while True:
try: try:
value, end = iterscan(s, idx=end, context=context).next() value, end = scan_once(s, end)
except StopIteration: except StopIteration:
raise ValueError(errmsg("Expecting object", s, end)) raise ValueError(errmsg("Expecting object", s, end))
values.append(value) _append(value)
end = _w(s, end).end()
nextchar = s[end:end + 1] nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1 end += 1
if nextchar == ']': if nextchar == ']':
break break
if nextchar != ',': elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end)) raise ValueError(errmsg("Expecting , delimiter", s, end))
end = _w(s, end).end()
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return values, end return values, end
pattern(r'\[')(JSONArray)
ANYTHING = [
JSONObject,
JSONArray,
JSONString,
JSONConstant,
JSONNumber,
]
JSONScanner = Scanner(ANYTHING)
class JSONDecoder(object): class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder """Simple JSON <http://json.org> decoder
@ -268,10 +285,8 @@ class JSONDecoder(object):
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec. their corresponding ``float`` values, which is outside the JSON spec.
"""
_scanner = Scanner(ANYTHING) """
__all__ = ['__init__', 'decode', 'raw_decode']
def __init__(self, encoding=None, object_hook=None, parse_float=None, def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True): parse_int=None, parse_constant=None, strict=True):
@ -282,8 +297,8 @@ class JSONDecoder(object):
Note that currently only encodings that are a superset of ASCII work, Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as ``unicode``. strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result of ``object_hook``, if specified, will be called with the result
every JSON object decoded and its return value will be used in of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting). deserializations (e.g. to support JSON-RPC class hinting).
@ -298,21 +313,24 @@ class JSONDecoder(object):
for JSON integers (e.g. float). for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the ``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false. following strings: -Infinity, Infinity, NaN.
This can be used to raise an exception if invalid JSON numbers This can be used to raise an exception if invalid JSON numbers
are encountered. are encountered.
""" """
self.encoding = encoding self.encoding = encoding
self.object_hook = object_hook self.object_hook = object_hook
self.parse_float = parse_float self.parse_float = parse_float or float
self.parse_int = parse_int self.parse_int = parse_int or int
self.parse_constant = parse_constant self.parse_constant = parse_constant or _CONSTANTS.__getitem__
self.strict = strict self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.scan_once = make_scanner(self)
def decode(self, s, _w=WHITESPACE.match): def decode(self, s, _w=WHITESPACE.match):
""" """Return the Python representation of ``s`` (a ``str`` or ``unicode``
Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document) instance containing a JSON document)
""" """
@ -322,18 +340,17 @@ class JSONDecoder(object):
raise ValueError(errmsg("Extra data", s, end, len(s))) raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj return obj
def raw_decode(self, s, **kw): def raw_decode(self, s, idx=0):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
with a JSON document) and return a 2-tuple of the Python beginning with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended. representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may This can be used to decode a JSON document from a string that may
have extraneous data at the end. have extraneous data at the end.
""" """
kw.setdefault('context', self)
try: try:
obj, end = self._scanner.iterscan(s, **kw).next() obj, end = self.scan_once(s, idx)
except StopIteration: except StopIteration:
raise ValueError("No JSON object could be decoded") raise ValueError("No JSON object could be decoded")
return obj, end return obj, end

View File

@ -1,15 +1,15 @@
"""Implementation of JSONEncoder """Implementation of JSONEncoder
""" """
import re import re
import math
try: try:
from _json import encode_basestring_ascii as c_encode_basestring_ascii from _json import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError: except ImportError:
c_encode_basestring_ascii = None c_encode_basestring_ascii = None
try:
__all__ = ['JSONEncoder'] from _json import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
@ -25,30 +25,12 @@ ESCAPE_DCT = {
} }
for i in range(0x20): for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
#ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr FLOAT_REPR = repr
def floatstr(o, allow_nan=True):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if math.isnan(o):
text = 'NaN'
elif math.isinf(o):
if math.copysign(1., o) == 1.:
text = 'Infinity'
else:
text = '-Infinity'
else:
return FLOAT_REPR(o)
if not allow_nan:
msg = "Out of range float values are not JSON compliant: " + repr(o)
raise ValueError(msg)
return text
def encode_basestring(s): def encode_basestring(s):
"""Return a JSON representation of a Python string """Return a JSON representation of a Python string
@ -59,6 +41,9 @@ def encode_basestring(s):
def py_encode_basestring_ascii(s): def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None: if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8') s = s.decode('utf-8')
def replace(match): def replace(match):
@ -69,20 +54,19 @@ def py_encode_basestring_ascii(s):
n = ord(s) n = ord(s)
if n < 0x10000: if n < 0x10000:
return '\\u{0:04x}'.format(n) return '\\u{0:04x}'.format(n)
#return '\\u%04x' % (n,)
else: else:
# surrogate pair # surrogate pair
n -= 0x10000 n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff) s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff) s2 = 0xdc00 | (n & 0x3ff)
return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
#return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
if c_encode_basestring_ascii is not None: encode_basestring_ascii = (
encode_basestring_ascii = c_encode_basestring_ascii c_encode_basestring_ascii or py_encode_basestring_ascii)
else:
encode_basestring_ascii = py_encode_basestring_ascii
class JSONEncoder(object): class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures. """Extensible JSON <http://json.org> encoder for Python data structures.
@ -113,7 +97,6 @@ class JSONEncoder(object):
implementation (to raise ``TypeError``). implementation (to raise ``TypeError``).
""" """
__all__ = ['__init__', 'default', 'encode', 'iterencode']
item_separator = ', ' item_separator = ', '
key_separator = ': ' key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True, def __init__(self, skipkeys=False, ensure_ascii=True,
@ -121,25 +104,25 @@ class JSONEncoder(object):
indent=None, separators=None, encoding='utf-8', default=None): indent=None, separators=None, encoding='utf-8', default=None):
"""Constructor for JSONEncoder, with sensible defaults. """Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped. skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str If ensure_ascii is true, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be unicode object. ensure_ascii is false, the output will be unicode object.
If check_circular is True, then lists, dicts, and custom encoded If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError). prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place. Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant, encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders. but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats. Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis. that JSON serializations can be compared on a day-to-day basis.
@ -161,175 +144,26 @@ class JSONEncoder(object):
The default is UTF-8. The default is UTF-8.
""" """
self.skipkeys = skipkeys self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii self.ensure_ascii = ensure_ascii
self.check_circular = check_circular self.check_circular = check_circular
self.allow_nan = allow_nan self.allow_nan = allow_nan
self.sort_keys = sort_keys self.sort_keys = sort_keys
self.indent = indent self.indent = indent
self.current_indent_level = 0
if separators is not None: if separators is not None:
self.item_separator, self.key_separator = separators self.item_separator, self.key_separator = separators
if default is not None: if default is not None:
self.default = default self.default = default
self.encoding = encoding self.encoding = encoding
def _newline_indent(self):
    """Return a newline followed by spaces for the current indent depth."""
    width = self.indent * self.current_indent_level
    return '\n' + ' ' * width
def _iterencode_list(self, lst, markers=None):
    """Yield JSON text chunks encoding the list or tuple *lst*.

    *markers* maps ``id(container) -> container`` for circular-reference
    detection; this container is registered on entry and removed again
    before the generator finishes, so the same object may legally appear
    twice in sibling positions.
    """
    if not lst:
        # Fast path: empty sequence needs no marker bookkeeping.
        yield '[]'
        return
    if markers is not None:
        markerid = id(lst)
        if markerid in markers:
            raise ValueError("Circular reference detected")
        markers[markerid] = lst
    yield '['
    if self.indent is not None:
        # Pretty-printing: mutate the shared indent level for the
        # duration of this container (restored below).
        self.current_indent_level += 1
        newline_indent = self._newline_indent()
        separator = self.item_separator + newline_indent
        yield newline_indent
    else:
        newline_indent = None
        separator = self.item_separator
    first = True
    for value in lst:
        if first:
            first = False
        else:
            yield separator
        # Recurse through the generic encoder for every element type.
        for chunk in self._iterencode(value, markers):
            yield chunk
    if newline_indent is not None:
        self.current_indent_level -= 1
        yield self._newline_indent()
    yield ']'
    if markers is not None:
        del markers[markerid]
def _iterencode_dict(self, dct, markers=None):
    """Yield JSON text chunks encoding the dict *dct*.

    Non-string keys are coerced to strings (floats via ``floatstr``,
    ints via ``str``); unsupported key types are skipped when
    ``self.skipkeys`` is true, otherwise they raise TypeError.
    *markers* is the circular-reference table shared with the other
    ``_iterencode*`` generators.
    """
    if not dct:
        # Fast path: empty mapping.
        yield '{}'
        return
    if markers is not None:
        markerid = id(dct)
        if markerid in markers:
            raise ValueError("Circular reference detected")
        markers[markerid] = dct
    yield '{'
    key_separator = self.key_separator
    if self.indent is not None:
        # Pretty-printing: bump the shared indent level (restored below).
        self.current_indent_level += 1
        newline_indent = self._newline_indent()
        item_separator = self.item_separator + newline_indent
        yield newline_indent
    else:
        newline_indent = None
        item_separator = self.item_separator
    first = True
    if self.ensure_ascii:
        encoder = encode_basestring_ascii
    else:
        encoder = encode_basestring
    allow_nan = self.allow_nan
    if self.sort_keys:
        # Deterministic output for regression testing.
        keys = dct.keys()
        keys.sort()
        items = [(k, dct[k]) for k in keys]
    else:
        items = dct.iteritems()
    _encoding = self.encoding
    _do_decode = (_encoding is not None
        and not (_encoding == 'utf-8'))
    for key, value in items:
        if isinstance(key, str):
            if _do_decode:
                # Decode byte-string keys to unicode for non-UTF-8 encodings.
                key = key.decode(_encoding)
        elif isinstance(key, basestring):
            pass
        # JavaScript is weakly typed for these, so it makes sense to
        # also allow them.  Many encoders seem to do something like this.
        elif isinstance(key, float):
            key = floatstr(key, allow_nan)
        # NOTE(review): bool is a subclass of int, so True/False match the
        # (int, long) branch first and serialize as 'True'/'False' instead
        # of 'true'/'false' -- the three branches below are unreachable.
        # (The replacement _make_iterencode checks True/False before ints.)
        elif isinstance(key, (int, long)):
            key = str(key)
        elif key is True:
            key = 'true'
        elif key is False:
            key = 'false'
        elif key is None:
            key = 'null'
        elif self.skipkeys:
            continue
        else:
            raise TypeError("key {0!r} is not a string".format(key))
        if first:
            first = False
        else:
            yield item_separator
        yield encoder(key)
        yield key_separator
        # Values go through the generic encoder.
        for chunk in self._iterencode(value, markers):
            yield chunk
    if newline_indent is not None:
        self.current_indent_level -= 1
        yield self._newline_indent()
    yield '}'
    if markers is not None:
        del markers[markerid]
def _iterencode(self, o, markers=None):
    """Yield JSON text chunks for an arbitrary object *o*.

    Dispatches on type: strings, None/True/False, ints, floats,
    list/tuple, dict; anything else is routed through
    ``_iterencode_default`` (i.e. ``self.default``) with its own
    circular-reference marker entry.
    """
    if isinstance(o, basestring):
        if self.ensure_ascii:
            encoder = encode_basestring_ascii
        else:
            encoder = encode_basestring
        _encoding = self.encoding
        if (_encoding is not None and isinstance(o, str)
                and not (_encoding == 'utf-8')):
            # Byte strings in a non-UTF-8 encoding are decoded first.
            o = o.decode(_encoding)
        yield encoder(o)
    elif o is None:
        yield 'null'
    elif o is True:
        yield 'true'
    elif o is False:
        yield 'false'
    elif isinstance(o, (int, long)):
        yield str(o)
    elif isinstance(o, float):
        # floatstr handles NaN/Infinity according to self.allow_nan.
        yield floatstr(o, self.allow_nan)
    elif isinstance(o, (list, tuple)):
        for chunk in self._iterencode_list(o, markers):
            yield chunk
    elif isinstance(o, dict):
        for chunk in self._iterencode_dict(o, markers):
            yield chunk
    else:
        if markers is not None:
            # Guard against self.default() returning (something containing)
            # the same object and recursing forever.
            markerid = id(o)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = o
        for chunk in self._iterencode_default(o, markers):
            yield chunk
        if markers is not None:
            del markers[markerid]
def _iterencode_default(self, o, markers=None):
    """Coerce *o* with ``self.default()`` and encode the replacement."""
    return self._iterencode(self.default(o), markers)
def default(self, o): def default(self, o):
"""Implement this method in a subclass such that it returns a serializable """Implement this method in a subclass such that it returns
object for ``o``, or calls the base implementation (to raise a a serializable object for ``o``, or calls the base implementation
``TypeError``). (to raise a ``TypeError``).
For example, to support arbitrary iterators, you could implement For example, to support arbitrary iterators, you could
default like this:: implement default like this::
def default(self, o): def default(self, o):
try: try:
@ -364,12 +198,14 @@ class JSONEncoder(object):
# This doesn't pass the iterator directly to ''.join() because the # This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly # exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do. # equivalent to the PySequence_Fast that ''.join() would do.
chunks = list(self.iterencode(o)) chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
return ''.join(chunks) return ''.join(chunks)
def iterencode(self, o): def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string representation as """Encode the given object and yield each string
available. representation as available.
For example:: For example::
@ -381,4 +217,229 @@ class JSONEncoder(object):
markers = {} markers = {}
else: else:
markers = None markers = None
return self._iterencode(o, markers) if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan,
_repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on the
# internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
if (_one_shot and c_make_encoder is not None
and not self.indent and not self.sort_keys):
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot)
return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        False=False,
        True=True,
        ValueError=ValueError,
        basestring=basestring,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    """Build the pure-Python ``_iterencode(o, indent_level)`` generator.

    All encoder configuration is captured in the closure (and the
    builtins are rebound as keyword defaults) so the hot loops use only
    fast local-variable lookups.  *markers* is the shared
    circular-reference table (``id(obj) -> obj``) or None when
    check_circular is off.
    """

    def _iterencode_list(lst, _current_indent_level):
        # Encode a list/tuple.  ``buf`` accumulates the text that must
        # precede the next element (open bracket, then separators) so
        # scalar elements can be emitted as a single chunk.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            else:
                # Containers / default-encoded objects: flush the pending
                # prefix, then delegate to the appropriate sub-generator.
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield ']'
        if markers is not None:
            del markers[markerid]

    def _iterencode_dict(dct, _current_indent_level):
        # Encode a dict.  Non-string keys are coerced to strings; note
        # True/False are tested BEFORE (int, long) so bool keys become
        # 'true'/'false' rather than 'True'/'False' (bool is an int
        # subclass).
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            # Deterministic key order for reproducible output.
            items = dct.items()
            items.sort(key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            # Inline the common scalar cases; recurse only for containers
            # and default-encoded objects.
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield '}'
        if markers is not None:
            del markers[markerid]

    def _iterencode(o, _current_indent_level):
        # Top-level type dispatch; unknown types go through _default()
        # with their own circular-reference marker entry.
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode

View File

@ -1,69 +1,66 @@
"""Iterator based sre token scanner """JSON token scanner
""" """
import re import re
import sre_parse try:
import sre_compile from _json import make_scanner as c_make_scanner
import sre_constants except ImportError:
c_make_scanner = None
from re import VERBOSE, MULTILINE, DOTALL __all__ = ['make_scanner']
from sre_constants import BRANCH, SUBPATTERN
__all__ = ['Scanner', 'pattern'] NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
FLAGS = (VERBOSE | MULTILINE | DOTALL) def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
class Scanner(object): def _scan_once(string, idx):
def __init__(self, lexicon, flags=FLAGS): try:
self.actions = [None] nextchar = string[idx]
# Combine phrases into a compound pattern except IndexError:
s = sre_parse.Pattern() raise StopIteration
s.flags = flags
p = []
for idx, token in enumerate(lexicon):
phrase = token.pattern
try:
subpattern = sre_parse.SubPattern(s,
[(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))])
except sre_constants.error:
raise
p.append(subpattern)
self.actions.append(token)
s.groups = len(p) + 1 # NOTE(guido): Added to make SRE validation work if nextchar == '"':
p = sre_parse.SubPattern(s, [(BRANCH, (None, p))]) return parse_string(string, idx + 1, encoding, strict)
self.scanner = sre_compile.compile(p) elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict,
_scan_once, object_hook)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
def iterscan(self, string, idx=0, context=None): m = match_number(string, idx)
"""Yield match, end_idx for each match if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
""" return _scan_once
match = self.scanner.scanner(string, idx).match
actions = self.actions
lastend = idx
end = len(string)
while True:
m = match()
if m is None:
break
matchbegin, matchend = m.span()
if lastend == matchend:
break
action = actions[m.lastindex]
if action is not None:
rval, next_pos = action(m, context)
if next_pos is not None and next_pos != matchend:
# "fast forward" the scanner
matchend = next_pos
match = self.scanner.scanner(string, matchend).match
yield rval, matchend
lastend = matchend
make_scanner = c_make_scanner or py_make_scanner
def pattern(pattern, flags=FLAGS):
def decorator(fn):
fn.pattern = pattern
fn.regex = re.compile(pattern, flags)
return fn
return decorator

View File

@ -0,0 +1,30 @@
from unittest import TestCase
import json
def default_iterable(obj):
    """``default`` hook: serialize any iterable as a JSON array."""
    return [item for item in obj]
class TestCheckCircular(TestCase):
    """json.dumps must detect self-referential containers."""

    def test_circular_dict(self):
        # A dict holding itself as a value.
        mapping = {}
        mapping['a'] = mapping
        self.assertRaises(ValueError, json.dumps, mapping)

    def test_circular_list(self):
        # A list holding itself as an element.
        seq = []
        seq.append(seq)
        self.assertRaises(ValueError, json.dumps, seq)

    def test_circular_composite(self):
        # An indirect cycle: dict -> list -> same dict.
        outer = {'a': []}
        outer['a'].append(outer)
        self.assertRaises(ValueError, json.dumps, outer)

    def test_circular_default(self):
        # A ``default`` hook makes sets encodable; without it they
        # must still raise TypeError.
        json.dumps([set()], default=default_iterable)
        self.assertRaises(TypeError, json.dumps, [set()])

    def test_circular_off_default(self):
        # Disabling the circularity check must not disable the
        # unsupported-type error.
        json.dumps([set()], default=default_iterable, check_circular=False)
        self.assertRaises(TypeError, json.dumps, [set()], check_circular=False)
self.assertRaises(TypeError, json.dumps, [set()], check_circular=False)

View File

@ -13,3 +13,10 @@ class TestDecode(TestCase):
rval = json.loads('1', parse_int=float) rval = json.loads('1', parse_int=float)
self.assert_(isinstance(rval, float)) self.assert_(isinstance(rval, float))
self.assertEquals(rval, 1.0) self.assertEquals(rval, 1.0)
def test_decoder_optimizations(self):
    """Decode object syntax with unusual whitespace placement."""
    # Several optimizations were made that skip over calls to
    # the whitespace regex, so this test is designed to try and
    # exercise the uncommon cases. The array cases are already covered.
    rval = json.loads('{ "key" : "value" , "k":"v" }')
    self.assertEquals(rval, {"key":"value", "k":"v"})

View File

@ -11,3 +11,11 @@ class TestDump(TestCase):
def test_dumps(self): def test_dumps(self):
self.assertEquals(json.dumps({}), '{}') self.assertEquals(json.dumps({}), '{}')
def test_encode_truefalse(self):
    """Boolean keys serialize as 'true'/'false', not 'True'/'False'."""
    # bool is an int subclass, so the encoder must test True/False
    # before the int branch.  Numeric keys are coerced to strings.
    # NOTE: uses Python 2 long literals (5L, 6L).
    self.assertEquals(json.dumps(
             {True: False, False: True}, sort_keys=True),
             '{"false": true, "true": false}')
    self.assertEquals(json.dumps(
            {2: 3.0, 4.0: 5L, False: 1, 6L: True, "7": 0}, sort_keys=True),
            '{"false": 1, "2": 3.0, "4.0": 5, "6": true, "7": 0}')

View File

@ -26,10 +26,14 @@ class TestEncodeBaseStringAscii(TestCase):
self._test_encode_basestring_ascii(json.encoder.py_encode_basestring_ascii) self._test_encode_basestring_ascii(json.encoder.py_encode_basestring_ascii)
def test_c_encode_basestring_ascii(self): def test_c_encode_basestring_ascii(self):
if not json.encoder.c_encode_basestring_ascii:
return
self._test_encode_basestring_ascii(json.encoder.c_encode_basestring_ascii) self._test_encode_basestring_ascii(json.encoder.c_encode_basestring_ascii)
def _test_encode_basestring_ascii(self, encode_basestring_ascii): def _test_encode_basestring_ascii(self, encode_basestring_ascii):
fname = encode_basestring_ascii.__name__ fname = encode_basestring_ascii.__name__
for input_string, expect in CASES: for input_string, expect in CASES:
result = encode_basestring_ascii(input_string) result = encode_basestring_ascii(input_string)
self.assertEquals(result, expect) self.assertEquals(result, expect,
'{0!r} != {1!r} for {2}({3!r})'.format(
result, expect, fname, input_string))

View File

@ -73,4 +73,4 @@ class TestFail(TestCase):
except ValueError: except ValueError:
pass pass
else: else:
self.fail("Expected failure for fail%d.json: %r" % (idx, doc)) self.fail("Expected failure for fail{0}.json: {1!r}".format(idx, doc))

View File

@ -5,5 +5,11 @@ import json
class TestFloat(TestCase): class TestFloat(TestCase):
def test_floats(self): def test_floats(self):
for num in [1617161771.7650001, math.pi, math.pi**100, math.pi**-100]: for num in [1617161771.7650001, math.pi, math.pi**100, math.pi**-100, 3.1]:
self.assertEquals(float(json.dumps(num)), num) self.assertEquals(float(json.dumps(num)), num)
self.assertEquals(json.loads(json.dumps(num)), num)
def test_ints(self):
    """Ints (including Python 2 longs and >32/64-bit values) round-trip."""
    for num in [1, 1L, 1<<32, 1<<64]:
        self.assertEquals(json.dumps(num), str(num))
        self.assertEquals(int(json.dumps(num)), num)

View File

@ -51,5 +51,14 @@ class TestUnicode(TestCase):
def test_unicode_decode(self): def test_unicode_decode(self):
for i in range(0, 0xd7ff): for i in range(0, 0xd7ff):
u = unichr(i) u = unichr(i)
js = '"\\u{0:04x}"'.format(i) s = '"\\u{0:04x}"'.format(i)
self.assertEquals(json.loads(js), u) self.assertEquals(json.loads(s), u)
def test_default_encoding(self):
    """UTF-8 byte-string input is decoded with the default encoding."""
    self.assertEquals(json.loads(u'{"a": "\xe9"}'.encode('utf-8')),
        {'a': u'\xe9'})
def test_unicode_preservation(self):
    """Decoded strings are unicode even for pure-ASCII input."""
    self.assertEquals(type(json.loads(u'""')), unicode)
    self.assertEquals(type(json.loads(u'"a"')), unicode)
    self.assertEquals(type(json.loads(u'["a"]')[0]), unicode)

View File

@ -2,11 +2,11 @@ r"""Command-line tool to validate and pretty-print JSON
Usage:: Usage::
$ echo '{"json":"obj"}' | python -mjson.tool $ echo '{"json":"obj"}' | python -m json.tool
{ {
"json": "obj" "json": "obj"
} }
$ echo '{ 1.2:3.4}' | python -mjson.tool $ echo '{ 1.2:3.4}' | python -m json.tool
Expecting property name: line 1 column 2 (char 2) Expecting property name: line 1 column 2 (char 2)
""" """
@ -24,7 +24,7 @@ def main():
infile = open(sys.argv[1], 'rb') infile = open(sys.argv[1], 'rb')
outfile = open(sys.argv[2], 'wb') outfile = open(sys.argv[2], 'wb')
else: else:
raise SystemExit("{0} [infile [outfile]]".format(sys.argv[0])) raise SystemExit(sys.argv[0] + " [infile [outfile]]")
try: try:
obj = json.load(infile) obj = json.load(infile)
except ValueError, e: except ValueError, e:

File diff suppressed because it is too large Load Diff