Merged revisions 62734,62736,62748,62769 via svnmerge from

svn+ssh://pythondev@svn.python.org/python/trunk

........
  r62734 | brett.cannon | 2008-05-05 22:21:38 +0200 (Mon, 05 May 2008) | 5 lines

  Add the 'json' package. Code taken from simplejson 1.9 and contributed by Bob
  Ippolito.

  Closes issue #2750.
........
  r62736 | georg.brandl | 2008-05-05 22:53:39 +0200 (Mon, 05 May 2008) | 2 lines

  Fix JSON module docs.
........
  r62748 | benjamin.peterson | 2008-05-06 04:51:10 +0200 (Tue, 06 May 2008) | 2 lines

  PEP 8 nits in json package
........
  r62769 | christian.heimes | 2008-05-06 18:18:41 +0200 (Tue, 06 May 2008) | 2 lines

  Intern static string
  Use float constructors instead of magic code for float constants
........
Christian Heimes 2008-05-08 14:29:10 +00:00
parent c848655eb0
commit 90540004d3
33 changed files with 2854 additions and 1 deletion

Doc/library/json.rst Normal file

@@ -0,0 +1,397 @@
:mod:`json` --- JSON encoder and decoder
========================================
.. module:: json
:synopsis: Encode and decode the JSON format.
.. moduleauthor:: Bob Ippolito <bob@redivi.com>
.. sectionauthor:: Bob Ippolito <bob@redivi.com>
.. versionadded:: 2.6
`JSON (JavaScript Object Notation) <http://json.org>`_ is a subset of JavaScript
syntax (ECMA-262 3rd edition) used as a lightweight data interchange format.
:mod:`json` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules.
Encoding basic Python object hierarchies::
>>> import json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import json
>>> print json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
{
    "4": 5,
    "6": 7
}
Decoding JSON::
>>> import json
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
[u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('"\\"foo\\bar"')
u'"foo\x08ar'
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)
[u'streaming API']
Specializing JSON object decoding::
>>> import json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> json.loads('1.1', parse_float=decimal.Decimal)
Decimal('1.1')
Extending :class:`JSONEncoder`::
>>> import json
>>> class ComplexEncoder(json.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return json.JSONEncoder.default(self, obj)
...
>>> json.dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> list(ComplexEncoder().iterencode(2 + 1j))
['[', '2.0', ', ', '1.0', ']']
.. highlight:: none
Using json.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -mjson.tool
{
    "json": "obj"
}
$ echo '{ 1.2:3.4}' | python -mjson.tool
Expecting property name: line 1 column 2 (char 2)
.. highlight:: python
.. note::
The JSON produced by this module's default settings is a subset of
YAML, so it may be used as a serializer for that as well.
Basic Usage
-----------
.. function:: dump(obj, fp[, skipkeys[, ensure_ascii[, check_circular[, allow_nan[, cls[, indent[, separators[, encoding[, default[, **kw]]]]]]]]]])
Serialize *obj* as a JSON formatted stream to *fp* (a ``.write()``-supporting
file-like object).
If *skipkeys* is ``True`` (default: ``False``), then dict keys that are not
of a basic type (:class:`str`, :class:`unicode`, :class:`int`, :class:`long`,
:class:`float`, :class:`bool`, ``None``) will be skipped instead of raising a
:exc:`TypeError`.
If *ensure_ascii* is ``False`` (default: ``True``), then some chunks written
to *fp* may be :class:`unicode` instances, subject to normal Python
:class:`str` to :class:`unicode` coercion rules. Unless ``fp.write()``
explicitly understands :class:`unicode` (as in :func:`codecs.getwriter`) this
is likely to cause an error.
If *check_circular* is ``False`` (default: ``True``), then the circular
reference check for container types will be skipped and a circular reference
will result in an :exc:`OverflowError` (or worse).
If *allow_nan* is ``False`` (default: ``True``), then it will be a
:exc:`ValueError` to serialize out of range :class:`float` values (``nan``,
``inf``, ``-inf``) in strict compliance of the JSON specification, instead of
using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If *indent* is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level of 0
will only insert newlines. ``None`` (the default) selects the most compact
representation.
If *separators* is an ``(item_separator, dict_separator)`` tuple, then it
will be used instead of the default ``(', ', ': ')`` separators. ``(',',
':')`` is the most compact JSON representation.
*encoding* is the character encoding for str instances, default is UTF-8.
*default(obj)* is a function that should return a serializable version of
*obj* or raise :exc:`TypeError`. The default simply raises :exc:`TypeError`.
To use a custom :class:`JSONEncoder` subclass (e.g. one that overrides the
:meth:`default` method to serialize additional types), specify it with the
*cls* kwarg.
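A minimal sketch of *skipkeys*, assuming otherwise-default settings::
>>> import json
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump({'valid': 1, (1, 2): 2}, io, skipkeys=True)
>>> io.getvalue()
'{"valid": 1}'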
.. function:: dumps(obj[, skipkeys[, ensure_ascii[, check_circular[, allow_nan[, cls[, indent[, separators[, encoding[, default[, **kw]]]]]]]]]])
Serialize *obj* to a JSON formatted :class:`str`.
If *ensure_ascii* is ``False``, then the return value will be a
:class:`unicode` instance. The other arguments have the same meaning as in
:func:`dump`.
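For instance, a minimal sketch of the *ensure_ascii* behaviour::
>>> json.dumps(u'\u00e9', ensure_ascii=False)
u'"\xe9"'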
.. function:: load(fp[, encoding[, cls[, object_hook[, parse_float[, parse_int[, parse_constant[, **kw]]]]]]])
Deserialize *fp* (a ``.read()``-supporting file-like object containing a JSON
document) to a Python object.
If the contents of *fp* are encoded with an ASCII based encoding other than
UTF-8 (e.g. latin-1), then an appropriate *encoding* name must be specified.
Encodings that are not ASCII based (such as UCS-2) are not allowed, and
should be wrapped with ``codecs.getreader(encoding)(fp)``, or simply decoded
to a :class:`unicode` object and passed to :func:`loads`.
*object_hook* is an optional function that will be called with the result of
any object literal decode (a :class:`dict`). The return value of
*object_hook* will be used instead of the :class:`dict`. This feature can be used
to implement custom decoders (e.g. JSON-RPC class hinting).
*parse_float*, if specified, will be called with the string of every JSON
float to be decoded. By default, this is equivalent to ``float(num_str)``.
This can be used to use another datatype or parser for JSON floats
(e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every JSON int
to be decoded. By default, this is equivalent to ``int(num_str)``. This can
be used to use another datatype or parser for JSON integers
(e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the following
strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``, ``'null'``, ``'true'``,
``'false'``. This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom :class:`JSONDecoder` subclass, specify it with the ``cls``
kwarg. Additional keyword arguments will be passed to the constructor of the
class.
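A minimal sketch of *parse_constant*; note that in this version the hook is
also called for ``'null'``, ``'true'`` and ``'false'``, so a real hook must
handle those as well::
>>> from StringIO import StringIO
>>> def forbid(name):
...     raise ValueError('forbidden JSON constant: %s' % name)
...
>>> json.load(StringIO('[Infinity]'), parse_constant=forbid)
Traceback (most recent call last):
...
ValueError: forbidden JSON constant: Infinity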
.. function:: loads(s[, encoding[, cls[, object_hook[, parse_float[, parse_int[, parse_constant[, **kw]]]]]]])
Deserialize *s* (a :class:`str` or :class:`unicode` instance containing a JSON
document) to a Python object.
If *s* is a :class:`str` instance and is encoded with an ASCII based encoding
other than UTF-8 (e.g. latin-1), then an appropriate *encoding* name must be
specified. Encodings that are not ASCII based (such as UCS-2) are not
allowed and should be decoded to :class:`unicode` first.
The other arguments have the same meaning as in :func:`load`.
Encoders and decoders
---------------------
.. class:: JSONDecoder([encoding[, object_hook[, parse_float[, parse_int[, parse_constant[, strict]]]]]])
Simple JSON decoder.
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as their
corresponding ``float`` values, which is outside the JSON spec.
*encoding* determines the encoding used to interpret any :class:`str` objects
decoded by this instance (UTF-8 by default). It has no effect when decoding
:class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work; strings
of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every JSON
object decoded and its return value will be used in place of the given
:class:`dict`. This can be used to provide custom deserializations (e.g. to
support JSON-RPC class hinting).
*parse_float*, if specified, will be called with the string of every JSON
float to be decoded. By default, this is equivalent to ``float(num_str)``.
This can be used to use another datatype or parser for JSON floats
(e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every JSON int
to be decoded. By default, this is equivalent to ``int(num_str)``. This can
be used to use another datatype or parser for JSON integers
(e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the following
strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``, ``'null'``, ``'true'``,
``'false'``. This can be used to raise an exception if invalid JSON numbers
are encountered.
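A minimal usage sketch, reusing the *parse_float* hook described above::
>>> import decimal
>>> decoder = json.JSONDecoder(parse_float=decimal.Decimal)
>>> decoder.decode('[1.1, 2]')
[Decimal('1.1'), 2]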
.. method:: decode(s)
Return the Python representation of *s* (a :class:`str` or
:class:`unicode` instance containing a JSON document).
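For example, a minimal sketch::
>>> json.JSONDecoder().decode('{"foo": ["bar"]}')
{u'foo': [u'bar']}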
.. method:: raw_decode(s)
Decode a JSON document from *s* (a :class:`str` or :class:`unicode`
beginning with a JSON document) and return a 2-tuple of the Python
representation and the index in *s* where the document ended.
This can be used to decode a JSON document from a string that may have
extraneous data at the end.
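A minimal sketch, decoding a document followed by trailing data::
>>> json.JSONDecoder().raw_decode('[1, 2] trailing garbage')
([1, 2], 6)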
.. class:: JSONEncoder([skipkeys[, ensure_ascii[, check_circular[, allow_nan[, sort_keys[, indent[, separators[, encoding[, default]]]]]]]]])
Extensible JSON encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
:meth:`default` method that returns a serializable object for ``o`` if
possible; otherwise it should call the superclass implementation (to raise
:exc:`TypeError`).
If *skipkeys* is ``False`` (the default), then it is a :exc:`TypeError` to
attempt encoding of keys that are not str, int, long, float or None. If
*skipkeys* is ``True``, such items are simply skipped.
If *ensure_ascii* is ``True`` (the default), the output is guaranteed to be
:class:`str` objects with all incoming unicode characters escaped. If
*ensure_ascii* is ``False``, the output will be a unicode object.
If *check_circular* is ``True`` (the default), then lists, dicts, and custom
encoded objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an :exc:`OverflowError`).
Otherwise, no such check takes place.
If *allow_nan* is ``True`` (the default), then ``NaN``, ``Infinity``, and
``-Infinity`` will be encoded as such. This behavior is not JSON
specification compliant, but is consistent with most JavaScript based
encoders and decoders. Otherwise, it will be a :exc:`ValueError` to encode
such floats.
If *sort_keys* is ``True`` (default: ``False``), then the output of
dictionaries will be sorted by key; this is useful for regression tests to
ensure that JSON serializations can be compared on a day-to-day basis.
If *indent* is a non-negative integer (it is ``None`` by default), then JSON
array elements and object members will be pretty-printed with that indent
level. An indent level of 0 will only insert newlines. ``None`` is the most
compact representation.
If specified, *separators* should be an ``(item_separator, key_separator)``
tuple. The default is ``(', ', ': ')``. To get the most compact JSON
representation, you should specify ``(',', ':')`` to eliminate whitespace.
If specified, *default* is a function that gets called for objects that can't
otherwise be serialized. It should return a JSON encodable version of the
object or raise a :exc:`TypeError`.
If *encoding* is not ``None``, then all input strings will be transformed
into unicode using that encoding prior to JSON-encoding. The default is
UTF-8.
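A minimal sketch combining *sort_keys* and *separators*::
>>> json.JSONEncoder(sort_keys=True, separators=(',', ':')).encode({'b': 1, 'a': 2})
'{"a":2,"b":1}'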
.. method:: default(o)
Implement this method in a subclass such that it returns a serializable
object for *o*, or calls the base implementation (to raise a
:exc:`TypeError`).
For example, to support arbitrary iterators, you could implement default
like this::
def default(self, o):
    try:
        iterable = iter(o)
    except TypeError:
        pass
    else:
        return list(iterable)
    return JSONEncoder.default(self, o)
.. method:: encode(o)
Return a JSON string representation of a Python data structure, *o*. For
example::
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
.. method:: iterencode(o)
Encode the given object, *o*, and yield each string representation as
available. For example::
for chunk in JSONEncoder().iterencode(bigobject):
    mysocket.write(chunk)

@@ -12,6 +12,7 @@ on the Internet.
.. toctree::
email.rst
json.rst
mailcap.rst
mailbox.rst
mhlib.rst

@@ -1210,6 +1210,7 @@ Mullender that was in Python's :file:`Demo/classes/` directory for a
long time. This implementation was significantly updated by Jeffrey
Yasskin.
Other Language Changes
======================
@@ -2146,6 +2147,31 @@ complete list of changes, or look through the CVS logs for all the details.
.. ======================================================================
.. whole new modules get described in subsections here
The :mod:`json` module
----------------------
The new :mod:`json` module supports the encoding and decoding of Python types in
JSON (JavaScript Object Notation). JSON is a lightweight interchange format
often used in web applications. For more information about JSON, see
http://www.json.org.
:mod:`json` comes with support for decoding and encoding most built-in Python
types. The following example encodes and decodes a dictionary::
>>> import json
>>> data = {"spam" : "foo", "parrot" : 42}
>>> in_json = json.dumps(data) # Encode the data
>>> in_json
'{"parrot": 42, "spam": "foo"}'
>>> json.loads(in_json) # Decode into a Python object
{"spam" : "foo", "parrot" : 42}
It is also possible to write your own decoders and encoders to support more
types. Pretty-printing of the JSON strings is also supported.
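For instance, a brief sketch of pretty-printing, reusing ``data`` from the
example above::
>>> print json.dumps(data, sort_keys=True, indent=4)
{
    "parrot": 42,
    "spam": "foo"
}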
:mod:`json` (originally called simplejson) was written by Bob Ippolito.
Improved SSL Support
--------------------------------------------------

Lib/json/__init__.py Normal file

@@ -0,0 +1,318 @@
r"""A simple, fast, extensible JSON encoder and decoder
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
json exposes an API familiar to users of the standard library
marshal and pickle modules.
Encoding basic Python object hierarchies::
>>> import json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print(json.dumps("\"foo\bar"))
"\"foo\bar"
>>> print(json.dumps('\u1234'))
"\u1234"
>>> print(json.dumps('\\'))
"\\"
>>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True))
{"a": 0, "b": 0, "c": 0}
>>> from io import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing (using repr() because of extraneous whitespace in the output)::
>>> import json
>>> print(repr(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)))
'{\n "4": 5, \n "6": 7\n}'
Decoding JSON::
>>> import json
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
['foo', {'bar': ['baz', None, 1.0, 2]}]
>>> json.loads('"\\"foo\\bar"')
'"foo\x08ar'
>>> from io import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)
['streaming API']
Specializing JSON object decoding::
>>> import json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> json.loads('1.1', parse_float=decimal.Decimal)
Decimal('1.1')
Extending JSONEncoder::
>>> import json
>>> class ComplexEncoder(json.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return json.JSONEncoder.default(self, obj)
...
>>> json.dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> list(ComplexEncoder().iterencode(2 + 1j))
['[', '2.0', ', ', '1.0', ']']
Using json.tool from the shell to validate and
pretty-print::
$ echo '{"json":"obj"}' | python -mjson.tool
{
    "json": "obj"
}
$ echo '{ 1.2:3.4}' | python -mjson.tool
Expecting property name: line 1 column 2 (char 2)
Note that the JSON produced by this module's default settings
is a subset of YAML, so it may be used as a serializer for that as well.
"""
__version__ = '1.9'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from .decoder import JSONDecoder
from .encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object
containing a JSON document) to a Python object.
If the contents of ``fp`` are encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
be specified. Encodings that are not ASCII based (such as UCS-2) are
not allowed, and should be wrapped with
``codecs.getreader(encoding)(fp)``, or simply decoded to a ``unicode``
object and passed to ``loads()``.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s)

Lib/json/decoder.py Normal file

@@ -0,0 +1,339 @@
"""Implementation of JSONDecoder
"""
import re
import sys
from json.scanner import Scanner, pattern
try:
from _json import scanstring as c_scanstring
except ImportError:
c_scanstring = None
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
NaN, PosInf, NegInf = float('nan'), float('inf'), float('-inf')
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
lineno, colno = linecol(doc, pos)
if end is None:
fmt = '{0}: line {1} column {2} (char {3})'
return fmt.format(msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
'true': True,
'false': False,
'null': None,
}
def JSONConstant(match, context, c=_CONSTANTS):
s = match.group(0)
fn = getattr(context, 'parse_constant', None)
if fn is None:
rval = c[s]
else:
rval = fn(s)
return rval, None
pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant)
def JSONNumber(match, context):
match = JSONNumber.regex.match(match.string, *match.span())
integer, frac, exp = match.groups()
if frac or exp:
fn = getattr(context, 'parse_float', None) or float
res = fn(integer + (frac or '') + (exp or ''))
else:
fn = getattr(context, 'parse_int', None) or int
res = fn(integer)
return res, None
pattern(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?')(JSONNumber)
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': '"', '\\': '\\', '/': '/',
'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
if content:
if not isinstance(content, str):
content = str(content, encoding)
_append(content)
if terminator == '"':
break
elif terminator != '\\':
if strict:
msg = "Invalid control character {0!r} at".format(terminator)
raise ValueError(errmsg(msg, s, end))
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
if esc != 'u':
try:
m = _b[esc]
except KeyError:
msg = "Invalid \\escape: {0!r}".format(esc)
raise ValueError(errmsg(msg, s, end))
end += 1
else:
esc = s[end + 1:end + 5]
next_end = end + 5
msg = "Invalid \\uXXXX escape"
try:
if len(esc) != 4:
raise ValueError
uni = int(esc, 16)
if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
if not s[end + 5:end + 7] == '\\u':
raise ValueError
esc2 = s[end + 7:end + 11]
if len(esc2) != 4:
raise ValueError
uni2 = int(esc2, 16)
uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
next_end += 6
m = chr(uni)
except ValueError:
raise ValueError(errmsg(msg, s, end))
end = next_end
_append(m)
return ''.join(chunks), end
# Use speedup
if c_scanstring is not None:
scanstring = c_scanstring
else:
scanstring = py_scanstring
def JSONString(match, context):
encoding = getattr(context, 'encoding', None)
strict = getattr(context, 'strict', True)
return scanstring(match.string, match.end(), encoding, strict)
pattern(r'"')(JSONString)
WHITESPACE = re.compile(r'\s*', FLAGS)
def JSONObject(match, context, _w=WHITESPACE.match):
pairs = {}
s = match.string
end = _w(s, match.end()).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
return pairs, end + 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
encoding = getattr(context, 'encoding', None)
strict = getattr(context, 'strict', True)
iterscan = JSONScanner.iterscan
while True:
key, end = scanstring(s, end, encoding, strict)
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end = _w(s, end + 1).end()
try:
value, end = next(iterscan(s, idx=end, context=context))
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == '}':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
object_hook = getattr(context, 'object_hook', None)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
pattern(r'{')(JSONObject)
def JSONArray(match, context, _w=WHITESPACE.match):
values = []
s = match.string
end = _w(s, match.end()).end()
# Look-ahead for trivial empty array
nextchar = s[end:end + 1]
if nextchar == ']':
return values, end + 1
iterscan = JSONScanner.iterscan
while True:
try:
value, end = next(iterscan(s, idx=end, context=context))
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
values.append(value)
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
end = _w(s, end).end()
return values, end
pattern(r'\[')(JSONArray)
ANYTHING = [
JSONObject,
JSONArray,
JSONString,
JSONConstant,
JSONNumber,
]
JSONScanner = Scanner(ANYTHING)
class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
_scanner = Scanner(ANYTHING)
__all__ = ['__init__', 'decode', 'raw_decode']
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True):
"""``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
Note that currently only encodings that are a superset of ASCII work;
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result of
every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid JSON numbers
are encountered.
"""
self.encoding = encoding
self.object_hook = object_hook
self.parse_float = parse_float
self.parse_int = parse_int
self.parse_constant = parse_constant
self.strict = strict
def decode(self, s, _w=WHITESPACE.match):
"""
Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, **kw):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
kw.setdefault('context', self)
try:
obj, end = next(self._scanner.iterscan(s, **kw))
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end

Lib/json/encoder.py Normal file

@@ -0,0 +1,384 @@
"""Implementation of JSONEncoder
"""
import re
try:
from _json import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
__all__ = ['JSONEncoder']
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr
def floatstr(o, allow_nan=True):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == INFINITY:
text = 'Infinity'
elif o == -INFINITY:
text = '-Infinity'
else:
return FLOAT_REPR(o)
if not allow_nan:
msg = "Out of range float values are not JSON compliant: " + repr(o)
raise ValueError(msg)
return text
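# A small behaviour sketch of floatstr (NaN is the only float that compares
# unequal to itself, which is what the ``o != o`` test above relies on):
#
#   floatstr(float('nan'))                   -> 'NaN'
#   floatstr(float('inf'))                   -> 'Infinity'
#   floatstr(1.5)                            -> '1.5'
#   floatstr(float('nan'), allow_nan=False)  -> raises ValueError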
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def py_encode_basestring_ascii(s):
if isinstance(s, bytes): # and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u{0:04x}'.format(n)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
return '"' + (ESCAPE_ASCII.sub(replace, s)) + '"'
if c_encode_basestring_ascii is not None:
encode_basestring_ascii = c_encode_basestring_ascii
else:
encode_basestring_ascii = py_encode_basestring_ascii
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method that returns a serializable object for ``o`` if
possible; otherwise it should call the superclass implementation
(to raise ``TypeError``).
"""
__all__ = ['__init__', 'default', 'encode', 'iterencode']
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is False, the output will be a unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be an (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
self.current_indent_level = 0
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def _newline_indent(self):
return '\n' + (' ' * (self.indent * self.current_indent_level))
def _iterencode_list(self, lst, markers=None):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
yield '['
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
separator = self.item_separator
first = True
for value in lst:
if first:
first = False
else:
yield separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(self, dct, markers=None):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
key_separator = self.key_separator
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
item_separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = self.item_separator
first = True
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
allow_nan = self.allow_nan
if self.sort_keys:
keys = list(dct.keys())
keys.sort()
items = [(k, dct[k]) for k in keys]
else:
items = iter(dct.items())
_encoding = self.encoding
_do_decode = (_encoding is not None
and not (_encoding == 'utf-8'))
for key, value in items:
if isinstance(key, bytes):
if _do_decode:
key = key.decode(_encoding)
elif isinstance(key, str):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = floatstr(key, allow_nan)
elif isinstance(key, int):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif self.skipkeys:
continue
else:
raise TypeError("key {0!r} is not a string".format(key))
if first:
first = False
else:
yield item_separator
yield encoder(key)
yield key_separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(self, o, markers=None):
if isinstance(o, (str, bytes)):
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
_encoding = self.encoding
if (_encoding is not None and isinstance(o, bytes)
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
yield encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, int):
yield str(o)
elif isinstance(o, float):
yield floatstr(o, self.allow_nan)
elif isinstance(o, (list, tuple)):
for chunk in self._iterencode_list(o, markers):
yield chunk
elif isinstance(o, dict):
for chunk in self._iterencode_dict(o, markers):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
for chunk in self._iterencode_default(o, markers):
yield chunk
if markers is not None:
del markers[markerid]
def _iterencode_default(self, o, markers=None):
newobj = self.default(o)
return self._iterencode(newobj, markers)
def default(self, o):
"""Implement this method in a subclass such that it returns a serializable
object for ``o``, or calls the base implementation (to raise a
``TypeError``).
For example, to support arbitrary iterators, you could implement
default like this::
def default(self, o):
    try:
        iterable = iter(o)
    except TypeError:
        pass
    else:
        return list(iterable)
    return JSONEncoder.default(self, o)
"""
raise TypeError(repr(o) + " is not JSON serializable")
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, (str, bytes)):
if isinstance(o, bytes):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = list(self.iterencode(o))
return ''.join(chunks)
def iterencode(self, o):
"""Encode the given object and yield each string representation as
available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
    mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
return self._iterencode(o, markers)

Lib/json/scanner.py Normal file

@@ -0,0 +1,69 @@
"""Iterator based sre token scanner
"""
import re
import sre_parse
import sre_compile
import sre_constants
from re import VERBOSE, MULTILINE, DOTALL
from sre_constants import BRANCH, SUBPATTERN
__all__ = ['Scanner', 'pattern']
FLAGS = (VERBOSE | MULTILINE | DOTALL)
class Scanner(object):
def __init__(self, lexicon, flags=FLAGS):
self.actions = [None]
# Combine phrases into a compound pattern
s = sre_parse.Pattern()
s.flags = flags
p = []
for idx, token in enumerate(lexicon):
phrase = token.pattern
try:
subpattern = sre_parse.SubPattern(s,
[(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))])
except sre_constants.error:
raise
p.append(subpattern)
self.actions.append(token)
s.groups = len(p) + 1 # NOTE(guido): Added to make SRE validation work
p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
self.scanner = sre_compile.compile(p)
def iterscan(self, string, idx=0, context=None):
"""Yield match, end_idx for each match
"""
match = self.scanner.scanner(string, idx).match
actions = self.actions
lastend = idx
end = len(string)
while True:
m = match()
if m is None:
break
matchbegin, matchend = m.span()
if lastend == matchend:
break
action = actions[m.lastindex]
if action is not None:
rval, next_pos = action(m, context)
if next_pos is not None and next_pos != matchend:
# "fast forward" the scanner
matchend = next_pos
match = self.scanner.scanner(string, matchend).match
yield rval, matchend
lastend = matchend
def pattern(pattern, flags=FLAGS):
def decorator(fn):
fn.pattern = pattern
fn.regex = re.compile(pattern, flags)
return fn
return decorator
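# A minimal sketch of how ``pattern`` and ``Scanner`` fit together, using a
# hypothetical integer token (not one of the JSON actions defined elsewhere):
#
#   @pattern(r'-?\d+')
#   def Integer(match, context):
#       return int(match.group(0)), None
#
#   scanner = Scanner([Integer])
#   next(scanner.iterscan('42'))  # -> (42, 2)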

@@ -0,0 +1,35 @@
import os
import sys
import unittest
import doctest
here = os.path.dirname(__file__)
def test_suite():
suite = additional_tests()
loader = unittest.TestLoader()
for fn in os.listdir(here):
if fn.startswith("test") and fn.endswith(".py"):
modname = "json.tests." + fn[:-3]
__import__(modname)
module = sys.modules[modname]
suite.addTests(loader.loadTestsFromModule(module))
return suite
def additional_tests():
import json
import json.encoder
import json.decoder
suite = unittest.TestSuite()
for mod in (json, json.encoder, json.decoder):
suite.addTest(doctest.DocTestSuite(mod))
return suite
def main():
suite = test_suite()
runner = unittest.TextTestRunner()
runner.run(suite)
if __name__ == '__main__':
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
main()

@@ -0,0 +1,15 @@
import decimal
from unittest import TestCase
import json
class TestDecode(TestCase):
def test_decimal(self):
rval = json.loads('1.1', parse_float=decimal.Decimal)
self.assert_(isinstance(rval, decimal.Decimal))
self.assertEquals(rval, decimal.Decimal('1.1'))
def test_float(self):
rval = json.loads('1', parse_int=float)
self.assert_(isinstance(rval, float))
self.assertEquals(rval, 1.0)

@@ -0,0 +1,9 @@
from unittest import TestCase
import json
class TestDefault(TestCase):
def test_default(self):
self.assertEquals(
json.dumps(type, default=repr),
json.dumps(repr(type)))

@@ -0,0 +1,13 @@
from unittest import TestCase
from io import StringIO
import json
class TestDump(TestCase):
def test_dump(self):
sio = StringIO()
json.dump({}, sio)
self.assertEquals(sio.getvalue(), '{}')
def test_dumps(self):
self.assertEquals(json.dumps({}), '{}')

@@ -0,0 +1,37 @@
from unittest import TestCase
import json.encoder
CASES = [
('/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', b'"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'),
('\u0123\u4567\u89ab\ucdef\uabcd\uef4a', b'"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
('controls', b'"controls"'),
('\x08\x0c\n\r\t', b'"\\b\\f\\n\\r\\t"'),
('{"object with 1 member":["array with 1 element"]}', b'"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'),
(' s p a c e d ', b'" s p a c e d "'),
('\U0001d120', b'"\\ud834\\udd20"'),
('\u03b1\u03a9', b'"\\u03b1\\u03a9"'),
(b'\xce\xb1\xce\xa9', b'"\\u03b1\\u03a9"'),
('\u03b1\u03a9', b'"\\u03b1\\u03a9"'),
(b'\xce\xb1\xce\xa9', b'"\\u03b1\\u03a9"'),
('\u03b1\u03a9', b'"\\u03b1\\u03a9"'),
('\u03b1\u03a9', b'"\\u03b1\\u03a9"'),
("`1~!@#$%^&*()_+-={':[,]}|;.</>?", b'"`1~!@#$%^&*()_+-={\':[,]}|;.</>?"'),
('\x08\x0c\n\r\t', b'"\\b\\f\\n\\r\\t"'),
('\u0123\u4567\u89ab\ucdef\uabcd\uef4a', b'"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
]
class TestEncodeBaseStringAscii(TestCase):
def test_py_encode_basestring_ascii(self):
self._test_encode_basestring_ascii(json.encoder.py_encode_basestring_ascii)
def test_c_encode_basestring_ascii(self):
if json.encoder.c_encode_basestring_ascii is not None:
self._test_encode_basestring_ascii(json.encoder.c_encode_basestring_ascii)
def _test_encode_basestring_ascii(self, encode_basestring_ascii):
fname = encode_basestring_ascii.__name__
for input_string, expect in CASES:
result = encode_basestring_ascii(input_string)
result = result.encode("ascii")
self.assertEquals(result, expect)

@@ -0,0 +1,76 @@
from unittest import TestCase
import json
# Fri Dec 30 18:57:26 2005
JSONDOCS = [
# http://json.org/JSON_checker/test/fail1.json
'"A JSON payload should be an object or array, not a string."',
# http://json.org/JSON_checker/test/fail2.json
'["Unclosed array"',
# http://json.org/JSON_checker/test/fail3.json
'{unquoted_key: "keys must be quoted}',
# http://json.org/JSON_checker/test/fail4.json
'["extra comma",]',
# http://json.org/JSON_checker/test/fail5.json
'["double extra comma",,]',
# http://json.org/JSON_checker/test/fail6.json
'[ , "<-- missing value"]',
# http://json.org/JSON_checker/test/fail7.json
'["Comma after the close"],',
# http://json.org/JSON_checker/test/fail8.json
'["Extra close"]]',
# http://json.org/JSON_checker/test/fail9.json
'{"Extra comma": true,}',
# http://json.org/JSON_checker/test/fail10.json
'{"Extra value after close": true} "misplaced quoted value"',
# http://json.org/JSON_checker/test/fail11.json
'{"Illegal expression": 1 + 2}',
# http://json.org/JSON_checker/test/fail12.json
'{"Illegal invocation": alert()}',
# http://json.org/JSON_checker/test/fail13.json
'{"Numbers cannot have leading zeroes": 013}',
# http://json.org/JSON_checker/test/fail14.json
'{"Numbers cannot be hex": 0x14}',
# http://json.org/JSON_checker/test/fail15.json
'["Illegal backslash escape: \\x15"]',
# http://json.org/JSON_checker/test/fail16.json
'["Illegal backslash escape: \\\'"]',
# http://json.org/JSON_checker/test/fail17.json
'["Illegal backslash escape: \\017"]',
# http://json.org/JSON_checker/test/fail18.json
'[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]',
# http://json.org/JSON_checker/test/fail19.json
'{"Missing colon" null}',
# http://json.org/JSON_checker/test/fail20.json
'{"Double colon":: null}',
# http://json.org/JSON_checker/test/fail21.json
'{"Comma instead of colon", null}',
# http://json.org/JSON_checker/test/fail22.json
'["Colon instead of comma": false]',
# http://json.org/JSON_checker/test/fail23.json
'["Bad value", truth]',
# http://json.org/JSON_checker/test/fail24.json
"['single quote']",
# http://code.google.com/p/simplejson/issues/detail?id=3
'["A\u001FZ control characters in string"]',
]
SKIPS = {
1: "why not have a string payload?",
18: "spec doesn't specify any nesting limitations",
}
class TestFail(TestCase):
def test_failures(self):
for idx, doc in enumerate(JSONDOCS):
idx = idx + 1
if idx in SKIPS:
json.loads(doc)
continue
try:
json.loads(doc)
except ValueError:
pass
else:
self.fail("Expected failure for fail%d.json: %r" % (idx, doc))

@@ -0,0 +1,9 @@
import math
from unittest import TestCase
import json
class TestFloat(TestCase):
def test_floats(self):
for num in [1617161771.7650001, math.pi, math.pi**100, math.pi**-100]:
self.assertEquals(float(json.dumps(num)), num)

@@ -0,0 +1,41 @@
from unittest import TestCase
import json
import textwrap
class TestIndent(TestCase):
def test_indent(self):
h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
{'nifty': 87}, {'field': 'yes', 'morefield': False} ]
expect = textwrap.dedent("""\
[
  [
    "blorpie"
  ],
  [
    "whoops"
  ],
  [],
  "d-shtaeou",
  "d-nthiouh",
  "i-vhbjkhnth",
  {
    "nifty": 87
  },
  {
    "field": "yes",
    "morefield": false
  }
]""")
d1 = json.dumps(h)
d2 = json.dumps(h, indent=2, sort_keys=True, separators=(',', ': '))
h1 = json.loads(d1)
h2 = json.loads(d2)
self.assertEquals(h1, h)
self.assertEquals(h2, h)
self.assertEquals(d2, expect)

@@ -0,0 +1,76 @@
from unittest import TestCase
import json
# from http://json.org/JSON_checker/test/pass1.json
JSON = r'''
[
"JSON Test Pattern pass1",
{"object with 1 member":["array with 1 element"]},
{},
[],
-42,
true,
false,
null,
{
"integer": 1234567890,
"real": -9876.543210,
"e": 0.123456789e-12,
"E": 1.234567890E+34,
"": 23456789012E666,
"zero": 0,
"one": 1,
"space": " ",
"quote": "\"",
"backslash": "\\",
"controls": "\b\f\n\r\t",
"slash": "/ & \/",
"alpha": "abcdefghijklmnopqrstuvwyz",
"ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
"digit": "0123456789",
"special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
"hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
"true": true,
"false": false,
"null": null,
"array":[ ],
"object":{ },
"address": "50 St. James Street",
"url": "http://www.JSON.org/",
"comment": "// /* <!-- --",
"# -- --> */": " ",
" s p a c e d " :[1,2 , 3
,
4 , 5 , 6 ,7 ],
"compact": [1,2,3,4,5,6,7],
"jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
"quotes": "&#34; \u0022 %22 0x22 034 &#x22;",
"\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
: "A key can be any string"
},
0.5 ,98.6
,
99.44
,
1066
,"rosebud"]
'''
class TestPass1(TestCase):
def test_parse(self):
# test in/out equivalence and parsing
res = json.loads(JSON)
out = json.dumps(res)
self.assertEquals(res, json.loads(out))
try:
json.dumps(res, allow_nan=False)
except ValueError:
pass
else:
self.fail("23456789012E666 should be out of range")

@@ -0,0 +1,14 @@
from unittest import TestCase
import json
# from http://json.org/JSON_checker/test/pass2.json
JSON = r'''
[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]]
'''
class TestPass2(TestCase):
def test_parse(self):
# test in/out equivalence and parsing
res = json.loads(JSON)
out = json.dumps(res)
self.assertEquals(res, json.loads(out))

20
Lib/json/tests/test_pass3.py Normal file

@@ -0,0 +1,20 @@
from unittest import TestCase
import json
# from http://json.org/JSON_checker/test/pass3.json
JSON = r'''
{
"JSON Test Pattern pass3": {
"The outermost value": "must be an object or array.",
"In this test": "It is an object."
}
}
'''
class TestPass3(TestCase):
def test_parse(self):
# test in/out equivalence and parsing
res = json.loads(JSON)
out = json.dumps(res)
self.assertEquals(res, json.loads(out))

67
Lib/json/tests/test_recursion.py Normal file

@@ -0,0 +1,67 @@
from unittest import TestCase
import json
class JSONTestObject:
pass
class RecursiveJSONEncoder(json.JSONEncoder):
recurse = False
def default(self, o):
if o is JSONTestObject:
if self.recurse:
return [JSONTestObject]
else:
return 'JSONTestObject'
return json.JSONEncoder.default(self, o)
class TestRecursion(TestCase):
def test_listrecursion(self):
x = []
x.append(x)
try:
json.dumps(x)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on list recursion")
x = []
y = [x]
x.append(y)
try:
json.dumps(x)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on alternating list recursion")
y = []
x = [y, y]
# ensure that the marker is cleared
json.dumps(x)
def test_dictrecursion(self):
x = {}
x["test"] = x
try:
json.dumps(x)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on dict recursion")
x = {}
y = {"a": x, "b": x}
# ensure that the marker is cleared
json.dumps(x)
def test_defaultrecursion(self):
enc = RecursiveJSONEncoder()
self.assertEquals(enc.encode(JSONTestObject), '"JSONTestObject"')
enc.recurse = True
try:
enc.encode(JSONTestObject)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on default recursion")

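The two "marker is cleared" cases above probe the encoder's cycle detection: while a container is being encoded its id() is held in a markers mapping, revisiting it raises ValueError, and the entry is dropped once the container completes, so mere aliasing ([y, y]) stays legal. A minimal sketch of that bookkeeping, not the module's actual code::

    def encode_list(lst, markers=None):
        # Illustrative only: mirrors the markers idea, not json's output rules.
        if markers is None:
            markers = {}
        if id(lst) in markers:
            raise ValueError("Circular reference detected")
        markers[id(lst)] = lst
        try:
            return '[%s]' % ', '.join(
                encode_list(x, markers) if isinstance(x, list) else repr(x)
                for x in lst)
        finally:
            del markers[id(lst)]    # cleared on exit, so [y, y] encodes fine

    x = []
    x.append(x)
    try:
        encode_list(x)
    except ValueError:
        pass                        # circular reference detected, as expected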
103
Lib/json/tests/test_scanstring.py Normal file

@@ -0,0 +1,103 @@
import sys
import decimal
from unittest import TestCase
import json.decoder
class TestScanString(TestCase):
def test_py_scanstring(self):
self._test_scanstring(json.decoder.py_scanstring)
def test_c_scanstring(self):
if json.decoder.c_scanstring is not None:
self._test_scanstring(json.decoder.c_scanstring)
def _test_scanstring(self, scanstring):
self.assertEquals(
scanstring('"z\\ud834\\udd20x"', 1, None, True),
('z\U0001d120x', 16))
if sys.maxunicode == 65535:
self.assertEquals(
scanstring('"z\U0001d120x"', 1, None, True),
('z\U0001d120x', 6))
else:
self.assertEquals(
scanstring('"z\U0001d120x"', 1, None, True),
('z\U0001d120x', 5))
self.assertEquals(
scanstring('"\\u007b"', 1, None, True),
('{', 8))
self.assertEquals(
scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True),
('A JSON payload should be an object or array, not a string.', 60))
self.assertEquals(
scanstring('["Unclosed array"', 2, None, True),
('Unclosed array', 17))
self.assertEquals(
scanstring('["extra comma",]', 2, None, True),
('extra comma', 14))
self.assertEquals(
scanstring('["double extra comma",,]', 2, None, True),
('double extra comma', 21))
self.assertEquals(
scanstring('["Comma after the close"],', 2, None, True),
('Comma after the close', 24))
self.assertEquals(
scanstring('["Extra close"]]', 2, None, True),
('Extra close', 14))
self.assertEquals(
scanstring('{"Extra comma": true,}', 2, None, True),
('Extra comma', 14))
self.assertEquals(
scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True),
('Extra value after close', 26))
self.assertEquals(
scanstring('{"Illegal expression": 1 + 2}', 2, None, True),
('Illegal expression', 21))
self.assertEquals(
scanstring('{"Illegal invocation": alert()}', 2, None, True),
('Illegal invocation', 21))
self.assertEquals(
scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True),
('Numbers cannot have leading zeroes', 37))
self.assertEquals(
scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True),
('Numbers cannot be hex', 24))
self.assertEquals(
scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True),
('Too deep', 30))
self.assertEquals(
scanstring('{"Missing colon" null}', 2, None, True),
('Missing colon', 16))
self.assertEquals(
scanstring('{"Double colon":: null}', 2, None, True),
('Double colon', 15))
self.assertEquals(
scanstring('{"Comma instead of colon", null}', 2, None, True),
('Comma instead of colon', 25))
self.assertEquals(
scanstring('["Colon instead of comma": false]', 2, None, True),
('Colon instead of comma', 25))
self.assertEquals(
scanstring('["Bad value", truth]', 2, None, True),
('Bad value', 12))

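For orientation: scanstring takes the document, the index just past the opening quote, an encoding, and a strict flag, and returns the decoded string together with the index one past the closing quote -- which is why the expected offsets above always point just beyond the terminating quote. One case lifted straight from the test::

    import json.decoder

    # end=1 skips the opening quote; 16 is the index after the closing quote.
    assert json.decoder.py_scanstring('"z\\ud834\\udd20x"', 1, None, True) == \
        ('z\U0001d120x', 16)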
42
Lib/json/tests/test_separators.py Normal file

@@ -0,0 +1,42 @@
import textwrap
from unittest import TestCase
import json
class TestSeparators(TestCase):
def test_separators(self):
h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
{'nifty': 87}, {'field': 'yes', 'morefield': False} ]
expect = textwrap.dedent("""\
[
[
"blorpie"
] ,
[
"whoops"
] ,
[] ,
"d-shtaeou" ,
"d-nthiouh" ,
"i-vhbjkhnth" ,
{
"nifty" : 87
} ,
{
"field" : "yes" ,
"morefield" : false
}
]""")
d1 = json.dumps(h)
d2 = json.dumps(h, indent=2, sort_keys=True, separators=(' ,', ' : '))
h1 = json.loads(d1)
h2 = json.loads(d2)
self.assertEquals(h1, h)
self.assertEquals(h2, h)
self.assertEquals(d2, expect)

15
Lib/json/tests/test_speedups.py Normal file

@@ -0,0 +1,15 @@
import decimal
from unittest import TestCase
from json import decoder
from json import encoder
class TestSpeedups(TestCase):
def test_scanstring(self):
self.assertEquals(decoder.scanstring.__module__, "_json")
self.assert_(decoder.scanstring is decoder.c_scanstring)
def test_encode_basestring_ascii(self):
self.assertEquals(encoder.encode_basestring_ascii.__module__, "_json")
self.assert_(encoder.encode_basestring_ascii is
encoder.c_encode_basestring_ascii)

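These assertions only hold when the _json extension built and imported; the decoder and encoder modules bind the C functions with a guarded import and fall back to the pure-Python versions otherwise. Roughly the pattern involved (paraphrased, not verbatim from json/decoder.py)::

    try:
        from _json import scanstring as c_scanstring
    except ImportError:
        c_scanstring = None

    def py_scanstring(s, end, encoding=None, strict=True):
        ...                          # pure-Python fallback elided

    scanstring = c_scanstring or py_scanstring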
55
Lib/json/tests/test_unicode.py Normal file

@@ -0,0 +1,55 @@
from unittest import TestCase
import json
class TestUnicode(TestCase):
def test_encoding1(self):
encoder = json.JSONEncoder(encoding='utf-8')
u = '\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = encoder.encode(u)
js = encoder.encode(s)
self.assertEquals(ju, js)
def test_encoding2(self):
u = '\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = json.dumps(u, encoding='utf-8')
js = json.dumps(s, encoding='utf-8')
self.assertEquals(ju, js)
def test_encoding3(self):
u = '\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u)
self.assertEquals(j, '"\\u03b1\\u03a9"')
def test_encoding4(self):
u = '\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u])
self.assertEquals(j, '["\\u03b1\\u03a9"]')
def test_encoding5(self):
u = '\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u, ensure_ascii=False)
self.assertEquals(j, '"{0}"'.format(u))
def test_encoding6(self):
u = '\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u], ensure_ascii=False)
self.assertEquals(j, '["{0}"]'.format(u))
def test_big_unicode_encode(self):
u = '\U0001d120'
self.assertEquals(json.dumps(u), '"\\ud834\\udd20"')
self.assertEquals(json.dumps(u, ensure_ascii=False), '"\U0001d120"')
def test_big_unicode_decode(self):
u = 'z\U0001d120x'
self.assertEquals(json.loads('"' + u + '"'), u)
self.assertEquals(json.loads('"z\\ud834\\udd20x"'), u)
def test_unicode_decode(self):
for i in range(0, 0xd7ff):
u = chr(i)
js = '"\\u{0:04x}"'.format(i)
self.assertEquals(json.loads(js), u)

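The big-unicode cases encode and decode U+1D120 through a UTF-16 surrogate pair; the arithmetic is the same combining formula the C scanner below uses. Worked out in two lines::

    # \ud834\udd20 <-> U+1D120, per c = 0x10000 + (((hi - 0xD800) << 10) | (lo - 0xDC00))
    hi, lo = 0xD834, 0xDD20
    assert 0x10000 + (((hi - 0xD800) << 10) | (lo - 0xDC00)) == 0x1D120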
37
Lib/json/tool.py Normal file
@@ -0,0 +1,37 @@
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -mjson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -mjson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
import json
def main():
if len(sys.argv) == 1:
infile = sys.stdin
outfile = sys.stdout
elif len(sys.argv) == 2:
infile = open(sys.argv[1], 'rb')
outfile = sys.stdout
elif len(sys.argv) == 3:
infile = open(sys.argv[1], 'rb')
outfile = open(sys.argv[2], 'wb')
else:
raise SystemExit("{0} [infile [outfile]]".format(sys.argv[0]))
try:
obj = json.load(infile)
except ValueError as e:
raise SystemExit(e)
json.dump(obj, outfile, sort_keys=True, indent=4)
outfile.write('\n')
if __name__ == '__main__':
main()

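Given the argv handling above, the tool also accepts input and output file arguments in addition to the piped form shown in the docstring; the file names here are placeholders::

    $ python -mjson.tool in.json            # pretty-print in.json to stdout
    $ python -mjson.tool in.json out.json   # write the result to out.json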
17
Lib/test/test_json.py Normal file
@@ -0,0 +1,17 @@
"""Tests for json.
The tests for json are defined in the json.tests package;
the test_suite() function there returns a test suite that's ready to
be run.
"""
import json.tests
import test.test_support
def test_main():
test.test_support.run_unittest(json.tests.test_suite())
if __name__ == "__main__":
test_main()

620
Modules/_json.c Normal file
@@ -0,0 +1,620 @@
#include "Python.h"
#define DEFAULT_ENCODING "utf-8"
#define S_CHAR(c) (c >= ' ' && c <= '~' && c != '\\' && c != '"')
#define MIN_EXPANSION 6
#ifdef Py_UNICODE_WIDE
#define MAX_EXPANSION (2 * MIN_EXPANSION)
#else
#define MAX_EXPANSION MIN_EXPANSION
#endif
static Py_ssize_t
ascii_escape_char(Py_UNICODE c, char *output, Py_ssize_t chars)
{
Py_UNICODE x;
output[chars++] = '\\';
switch (c) {
case '\\': output[chars++] = (char)c; break;
case '"': output[chars++] = (char)c; break;
case '\b': output[chars++] = 'b'; break;
case '\f': output[chars++] = 'f'; break;
case '\n': output[chars++] = 'n'; break;
case '\r': output[chars++] = 'r'; break;
case '\t': output[chars++] = 't'; break;
default:
#ifdef Py_UNICODE_WIDE
if (c >= 0x10000) {
/* UTF-16 surrogate pair */
Py_UNICODE v = c - 0x10000;
c = 0xd800 | ((v >> 10) & 0x3ff);
output[chars++] = 'u';
x = (c & 0xf000) >> 12;
output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
x = (c & 0x0f00) >> 8;
output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
x = (c & 0x00f0) >> 4;
output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
x = (c & 0x000f);
output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
c = 0xdc00 | (v & 0x3ff);
output[chars++] = '\\';
}
#endif
output[chars++] = 'u';
x = (c & 0xf000) >> 12;
output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
x = (c & 0x0f00) >> 8;
output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
x = (c & 0x00f0) >> 4;
output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
x = (c & 0x000f);
output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
}
return chars;
}
static PyObject *
ascii_escape_unicode(PyObject *pystr)
{
Py_ssize_t i;
Py_ssize_t input_chars;
Py_ssize_t output_size;
Py_ssize_t chars;
PyObject *rval;
char *output;
Py_UNICODE *input_unicode;
input_chars = PyUnicode_GET_SIZE(pystr);
input_unicode = PyUnicode_AS_UNICODE(pystr);
/* One char input can be up to 6 chars output, estimate 4 of these */
output_size = 2 + (MIN_EXPANSION * 4) + input_chars;
rval = PyString_FromStringAndSize(NULL, output_size);
if (rval == NULL) {
return NULL;
}
output = PyString_AS_STRING(rval);
chars = 0;
output[chars++] = '"';
for (i = 0; i < input_chars; i++) {
Py_UNICODE c = input_unicode[i];
if (S_CHAR(c)) {
output[chars++] = (char)c;
}
else {
chars = ascii_escape_char(c, output, chars);
}
if (output_size - chars < (1 + MAX_EXPANSION)) {
/* Remaining space can't hold another worst-case escape plus the closing quote, so grow the buffer */
output_size *= 2;
/* This is an upper bound */
if (output_size > 2 + (input_chars * MAX_EXPANSION)) {
output_size = 2 + (input_chars * MAX_EXPANSION);
}
if (_PyString_Resize(&rval, output_size) == -1) {
return NULL;
}
output = PyString_AS_STRING(rval);
}
}
output[chars++] = '"';
if (_PyString_Resize(&rval, chars) == -1) {
return NULL;
}
return rval;
}
static PyObject *
ascii_escape_str(PyObject *pystr)
{
Py_ssize_t i;
Py_ssize_t input_chars;
Py_ssize_t output_size;
Py_ssize_t chars;
PyObject *rval;
char *output;
char *input_str;
input_chars = PyString_GET_SIZE(pystr);
input_str = PyString_AS_STRING(pystr);
/* One char input can be up to 6 chars output, estimate 4 of these */
output_size = 2 + (MIN_EXPANSION * 4) + input_chars;
rval = PyString_FromStringAndSize(NULL, output_size);
if (rval == NULL) {
return NULL;
}
output = PyString_AS_STRING(rval);
chars = 0;
output[chars++] = '"';
for (i = 0; i < input_chars; i++) {
Py_UNICODE c = (Py_UNICODE)input_str[i];
if (S_CHAR(c)) {
output[chars++] = (char)c;
}
else if (c > 0x7F) {
/* We hit a non-ASCII character, bail to unicode mode */
PyObject *uni;
Py_DECREF(rval);
uni = PyUnicode_DecodeUTF8(input_str, input_chars, "strict");
if (uni == NULL) {
return NULL;
}
rval = ascii_escape_unicode(uni);
Py_DECREF(uni);
return rval;
}
else {
chars = ascii_escape_char(c, output, chars);
}
/* An ASCII char can't possibly expand to a surrogate! */
if (output_size - chars < (1 + MIN_EXPANSION)) {
/* Remaining space can't hold another worst-case escape plus the closing quote, so grow the buffer */
output_size *= 2;
if (output_size > 2 + (input_chars * MIN_EXPANSION)) {
output_size = 2 + (input_chars * MIN_EXPANSION);
}
if (_PyString_Resize(&rval, output_size) == -1) {
return NULL;
}
output = PyString_AS_STRING(rval);
}
}
output[chars++] = '"';
if (_PyString_Resize(&rval, chars) == -1) {
return NULL;
}
return rval;
}
void
raise_errmsg(char *msg, PyObject *s, Py_ssize_t end)
{
static PyObject *errmsg_fn = NULL;
PyObject *pymsg;
if (errmsg_fn == NULL) {
PyObject *decoder = PyImport_ImportModule("json.decoder");
if (decoder == NULL)
return;
errmsg_fn = PyObject_GetAttrString(decoder, "errmsg");
if (errmsg_fn == NULL)
return;
Py_XDECREF(decoder);
}
pymsg = PyObject_CallFunction(errmsg_fn, "(zOn)", msg, s, end);
if (pymsg == NULL)
    return;
PyErr_SetObject(PyExc_ValueError, pymsg);
Py_DECREF(pymsg);
/*
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
lineno, colno = linecol(doc, pos)
if end is None:
return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
msg, lineno, colno, endlineno, endcolno, pos, end)
*/
}
static PyObject *
join_list_unicode(PyObject *lst)
{
static PyObject *ustr = NULL;
static PyObject *joinstr = NULL;
if (ustr == NULL) {
Py_UNICODE c = 0;
ustr = PyUnicode_FromUnicode(&c, 0);
}
if (joinstr == NULL) {
joinstr = PyUnicode_InternFromString("join");
}
if (joinstr == NULL || ustr == NULL) {
return NULL;
}
return PyObject_CallMethodObjArgs(ustr, joinstr, lst, NULL);
}
static PyObject *
scanstring_str(PyObject *pystr, Py_ssize_t end, char *encoding, int strict)
{
PyObject *rval;
Py_ssize_t len = PyString_GET_SIZE(pystr);
Py_ssize_t begin = end - 1;
Py_ssize_t next = begin;
char *buf = PyString_AS_STRING(pystr);
Py_buffer info;
PyObject *chunks = PyList_New(0);
if (chunks == NULL) {
goto bail;
}
while (1) {
/* Find the end of the string or the next escape */
Py_UNICODE c = 0;
PyObject *chunk = NULL;
for (next = end; next < len; next++) {
c = buf[next];
if (c == '"' || c == '\\') {
break;
}
else if (strict && c <= 0x1f) {
raise_errmsg("Invalid control character at", pystr, begin);
goto bail;
}
}
if (!(c == '"' || c == '\\')) {
raise_errmsg("Unterminated string starting at", pystr, begin);
goto bail;
}
/* Pick up this chunk if it's not zero length */
if (next != end) {
if (PyBuffer_FillInfo(&info, &buf[end], next - end, 1, 0) < 0) {
goto bail;
}
PyObject *strchunk = PyMemoryView_FromMemory(&info);
if (strchunk == NULL) {
goto bail;
}
chunk = PyUnicode_FromEncodedObject(strchunk, encoding, NULL);
Py_DECREF(strchunk);
if (chunk == NULL) {
goto bail;
}
if (PyList_Append(chunks, chunk)) {
goto bail;
}
Py_DECREF(chunk);
}
next++;
if (c == '"') {
end = next;
break;
}
if (next == len) {
raise_errmsg("Unterminated string starting at", pystr, begin);
goto bail;
}
c = buf[next];
if (c != 'u') {
/* Non-unicode backslash escapes */
end = next + 1;
switch (c) {
case '"': break;
case '\\': break;
case '/': break;
case 'b': c = '\b'; break;
case 'f': c = '\f'; break;
case 'n': c = '\n'; break;
case 'r': c = '\r'; break;
case 't': c = '\t'; break;
default: c = 0;
}
if (c == 0) {
raise_errmsg("Invalid \\escape", pystr, end - 2);
goto bail;
}
}
else {
c = 0;
next++;
end = next + 4;
if (end >= len) {
raise_errmsg("Invalid \\uXXXX escape", pystr, next - 1);
goto bail;
}
/* Decode 4 hex digits */
for (; next < end; next++) {
Py_ssize_t shl = (end - next - 1) << 2;
Py_UNICODE digit = buf[next];
switch (digit) {
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
c |= (digit - '0') << shl; break;
case 'a': case 'b': case 'c': case 'd': case 'e':
case 'f':
c |= (digit - 'a' + 10) << shl; break;
case 'A': case 'B': case 'C': case 'D': case 'E':
case 'F':
c |= (digit - 'A' + 10) << shl; break;
default:
raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
goto bail;
}
}
#ifdef Py_UNICODE_WIDE
/* Surrogate pair */
if (c >= 0xd800 && c <= 0xdbff) {
Py_UNICODE c2 = 0;
if (end + 6 >= len) {
    raise_errmsg("Invalid \\uXXXX\\uXXXX surrogate pair", pystr,
                 end - 5);
    goto bail;
}
if (buf[next++] != '\\' || buf[next++] != 'u') {
    raise_errmsg("Invalid \\uXXXX\\uXXXX surrogate pair", pystr,
                 end - 5);
    goto bail;
}
end += 6;
/* Decode 4 hex digits */
for (; next < end; next++) {
Py_ssize_t shl = (end - next - 1) << 2;
Py_UNICODE digit = buf[next];
switch (digit) {
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
c2 |= (digit - '0') << shl; break;
case 'a': case 'b': case 'c': case 'd': case 'e':
case 'f':
c2 |= (digit - 'a' + 10) << shl; break;
case 'A': case 'B': case 'C': case 'D': case 'E':
case 'F':
c2 |= (digit - 'A' + 10) << shl; break;
default:
raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
goto bail;
}
}
c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00));
}
#endif
}
chunk = PyUnicode_FromUnicode(&c, 1);
if (chunk == NULL) {
goto bail;
}
if (PyList_Append(chunks, chunk)) {
goto bail;
}
Py_DECREF(chunk);
}
rval = join_list_unicode(chunks);
if (rval == NULL) {
goto bail;
}
Py_DECREF(chunks);
chunks = NULL;
return Py_BuildValue("(Nn)", rval, end);
bail:
Py_XDECREF(chunks);
return NULL;
}
static PyObject *
scanstring_unicode(PyObject *pystr, Py_ssize_t end, int strict)
{
PyObject *rval;
Py_ssize_t len = PyUnicode_GET_SIZE(pystr);
Py_ssize_t begin = end - 1;
Py_ssize_t next = begin;
const Py_UNICODE *buf = PyUnicode_AS_UNICODE(pystr);
PyObject *chunks = PyList_New(0);
if (chunks == NULL) {
goto bail;
}
while (1) {
/* Find the end of the string or the next escape */
Py_UNICODE c = 0;
PyObject *chunk = NULL;
for (next = end; next < len; next++) {
c = buf[next];
if (c == '"' || c == '\\') {
break;
}
else if (strict && c <= 0x1f) {
raise_errmsg("Invalid control character at", pystr, begin);
goto bail;
}
}
if (!(c == '"' || c == '\\')) {
raise_errmsg("Unterminated string starting at", pystr, begin);
goto bail;
}
/* Pick up this chunk if it's not zero length */
if (next != end) {
chunk = PyUnicode_FromUnicode(&buf[end], next - end);
if (chunk == NULL) {
goto bail;
}
if (PyList_Append(chunks, chunk)) {
goto bail;
}
Py_DECREF(chunk);
}
next++;
if (c == '"') {
end = next;
break;
}
if (next == len) {
raise_errmsg("Unterminated string starting at", pystr, begin);
goto bail;
}
c = buf[next];
if (c != 'u') {
/* Non-unicode backslash escapes */
end = next + 1;
switch (c) {
case '"': break;
case '\\': break;
case '/': break;
case 'b': c = '\b'; break;
case 'f': c = '\f'; break;
case 'n': c = '\n'; break;
case 'r': c = '\r'; break;
case 't': c = '\t'; break;
default: c = 0;
}
if (c == 0) {
raise_errmsg("Invalid \\escape", pystr, end - 2);
goto bail;
}
}
else {
c = 0;
next++;
end = next + 4;
if (end >= len) {
raise_errmsg("Invalid \\uXXXX escape", pystr, next - 1);
goto bail;
}
/* Decode 4 hex digits */
for (; next < end; next++) {
Py_ssize_t shl = (end - next - 1) << 2;
Py_UNICODE digit = buf[next];
switch (digit) {
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
c |= (digit - '0') << shl; break;
case 'a': case 'b': case 'c': case 'd': case 'e':
case 'f':
c |= (digit - 'a' + 10) << shl; break;
case 'A': case 'B': case 'C': case 'D': case 'E':
case 'F':
c |= (digit - 'A' + 10) << shl; break;
default:
raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
goto bail;
}
}
#ifdef Py_UNICODE_WIDE
/* Surrogate pair */
if (c >= 0xd800 && c <= 0xdbff) {
Py_UNICODE c2 = 0;
if (end + 6 >= len) {
    raise_errmsg("Invalid \\uXXXX\\uXXXX surrogate pair", pystr,
                 end - 5);
    goto bail;
}
if (buf[next++] != '\\' || buf[next++] != 'u') {
    raise_errmsg("Invalid \\uXXXX\\uXXXX surrogate pair", pystr,
                 end - 5);
    goto bail;
}
end += 6;
/* Decode 4 hex digits */
for (; next < end; next++) {
Py_ssize_t shl = (end - next - 1) << 2;
Py_UNICODE digit = buf[next];
switch (digit) {
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
c2 |= (digit - '0') << shl; break;
case 'a': case 'b': case 'c': case 'd': case 'e':
case 'f':
c2 |= (digit - 'a' + 10) << shl; break;
case 'A': case 'B': case 'C': case 'D': case 'E':
case 'F':
c2 |= (digit - 'A' + 10) << shl; break;
default:
raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
goto bail;
}
}
c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00));
}
#endif
}
chunk = PyUnicode_FromUnicode(&c, 1);
if (chunk == NULL) {
goto bail;
}
if (PyList_Append(chunks, chunk)) {
goto bail;
}
Py_DECREF(chunk);
}
rval = join_list_unicode(chunks);
if (rval == NULL) {
goto bail;
}
Py_DECREF(chunks);
chunks = NULL;
return Py_BuildValue("(Nn)", rval, end);
bail:
Py_XDECREF(chunks);
return NULL;
}
PyDoc_STRVAR(pydoc_scanstring,
"scanstring(str_or_bytes, end, encoding) -> (bytes, end)\n");
static PyObject *
py_scanstring(PyObject* self, PyObject *args)
{
PyObject *pystr;
Py_ssize_t end;
char *encoding = NULL;
int strict = 0;
if (!PyArg_ParseTuple(args, "On|zi:scanstring", &pystr, &end, &encoding, &strict)) {
return NULL;
}
if (encoding == NULL) {
encoding = DEFAULT_ENCODING;
}
if (PyString_Check(pystr)) {
return scanstring_str(pystr, end, encoding, strict);
}
else if (PyUnicode_Check(pystr)) {
return scanstring_unicode(pystr, end, strict);
}
else {
PyErr_Format(PyExc_TypeError,
"first argument must be a string or bytes, not %.80s",
Py_TYPE(pystr)->tp_name);
return NULL;
}
}
PyDoc_STRVAR(pydoc_encode_basestring_ascii,
"encode_basestring_ascii(str_or_bytes) -> bytes\n");
static PyObject *
py_encode_basestring_ascii(PyObject* self, PyObject *pystr)
{
PyObject *rval;
/* METH_O */
if (PyString_Check(pystr)) {
rval = ascii_escape_str(pystr);
}
else if (PyUnicode_Check(pystr)) {
rval = ascii_escape_unicode(pystr);
}
else {
PyErr_Format(PyExc_TypeError,
"first argument must be a string or unicode, not %.80s",
Py_TYPE(pystr)->tp_name);
return NULL;
}
if (PyString_Check(rval)) {
PyObject *urval = PyUnicode_DecodeASCII(PyString_AS_STRING(rval), PyString_GET_SIZE(rval), NULL);
Py_DECREF(rval);
return urval;
}
return rval;
}
static PyMethodDef json_methods[] = {
{"encode_basestring_ascii", (PyCFunction)py_encode_basestring_ascii,
METH_O, pydoc_encode_basestring_ascii},
{"scanstring", (PyCFunction)py_scanstring, METH_VARARGS,
pydoc_scanstring},
{NULL, NULL, 0, NULL}
};
PyDoc_STRVAR(module_doc,
"json speedups\n");
void
init_json(void)
{
PyObject *m;
m = Py_InitModule3("_json", json_methods, module_doc);
}

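To summarize the escaping scheme implemented by ascii_escape_char above: printable ASCII passes through, short escapes cover the usual control characters, and everything else becomes \uXXXX, with code points above 0xFFFF split into a UTF-16 surrogate pair on wide builds. A Python rendering of just the \uXXXX branch (a sketch of the C logic, not the module's actual code path)::

    def ascii_escape(n):
        # n is a Unicode code point, mirroring the C function's default case.
        if n >= 0x10000:
            v = n - 0x10000
            hi = 0xD800 | ((v >> 10) & 0x3FF)   # high surrogate
            lo = 0xDC00 | (v & 0x3FF)           # low surrogate
            return '\\u%04x\\u%04x' % (hi, lo)
        return '\\u%04x' % n

    assert ascii_escape(0x1D120) == '\\ud834\\udd20'
    assert ascii_escape(0x1234) == '\\u1234'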
@@ -145,6 +145,10 @@ SOURCE=..\..\Modules\_heapqmodule.c
# End Source File
# Begin Source File
SOURCE=..\..\Modules\_json.c
# End Source File
# Begin Source File
SOURCE=..\..\Modules\_localemodule.c
# End Source File
# Begin Source File

@@ -376,6 +376,9 @@
<File
RelativePath="..\..\Modules\_heapqmodule.c">
</File>
<File
RelativePath="..\..\Modules\json.c">
</File>
<File
RelativePath="..\..\Modules\_localemodule.c">
</File>

@@ -1002,6 +1002,10 @@
RelativePath="..\..\Modules\_heapqmodule.c"
>
</File>
<File
RelativePath="..\..\Modules\_json.c"
>
</File>
<File
RelativePath="..\..\Modules\_localemodule.c"
>

@@ -46,6 +46,7 @@ extern void init_winreg(void);
extern void init_struct(void);
extern void initdatetime(void);
extern void init_functools(void);
extern void init_json(void);
extern void initzlib(void);
extern void init_multibytecodec(void);
@@ -119,6 +120,7 @@ struct _inittab _PyImport_Inittab[] = {
{"_struct", init_struct},
{"datetime", initdatetime},
{"_functools", init_functools},
{"_json", init_json},
{"xxsubtype", initxxsubtype},
{"zipimport", initzipimport},

@@ -29,7 +29,6 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "make_buildinfo", "make_buil
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{553EC33E-9816-4996-A660-5D6186A0B0B3}"
ProjectSection(SolutionItems) = preProject
..\Modules\getbuildinfo.c = ..\Modules\getbuildinfo.c
readme.txt = readme.txt
EndProjectSection
EndProject

@@ -1006,6 +1006,10 @@
RelativePath="..\Modules\_heapqmodule.c"
>
</File>
<File
RelativePath="..\Modules\_json.c"
>
</File>
<File
RelativePath="..\Modules\_localemodule.c"
>

@@ -430,6 +430,8 @@ class PyBuildExt(build_ext):
exts.append( Extension("_bytesio", ["_bytesio.c"]) )
# atexit
exts.append( Extension("atexit", ["atexitmodule.c"]) )
# _json speedups
exts.append( Extension("_json", ["_json.c"]) )
# Python C API test module
exts.append( Extension('_testcapi', ['_testcapimodule.c']) )
# profiler (_lsprof is for cProfile.py)