Merged revisions 70980,71059,71225,71234,71241,71243,71249,71251,71255,71266,71299,71329,71397-71398,71486 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk
........
r70980 | jack.diederich | 2009-04-01 15:26:13 -0500 (Wed, 01 Apr 2009) | 3 lines
bounds check arguments to mmap.move(). All of them. Really.
fixes crasher on OS X 10.5
........
r71059 | mark.dickinson | 2009-04-02 13:39:37 -0500 (Thu, 02 Apr 2009) | 2 lines
sys.long_info attributes should be ints, not longs
........
r71225 | georg.brandl | 2009-04-05 06:54:07 -0500 (Sun, 05 Apr 2009) | 1 line
#5580: no need to use parentheses when converterr() argument is actually a type description.
........
r71234 | georg.brandl | 2009-04-05 08:16:35 -0500 (Sun, 05 Apr 2009) | 1 line
Whitespace normalization.
........
r71241 | georg.brandl | 2009-04-05 09:48:49 -0500 (Sun, 05 Apr 2009) | 1 line
#5471: fix expanduser() for $HOME set to "/".
........
r71243 | georg.brandl | 2009-04-05 10:14:29 -0500 (Sun, 05 Apr 2009) | 1 line
#5432: make plistlib docstring a raw string, since it contains examples with backslash escapes.
........
r71249 | georg.brandl | 2009-04-05 11:30:43 -0500 (Sun, 05 Apr 2009) | 1 line
#5444: adapt make.bat to new htmlhelp output file name.
........
r71251 | georg.brandl | 2009-04-05 12:17:42 -0500 (Sun, 05 Apr 2009) | 1 line
#5298: clarify docs about GIL by using more consistent wording.
........
r71255 | georg.brandl | 2009-04-05 13:34:58 -0500 (Sun, 05 Apr 2009) | 1 line
#602893: add indicator for current line in cgitb that doesnt rely on styling alone.
........
r71266 | georg.brandl | 2009-04-05 15:23:13 -0500 (Sun, 05 Apr 2009) | 1 line
Normalize issue referencing style.
........
r71299 | gregory.p.smith | 2009-04-05 18:43:58 -0500 (Sun, 05 Apr 2009) | 3 lines
Fixes issue5705: os.setuid() and friends did not accept the same range of
values that pwd.getpwnam() returns.
........
r71329 | benjamin.peterson | 2009-04-06 16:53:33 -0500 (Mon, 06 Apr 2009) | 1 line
add create_connection to __all__ #5711
........
r71397 | georg.brandl | 2009-04-08 11:36:39 -0500 (Wed, 08 Apr 2009) | 1 line
Remove redundant backtick.
........
r71398 | georg.brandl | 2009-04-08 11:39:04 -0500 (Wed, 08 Apr 2009) | 1 line
Update ignore file for suspicious builder.
........
r71486 | andrew.kuchling | 2009-04-11 11:18:14 -0500 (Sat, 11 Apr 2009) | 1 line
Re-word
........
r"""plistlib.py -- a tool to generate and parse MacOSX .plist files.
The property list (.plist) file format is a simple XML pickle supporting
basic object types, like dictionaries, lists, numbers and strings.
Usually the top level object is a dictionary.

To write out a plist file, use the dump(value, file)
function. 'value' is the top level object, 'file' is
a (writable) file object.

To parse a plist from a file, use the load(file) function,
with a (readable) file object as the only argument. It
returns the top level object (again, usually a dictionary).

To work with plist data in bytes objects, you can use loads()
and dumps().

Values can be strings, integers, floats, booleans, tuples, lists,
dictionaries (but only with string keys), Data, bytes, bytearray, or
datetime.datetime objects.

Generate Plist example:

    pl = dict(
        aString = "Doodah",
        aList = ["A", "B", 12, 32.1, [1, 2, 3]],
        aFloat = 0.1,
        anInt = 728,
        aDict = dict(
            anotherString = "<hello & hi there!>",
            aUnicodeValue = "M\xe4ssig, Ma\xdf",
            aTrueValue = True,
            aFalseValue = False,
        ),
        someData = b"<binary gunk>",
        someMoreData = b"<lots of binary gunk>" * 10,
        aDate = datetime.datetime.fromtimestamp(time.mktime(time.gmtime())),
    )
    with open(fileName, 'wb') as fp:
        dump(pl, fp)

Parse Plist example:

    with open(fileName, 'rb') as fp:
        pl = load(fp)
    print(pl["aKey"])
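Bytes example (an illustrative sketch -- dumps() and loads() are the
in-memory counterparts of dump() and load() shown above):

    data = dumps(pl)
    pl2 = loads(data)
    # pl2 compares equal to pl for the value types listed above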
"""
__all__ = [
    "readPlist", "writePlist", "readPlistFromBytes", "writePlistToBytes",
    "Plist", "Data", "Dict", "InvalidFileException", "FMT_XML", "FMT_BINARY",
    "load", "dump", "loads", "dumps"
]

import binascii
import codecs
import contextlib
import datetime
import enum
from io import BytesIO
import itertools
import os
import re
import struct
from warnings import warn
from xml.parsers.expat import ParserCreate


PlistFormat = enum.Enum('PlistFormat', 'FMT_XML FMT_BINARY', module=__name__)
globals().update(PlistFormat.__members__)
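# The globals().update() call above exposes the enum members as module-level
# names, so FMT_XML and FMT_BINARY can be passed directly to dump()/load(),
# for example (illustrative):
#
#     data = dumps({"aKey": "aValue"}, fmt=FMT_BINARY)
#     loads(data) == {"aKey": "aValue"}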
#
#
# Deprecated functionality
#
#


class _InternalDict(dict):

    # This class is needed while Dict is scheduled for deprecation:
    # we only need to warn when a *user* instantiates Dict or when
    # the "attribute notation for dict keys" is used.
    __slots__ = ()

    def __getattr__(self, attr):
        try:
            value = self[attr]
        except KeyError:
            raise AttributeError(attr)
        warn("Attribute access from plist dicts is deprecated, use d[key] "
             "notation instead", DeprecationWarning, 2)
        return value

    def __setattr__(self, attr, value):
        warn("Attribute access from plist dicts is deprecated, use d[key] "
             "notation instead", DeprecationWarning, 2)
        self[attr] = value

    def __delattr__(self, attr):
        try:
            del self[attr]
        except KeyError:
            raise AttributeError(attr)
        warn("Attribute access from plist dicts is deprecated, use d[key] "
             "notation instead", DeprecationWarning, 2)


class Dict(_InternalDict):

    def __init__(self, **kwargs):
        warn("The plistlib.Dict class is deprecated, use builtin dict instead",
             DeprecationWarning, 2)
        super().__init__(**kwargs)


@contextlib.contextmanager
def _maybe_open(pathOrFile, mode):
    if isinstance(pathOrFile, str):
        with open(pathOrFile, mode) as fp:
            yield fp

    else:
        yield pathOrFile
class Plist(_InternalDict):
    """This class has been deprecated. Use dump() and load()
    functions instead, together with regular dict objects.
    """

    def __init__(self, **kwargs):
        warn("The Plist class is deprecated, use the load() and "
             "dump() functions instead", DeprecationWarning, 2)
        super().__init__(**kwargs)

    @classmethod
    def fromFile(cls, pathOrFile):
        """Deprecated. Use the load() function instead."""
        with _maybe_open(pathOrFile, 'rb') as fp:
            value = load(fp)
        plist = cls()
        plist.update(value)
        return plist

    def write(self, pathOrFile):
        """Deprecated. Use the dump() function instead."""
        with _maybe_open(pathOrFile, 'wb') as fp:
            dump(self, fp)


def readPlist(pathOrFile):
    """
    Read a .plist from a path or file. pathOrFile should either
    be a file name, or a readable binary file object.

    This function is deprecated, use load instead.
    """
    warn("The readPlist function is deprecated, use load() instead",
         DeprecationWarning, 2)

    with _maybe_open(pathOrFile, 'rb') as fp:
        return load(fp, fmt=None, use_builtin_types=False,
                    dict_type=_InternalDict)


def writePlist(value, pathOrFile):
    """
    Write 'value' to a .plist file. 'pathOrFile' may either be a
    file name or a (writable) file object.

    This function is deprecated, use dump instead.
    """
    warn("The writePlist function is deprecated, use dump() instead",
         DeprecationWarning, 2)
    with _maybe_open(pathOrFile, 'wb') as fp:
        dump(value, fp, fmt=FMT_XML, sort_keys=True, skipkeys=False)


def readPlistFromBytes(data):
    """
    Read a plist data from a bytes object. Return the root object.

    This function is deprecated, use loads instead.
    """
    warn("The readPlistFromBytes function is deprecated, use loads() instead",
         DeprecationWarning, 2)
    return load(BytesIO(data), fmt=None, use_builtin_types=False,
                dict_type=_InternalDict)


def writePlistToBytes(value):
    """
    Return 'value' as a plist-formatted bytes object.

    This function is deprecated, use dumps instead.
    """
    warn("The writePlistToBytes function is deprecated, use dumps() instead",
         DeprecationWarning, 2)
    f = BytesIO()
    dump(value, f, fmt=FMT_XML, sort_keys=True, skipkeys=False)
    return f.getvalue()
class Data:
    """
    Wrapper for binary data.

    This class is deprecated, use a bytes object instead.
    """

    def __init__(self, data):
        if not isinstance(data, bytes):
            raise TypeError("data must be as bytes")
        self.data = data

    @classmethod
    def fromBase64(cls, data):
        # base64.decodebytes just calls binascii.a2b_base64;
        # it seems overkill to use both base64 and binascii.
        return cls(_decode_base64(data))

    def asBase64(self, maxlinelength=76):
        return _encode_base64(self.data, maxlinelength)

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.data == other.data
        elif isinstance(other, bytes):
            return self.data == other
        else:
            return NotImplemented

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self.data))

#
#
# End of deprecated functionality
#
#


#
# XML support
#


# XML 'header'
PLISTHEADER = b"""\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
"""


# Regex to find any control chars, except for \t \n and \r
_controlCharPat = re.compile(
    r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"
    r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]")


def _encode_base64(s, maxlinelength=76):
    # copied from base64.encodebytes(), with added maxlinelength argument
    maxbinsize = (maxlinelength//4)*3
    pieces = []
    for i in range(0, len(s), maxbinsize):
        chunk = s[i : i + maxbinsize]
        pieces.append(binascii.b2a_base64(chunk))
    return b''.join(pieces)


def _decode_base64(s):
    if isinstance(s, str):
        return binascii.a2b_base64(s.encode("utf-8"))

    else:
        return binascii.a2b_base64(s)
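# Round-trip check (illustrative):
#
#     _decode_base64(_encode_base64(b"binary gunk")) == b"binary gunk"
#
# _encode_base64() folds its output at maxlinelength characters per line;
# the XML writer below relies on that to control the width of <data>
# elements, and binascii.a2b_base64() ignores the embedded newlines on the
# way back in.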
# Contents should conform to a subset of ISO 8601
# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'. Smaller units
# may be omitted with a loss of precision)
_dateParser = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z", re.ASCII)


def _date_from_string(s):
    order = ('year', 'month', 'day', 'hour', 'minute', 'second')
    gd = _dateParser.match(s).groupdict()
    lst = []
    for key in order:
        val = gd[key]
        if val is None:
            break
        lst.append(int(val))
    return datetime.datetime(*lst)


def _date_to_string(d):
    return '%04d-%02d-%02dT%02d:%02d:%02dZ' % (
        d.year, d.month, d.day,
        d.hour, d.minute, d.second
    )
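# Behaviour of the helpers above (illustrative):
#
#     _date_to_string(datetime.datetime(2004, 10, 26, 10, 33, 33))
#     -> '2004-10-26T10:33:33Z'
#     _date_from_string('2004-10-26T10:33:33Z')
#     -> datetime.datetime(2004, 10, 26, 10, 33, 33)
#
# Trailing time components may be omitted, e.g. '2004-10-26Z' parses as
# datetime.datetime(2004, 10, 26, 0, 0).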
def _escape(text):
    m = _controlCharPat.search(text)
    if m is not None:
        raise ValueError("strings can't contains control characters; "
                         "use bytes instead")
    text = text.replace("\r\n", "\n")       # convert DOS line endings
    text = text.replace("\r", "\n")         # convert Mac line endings
    text = text.replace("&", "&amp;")       # escape '&'
    text = text.replace("<", "&lt;")        # escape '<'
    text = text.replace(">", "&gt;")        # escape '>'
    return text
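# For example (illustrative), _escape('<key> & value') returns
# '&lt;key&gt; &amp; value', which is what ends up inside the <string>
# elements written by _PlistWriter below.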
class _PlistParser:
    def __init__(self, use_builtin_types, dict_type):
        self.stack = []
        self.current_key = None
        self.root = None
        self._use_builtin_types = use_builtin_types
        self._dict_type = dict_type

    def parse(self, fileobj):
        self.parser = ParserCreate()
        self.parser.StartElementHandler = self.handle_begin_element
        self.parser.EndElementHandler = self.handle_end_element
        self.parser.CharacterDataHandler = self.handle_data
        self.parser.ParseFile(fileobj)
        return self.root

    def handle_begin_element(self, element, attrs):
        self.data = []
        handler = getattr(self, "begin_" + element, None)
        if handler is not None:
            handler(attrs)

    def handle_end_element(self, element):
        handler = getattr(self, "end_" + element, None)
        if handler is not None:
            handler()

    def handle_data(self, data):
        self.data.append(data)

    def add_object(self, value):
        if self.current_key is not None:
            if not isinstance(self.stack[-1], type({})):
                raise ValueError("unexpected element at line %d" %
                                 self.parser.CurrentLineNumber)
            self.stack[-1][self.current_key] = value
            self.current_key = None
        elif not self.stack:
            # this is the root object
            self.root = value
        else:
            if not isinstance(self.stack[-1], type([])):
                raise ValueError("unexpected element at line %d" %
                                 self.parser.CurrentLineNumber)
            self.stack[-1].append(value)

    def get_data(self):
        data = ''.join(self.data)
        self.data = []
        return data

    # element handlers

    def begin_dict(self, attrs):
        d = self._dict_type()
        self.add_object(d)
        self.stack.append(d)

    def end_dict(self):
        if self.current_key:
            raise ValueError("missing value for key '%s' at line %d" %
                             (self.current_key, self.parser.CurrentLineNumber))
        self.stack.pop()

    def end_key(self):
        if self.current_key or not isinstance(self.stack[-1], type({})):
            raise ValueError("unexpected key at line %d" %
                             self.parser.CurrentLineNumber)
        self.current_key = self.get_data()

    def begin_array(self, attrs):
        a = []
        self.add_object(a)
        self.stack.append(a)

    def end_array(self):
        self.stack.pop()

    def end_true(self):
        self.add_object(True)

    def end_false(self):
        self.add_object(False)

    def end_integer(self):
        self.add_object(int(self.get_data()))

    def end_real(self):
        self.add_object(float(self.get_data()))

    def end_string(self):
        self.add_object(self.get_data())

    def end_data(self):
        if self._use_builtin_types:
            self.add_object(_decode_base64(self.get_data()))

        else:
            self.add_object(Data.fromBase64(self.get_data()))

    def end_date(self):
        self.add_object(_date_from_string(self.get_data()))
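# How the handlers above fit together (illustrative): an XML fragment such as
#
#     <dict>
#         <key>aString</key>
#         <string>Doodah</string>
#     </dict>
#
# is driven through begin_dict(), end_key(), end_string() and end_dict() by
# the expat callbacks, and add_object() assembles it into {'aString': 'Doodah'}.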
class _DumbXMLWriter:
    def __init__(self, file, indent_level=0, indent="\t"):
        self.file = file
        self.stack = []
        self._indent_level = indent_level
        self.indent = indent

    def begin_element(self, element):
        self.stack.append(element)
        self.writeln("<%s>" % element)
        self._indent_level += 1

    def end_element(self, element):
        assert self._indent_level > 0
        assert self.stack.pop() == element
        self._indent_level -= 1
        self.writeln("</%s>" % element)

    def simple_element(self, element, value=None):
        if value is not None:
            value = _escape(value)
            self.writeln("<%s>%s</%s>" % (element, value, element))

        else:
            self.writeln("<%s/>" % element)

    def writeln(self, line):
        if line:
            # plist has fixed encoding of utf-8

            # XXX: is this test needed?
            if isinstance(line, str):
                line = line.encode('utf-8')
            self.file.write(self._indent_level * self.indent)
            self.file.write(line)
        self.file.write(b'\n')


class _PlistWriter(_DumbXMLWriter):
    def __init__(
            self, file, indent_level=0, indent=b"\t", writeHeader=1,
            sort_keys=True, skipkeys=False):

        if writeHeader:
            file.write(PLISTHEADER)
        _DumbXMLWriter.__init__(self, file, indent_level, indent)
        self._sort_keys = sort_keys
        self._skipkeys = skipkeys

    def write(self, value):
        self.writeln("<plist version=\"1.0\">")
        self.write_value(value)
        self.writeln("</plist>")

    def write_value(self, value):
        if isinstance(value, str):
            self.simple_element("string", value)

        elif value is True:
            self.simple_element("true")

        elif value is False:
            self.simple_element("false")

        elif isinstance(value, int):
            if -1 << 63 <= value < 1 << 64:
                self.simple_element("integer", "%d" % value)
            else:
                raise OverflowError(value)

        elif isinstance(value, float):
            self.simple_element("real", repr(value))

        elif isinstance(value, dict):
            self.write_dict(value)

        elif isinstance(value, Data):
            self.write_data(value)

        elif isinstance(value, (bytes, bytearray)):
            self.write_bytes(value)

        elif isinstance(value, datetime.datetime):
            self.simple_element("date", _date_to_string(value))

        elif isinstance(value, (tuple, list)):
            self.write_array(value)

        else:
            raise TypeError("unsupported type: %s" % type(value))

    def write_data(self, data):
        self.write_bytes(data.data)

    def write_bytes(self, data):
        self.begin_element("data")
        self._indent_level -= 1
        maxlinelength = max(
            16,
            76 - len(self.indent.replace(b"\t", b" " * 8) * self._indent_level))

        for line in _encode_base64(data, maxlinelength).split(b"\n"):
            if line:
                self.writeln(line)
        self._indent_level += 1
        self.end_element("data")

    def write_dict(self, d):
        if d:
            self.begin_element("dict")
            if self._sort_keys:
                items = sorted(d.items())
            else:
                items = d.items()

            for key, value in items:
                if not isinstance(key, str):
                    if self._skipkeys:
                        continue
                    raise TypeError("keys must be strings")
                self.simple_element("key", key)
                self.write_value(value)
            self.end_element("dict")

        else:
            self.simple_element("dict")

    def write_array(self, array):
        if array:
            self.begin_element("array")
            for value in array:
                self.write_value(value)
            self.end_element("array")

        else:
            self.simple_element("array")
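# Taken together (illustrative), dumps({"aKey": "aValue"}) produces, with one
# tab per nesting level:
#
#     <?xml version="1.0" encoding="UTF-8"?>
#     <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
#     <plist version="1.0">
#     <dict>
#         <key>aKey</key>
#         <string>aValue</string>
#     </dict>
#     </plist>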
def _is_fmt_xml(header):
    prefixes = (b'<?xml', b'<plist')

    for pfx in prefixes:
        if header.startswith(pfx):
            return True

    # Also check for alternative XML encodings, this is slightly
    # overkill because the Apple tools (and plistlib) will not
    # generate files with these encodings.
    for bom, encoding in (
            (codecs.BOM_UTF8, "utf-8"),
            (codecs.BOM_UTF16_BE, "utf-16-be"),
            (codecs.BOM_UTF16_LE, "utf-16-le"),
            # expat does not support utf-32
            #(codecs.BOM_UTF32_BE, "utf-32-be"),
            #(codecs.BOM_UTF32_LE, "utf-32-le"),
            ):
        if not header.startswith(bom):
            continue

        for start in prefixes:
            prefix = bom + start.decode('ascii').encode(encoding)
            if header[:len(prefix)] == prefix:
                return True

    return False

#
# Binary Plist
#


class InvalidFileException (ValueError):
    def __init__(self, message="Invalid file"):
        ValueError.__init__(self, message)


_BINARY_FORMAT = {1: 'B', 2: 'H', 4: 'L', 8: 'Q'}
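# _BINARY_FORMAT maps a byte width to the struct format code used to read and
# write big-endian unsigned integers of that width, e.g. (illustrative)
# struct.unpack('>Q', b'\x00' * 7 + b'\x2a') == (42,).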
_undefined = object()

class _BinaryPlistParser:
    """
    Read or write a binary plist file, following the description of the binary
    format. Raise InvalidFileException in case of error, otherwise return the
    root object.

    see also: http://opensource.apple.com/source/CF/CF-744.18/CFBinaryPList.c
    """
    def __init__(self, use_builtin_types, dict_type):
        self._use_builtin_types = use_builtin_types
        self._dict_type = dict_type

    def parse(self, fp):
        try:
            # The basic file format:
            # HEADER
            # object...
            # refid->offset...
            # TRAILER
            self._fp = fp
            self._fp.seek(-32, os.SEEK_END)
            trailer = self._fp.read(32)
            if len(trailer) != 32:
                raise InvalidFileException()
            (
                offset_size, self._ref_size, num_objects, top_object,
                offset_table_offset
            ) = struct.unpack('>6xBBQQQ', trailer)
            self._fp.seek(offset_table_offset)
            self._object_offsets = self._read_ints(num_objects, offset_size)
            self._objects = [_undefined] * num_objects
            return self._read_object(top_object)

        except (OSError, IndexError, struct.error, OverflowError,
                UnicodeDecodeError):
            raise InvalidFileException()

    def _get_size(self, tokenL):
        """ return the size of the next object."""
        if tokenL == 0xF:
            m = self._fp.read(1)[0] & 0x3
            s = 1 << m
            f = '>' + _BINARY_FORMAT[s]
            return struct.unpack(f, self._fp.read(s))[0]

        return tokenL

    def _read_ints(self, n, size):
        data = self._fp.read(size * n)
        if size in _BINARY_FORMAT:
            return struct.unpack('>' + _BINARY_FORMAT[size] * n, data)
        else:
            if not size or len(data) != size * n:
                raise InvalidFileException()
            return tuple(int.from_bytes(data[i: i + size], 'big')
                         for i in range(0, size * n, size))

    def _read_refs(self, n):
        return self._read_ints(n, self._ref_size)

    def _read_object(self, ref):
        """
        read the object by reference.

        May recursively read sub-objects (content of an array/dict/set)
        """
        result = self._objects[ref]
        if result is not _undefined:
            return result

        offset = self._object_offsets[ref]
        self._fp.seek(offset)
        token = self._fp.read(1)[0]
        tokenH, tokenL = token & 0xF0, token & 0x0F

        if token == 0x00:
            result = None

        elif token == 0x08:
            result = False

        elif token == 0x09:
            result = True

        # The referenced source code also mentions URL (0x0c, 0x0d) and
        # UUID (0x0e), but neither can be generated using the Cocoa libraries.

        elif token == 0x0f:
            result = b''

        elif tokenH == 0x10:  # int
            result = int.from_bytes(self._fp.read(1 << tokenL),
                                    'big', signed=tokenL >= 3)

        elif token == 0x22:  # real
            result = struct.unpack('>f', self._fp.read(4))[0]

        elif token == 0x23:  # real
            result = struct.unpack('>d', self._fp.read(8))[0]

        elif token == 0x33:  # date
            f = struct.unpack('>d', self._fp.read(8))[0]
            # timestamp 0 of binary plists corresponds to 1/1/2001
            # (year of Mac OS X 10.0), instead of 1/1/1970.
            result = (datetime.datetime(2001, 1, 1) +
                      datetime.timedelta(seconds=f))

        elif tokenH == 0x40:  # data
            s = self._get_size(tokenL)
            if self._use_builtin_types:
                result = self._fp.read(s)
            else:
                result = Data(self._fp.read(s))

        elif tokenH == 0x50:  # ascii string
            s = self._get_size(tokenL)
            result = self._fp.read(s).decode('ascii')

        elif tokenH == 0x60:  # unicode string
            s = self._get_size(tokenL)
            result = self._fp.read(s * 2).decode('utf-16be')

        # tokenH == 0x80 is documented as 'UID' and appears to be used for
        # keyed-archiving, not in plists.

        elif tokenH == 0xA0:  # array
            s = self._get_size(tokenL)
            obj_refs = self._read_refs(s)
            result = []
            self._objects[ref] = result
            result.extend(self._read_object(x) for x in obj_refs)

        # tokenH == 0xB0 is documented as 'ordset', but is not actually
        # implemented in the Apple reference code.

        # tokenH == 0xC0 is documented as 'set', but sets cannot be used in
        # plists.

        elif tokenH == 0xD0:  # dict
            s = self._get_size(tokenL)
            key_refs = self._read_refs(s)
            obj_refs = self._read_refs(s)
            result = self._dict_type()
            self._objects[ref] = result
            for k, o in zip(key_refs, obj_refs):
                result[self._read_object(k)] = self._read_object(o)

        else:
            raise InvalidFileException()

        self._objects[ref] = result
        return result
def _count_to_size(count):
    if count < 1 << 8:
        return 1

    elif count < 1 << 16:
        return 2

    elif count < 1 << 32:
        return 4

    else:
        return 8
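# In other words (illustrative), object counts and offsets that fit in one,
# two, four or eight bytes are stored with that width, e.g.
# _count_to_size(300) == 2 and _count_to_size(70000) == 4.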
_scalars = (str, int, float, datetime.datetime, bytes)

class _BinaryPlistWriter (object):
    def __init__(self, fp, sort_keys, skipkeys):
        self._fp = fp
        self._sort_keys = sort_keys
        self._skipkeys = skipkeys

    def write(self, value):

        # Flattened object list:
        self._objlist = []

        # Mappings from object->objectid
        # First dict has (type(object), object) as the key,
        # second dict is used when object is not hashable and
        # has id(object) as the key.
        self._objtable = {}
        self._objidtable = {}

        # Create list of all objects in the plist
        self._flatten(value)

        # Size of object references in serialized containers
        # depends on the number of objects in the plist.
        num_objects = len(self._objlist)
        self._object_offsets = [0]*num_objects
        self._ref_size = _count_to_size(num_objects)

        self._ref_format = _BINARY_FORMAT[self._ref_size]

        # Write file header
        self._fp.write(b'bplist00')

        # Write object list
        for obj in self._objlist:
            self._write_object(obj)

        # Write refnum->object offset table
        top_object = self._getrefnum(value)
        offset_table_offset = self._fp.tell()
        offset_size = _count_to_size(offset_table_offset)
        offset_format = '>' + _BINARY_FORMAT[offset_size] * num_objects
        self._fp.write(struct.pack(offset_format, *self._object_offsets))

        # Write trailer
        sort_version = 0
        trailer = (
            sort_version, offset_size, self._ref_size, num_objects,
            top_object, offset_table_offset
        )
        self._fp.write(struct.pack('>5xBBBQQQ', *trailer))

    def _flatten(self, value):
        # First check if the object is in the object table, not used for
        # containers to ensure that two subcontainers with the same contents
        # will be serialized as distinct values.
        if isinstance(value, _scalars):
            if (type(value), value) in self._objtable:
                return

        elif isinstance(value, Data):
            if (type(value.data), value.data) in self._objtable:
                return

        elif id(value) in self._objidtable:
            return

        # Add to objectreference map
        refnum = len(self._objlist)
        self._objlist.append(value)
        if isinstance(value, _scalars):
            self._objtable[(type(value), value)] = refnum
        elif isinstance(value, Data):
            self._objtable[(type(value.data), value.data)] = refnum
        else:
            self._objidtable[id(value)] = refnum

        # And finally recurse into containers
        if isinstance(value, dict):
            keys = []
            values = []
            items = value.items()
            if self._sort_keys:
                items = sorted(items)

            for k, v in items:
                if not isinstance(k, str):
                    if self._skipkeys:
                        continue
                    raise TypeError("keys must be strings")
                keys.append(k)
                values.append(v)

            for o in itertools.chain(keys, values):
                self._flatten(o)

        elif isinstance(value, (list, tuple)):
            for o in value:
                self._flatten(o)

    def _getrefnum(self, value):
        if isinstance(value, _scalars):
            return self._objtable[(type(value), value)]
        elif isinstance(value, Data):
            return self._objtable[(type(value.data), value.data)]
        else:
            return self._objidtable[id(value)]

    def _write_size(self, token, size):
        if size < 15:
            self._fp.write(struct.pack('>B', token | size))

        elif size < 1 << 8:
            self._fp.write(struct.pack('>BBB', token | 0xF, 0x10, size))

        elif size < 1 << 16:
            self._fp.write(struct.pack('>BBH', token | 0xF, 0x11, size))

        elif size < 1 << 32:
            self._fp.write(struct.pack('>BBL', token | 0xF, 0x12, size))

        else:
            self._fp.write(struct.pack('>BBQ', token | 0xF, 0x13, size))
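    # Size encoding (illustrative): counts below 15 fit into the low nibble of
    # the marker byte, larger counts are spilled into a following int object,
    # e.g. _write_size(0x50, 3) emits b'\x53' while _write_size(0x50, 100)
    # emits b'\x5f\x10\x64'.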
    def _write_object(self, value):
        ref = self._getrefnum(value)
        self._object_offsets[ref] = self._fp.tell()
        if value is None:
            self._fp.write(b'\x00')

        elif value is False:
            self._fp.write(b'\x08')

        elif value is True:
            self._fp.write(b'\x09')

        elif isinstance(value, int):
            if value < 0:
                try:
                    self._fp.write(struct.pack('>Bq', 0x13, value))
                except struct.error:
                    raise OverflowError(value) from None
            elif value < 1 << 8:
                self._fp.write(struct.pack('>BB', 0x10, value))
            elif value < 1 << 16:
                self._fp.write(struct.pack('>BH', 0x11, value))
            elif value < 1 << 32:
                self._fp.write(struct.pack('>BL', 0x12, value))
            elif value < 1 << 63:
                self._fp.write(struct.pack('>BQ', 0x13, value))
            elif value < 1 << 64:
                self._fp.write(b'\x14' + value.to_bytes(16, 'big', signed=True))
            else:
                raise OverflowError(value)

        elif isinstance(value, float):
            self._fp.write(struct.pack('>Bd', 0x23, value))

        elif isinstance(value, datetime.datetime):
            f = (value - datetime.datetime(2001, 1, 1)).total_seconds()
            self._fp.write(struct.pack('>Bd', 0x33, f))

        elif isinstance(value, Data):
            self._write_size(0x40, len(value.data))
            self._fp.write(value.data)

        elif isinstance(value, (bytes, bytearray)):
            self._write_size(0x40, len(value))
            self._fp.write(value)

        elif isinstance(value, str):
            try:
                t = value.encode('ascii')
                self._write_size(0x50, len(value))
            except UnicodeEncodeError:
                t = value.encode('utf-16be')
                self._write_size(0x60, len(t) // 2)

            self._fp.write(t)

        elif isinstance(value, (list, tuple)):
            refs = [self._getrefnum(o) for o in value]
            s = len(refs)
            self._write_size(0xA0, s)
            self._fp.write(struct.pack('>' + self._ref_format * s, *refs))

        elif isinstance(value, dict):
            keyRefs, valRefs = [], []

            if self._sort_keys:
                rootItems = sorted(value.items())
            else:
                rootItems = value.items()

            for k, v in rootItems:
                if not isinstance(k, str):
                    if self._skipkeys:
                        continue
                    raise TypeError("keys must be strings")
                keyRefs.append(self._getrefnum(k))
                valRefs.append(self._getrefnum(v))

            s = len(keyRefs)
            self._write_size(0xD0, s)
            self._fp.write(struct.pack('>' + self._ref_format * s, *keyRefs))
            self._fp.write(struct.pack('>' + self._ref_format * s, *valRefs))

        else:
            raise TypeError(value)


def _is_fmt_binary(header):
    return header[:8] == b'bplist00'
#
# Generic bits
#

_FORMATS = {
    FMT_XML: dict(
        detect=_is_fmt_xml,
        parser=_PlistParser,
        writer=_PlistWriter,
    ),
    FMT_BINARY: dict(
        detect=_is_fmt_binary,
        parser=_BinaryPlistParser,
        writer=_BinaryPlistWriter,
    )
}


def load(fp, *, fmt=None, use_builtin_types=True, dict_type=dict):
    """Read a .plist file. 'fp' should be a (readable) file object.
    Return the unpacked root object (which usually is a dictionary).
    """
    if fmt is None:
        header = fp.read(32)
        fp.seek(0)
        for info in _FORMATS.values():
            if info['detect'](header):
                P = info['parser']
                break

        else:
            raise InvalidFileException()

    else:
        P = _FORMATS[fmt]['parser']

    p = P(use_builtin_types=use_builtin_types, dict_type=dict_type)
    return p.parse(fp)


def loads(value, *, fmt=None, use_builtin_types=True, dict_type=dict):
    """Read a .plist file from a bytes object.
    Return the unpacked root object (which usually is a dictionary).
    """
    fp = BytesIO(value)
    return load(
        fp, fmt=fmt, use_builtin_types=use_builtin_types, dict_type=dict_type)


def dump(value, fp, *, fmt=FMT_XML, sort_keys=True, skipkeys=False):
    """Write 'value' to a .plist file. 'fp' should be a (writable)
    file object.
    """
    if fmt not in _FORMATS:
        raise ValueError("Unsupported format: %r" % (fmt,))

    writer = _FORMATS[fmt]["writer"](fp, sort_keys=sort_keys, skipkeys=skipkeys)
    writer.write(value)


def dumps(value, *, fmt=FMT_XML, skipkeys=False, sort_keys=True):
    """Return a bytes object with the contents for a .plist file.
    """
    fp = BytesIO()
    dump(value, fp, fmt=fmt, skipkeys=skipkeys, sort_keys=sort_keys)
    return fp.getvalue()
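# A minimal self-test sketch (illustrative only): round-trips a small
# dictionary through both supported formats when the module is run directly,
# without affecting normal imports.
if __name__ == "__main__":
    _sample = {
        "aString": "Doodah",
        "aList": ["A", "B", 12, 32.1, [1, 2, 3]],
        "anInt": 728,
        "aDict": {"aTrueValue": True, "aFalseValue": False},
        "someData": b"<binary gunk>",
    }
    for _fmt in (FMT_XML, FMT_BINARY):
        assert loads(dumps(_sample, fmt=_fmt)) == _sample, _fmt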