Whitespace normalization. Top level of Lib now fixed-point for reindent.py!

Tim Peters 2001-01-15 03:34:38 +00:00
parent b90f89a496
commit e119006e7d
12 changed files with 424 additions and 424 deletions
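
For readers who have not used it, Tools/scripts/reindent.py rewrites Python source so that a second run produces no further changes, which is the "fixed-point" the commit message refers to. Below is a minimal sketch of that kind of normalization, assuming only tab expansion and trailing-whitespace stripping (the real script also reindents code to uniform 4-space levels); it is an illustration, not the actual tool.

def normalize_whitespace(text, tabsize=8):
    # Expand tabs and strip trailing whitespace, line by line.
    lines = []
    for line in text.splitlines():
        lines.append(line.expandtabs(tabsize).rstrip())
    result = "\n".join(lines)
    if result:
        result = result + "\n"    # end the text with exactly one newline
    return result

# Fixed-point check: normalizing already-normalized text is a no-op.
sample = "def f():\n\tx = 1   \n\treturn x\t\n"
once = normalize_whitespace(sample)
assert normalize_whitespace(once) == once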

Lib/UserString.py

@@ -2,7 +2,7 @@
## vim:ts=4:et:nowrap
"""A user-defined wrapper around string objects

Note: string objects have grown methods in Python 1.6
This module requires Python 1.6 or later.
"""
from types import StringType, UnicodeType
@@ -14,7 +14,7 @@ class UserString:
            self.data = seq
        elif isinstance(seq, UserString):
            self.data = seq.data[:]
        else:
            self.data = str(seq)
    def __str__(self): return str(self.data)
    def __repr__(self): return repr(self.data)
@@ -76,15 +76,15 @@ class UserString:
                return self.__class__(self.data.encode(encoding, errors))
            else:
                return self.__class__(self.data.encode(encoding))
        else:
            return self.__class__(self.data.encode())
    def endswith(self, suffix, start=0, end=sys.maxint):
        return self.data.endswith(suffix, start, end)
    def expandtabs(self, tabsize=8):
        return self.__class__(self.data.expandtabs(tabsize))
    def find(self, sub, start=0, end=sys.maxint):
        return self.data.find(sub, start, end)
    def index(self, sub, start=0, end=sys.maxint):
        return self.data.index(sub, start, end)
    def isalpha(self): return self.data.isalpha()
    def isalnum(self): return self.data.isalnum()
@@ -99,23 +99,23 @@ class UserString:
    def ljust(self, width): return self.__class__(self.data.ljust(width))
    def lower(self): return self.__class__(self.data.lower())
    def lstrip(self): return self.__class__(self.data.lstrip())
    def replace(self, old, new, maxsplit=-1):
        return self.__class__(self.data.replace(old, new, maxsplit))
    def rfind(self, sub, start=0, end=sys.maxint):
        return self.data.rfind(sub, start, end)
    def rindex(self, sub, start=0, end=sys.maxint):
        return self.data.rindex(sub, start, end)
    def rjust(self, width): return self.__class__(self.data.rjust(width))
    def rstrip(self): return self.__class__(self.data.rstrip())
    def split(self, sep=None, maxsplit=-1):
        return self.data.split(sep, maxsplit)
    def splitlines(self, keepends=0): return self.data.splitlines(keepends)
    def startswith(self, prefix, start=0, end=sys.maxint):
        return self.data.startswith(prefix, start, end)
    def strip(self): return self.__class__(self.data.strip())
    def swapcase(self): return self.__class__(self.data.swapcase())
    def title(self): return self.__class__(self.data.title())
    def translate(self, *args):
        return self.__class__(self.data.translate(*args))
    def upper(self): return self.__class__(self.data.upper())
@@ -136,7 +136,7 @@ class MutableString(UserString):
    A faster and better solution is to rewrite your program using lists."""
    def __init__(self, string=""):
        self.data = string
    def __hash__(self):
        raise TypeError, "unhashable type (it is mutable)"
    def __setitem__(self, index, sub):
        if index < 0 or index >= len(self.data): raise IndexError
@@ -157,7 +157,7 @@ class MutableString(UserString):
            self.data = self.data[:start] + self.data[end:]
    def immutable(self):
        return UserString(self.data)

if __name__ == "__main__":
    # execute the regression test to stdout, if called as a script:
    import os
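
A short usage sketch for the module shown above (assuming the Python 2.x UserString module as in the diff): UserString wraps a string and returns wrapped results, while MutableString additionally allows item assignment.

import UserString

s = UserString.UserString("spam")
t = s.upper()                      # string methods return a new UserString
assert t.data == "SPAM"

m = UserString.MutableString("spam")
m[0] = "S"                         # MutableString supports item assignment
assert str(m) == "Spam"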

Lib/urllib.py

@@ -551,11 +551,11 @@ class FancyURLopener(URLopener):
            if match:
                scheme, realm = match.groups()
                if scheme.lower() == 'basic':
                    name = 'retry_' + self.type + '_basic_auth'
                    if data is None:
                        return getattr(self,name)(url, realm)
                    else:
                        return getattr(self,name)(url, realm, data)

    def retry_http_basic_auth(self, url, realm, data=None):
        host, selector = splithost(url)
@@ -571,14 +571,14 @@ class FancyURLopener(URLopener):
        return self.open(newurl, data)

    def retry_https_basic_auth(self, url, realm, data=None):
        host, selector = splithost(url)
        i = host.find('@') + 1
        host = host[i:]
        user, passwd = self.get_user_passwd(host, realm, i)
        if not (user or passwd): return None
        host = user + ':' + passwd + '@' + host
        newurl = '//' + host + selector
        return self.open_https(newurl)

    def get_user_passwd(self, host, realm, clear_cache = 0):
        key = realm + '@' + host.lower()

Lib/urllib2.py

@@ -1,12 +1,12 @@
"""An extensible library for opening URLs using a variety of protocols

The simplest way to use this module is to call the urlopen function,
which accepts a string containing a URL or a Request object (described
below). It opens the URL and returns the results as file-like
object; the returned object has some extra methods described below.

The OpenerDirectory manages a collection of Handler objects that do
all the actual work. Each Handler implements a particular protocol or
option. The OpenerDirector is a composite object that invokes the
Handlers needed to open the requested URL. For example, the
HTTPHandler performs HTTP GET and POST requests and deals with
@@ -16,7 +16,7 @@ with digest authentication.
urlopen(url, data=None) -- basic usage is that same as original
urllib. pass the url and optionally data to post to an HTTP URL, and
get a file-like object back. One difference is that you can also pass
a Request instance instead of URL. Raises a URLError (subclass of
IOError); for HTTP errors, raises an HTTPError, which can also be
treated as a valid response.
@@ -42,7 +42,7 @@ exceptions:
URLError-- a subclass of IOError, individual protocols have their own
specific subclass

HTTPError-- also a valid HTTP response, so you can treat an HTTP error
as an exceptional event or valid response

internals:
@@ -57,7 +57,7 @@ import urllib2
authinfo = urllib2.HTTPBasicAuthHandler()
authinfo.add_password('realm', 'host', 'username', 'password')

# build a new opener that adds authentication and caching FTP handlers
opener = urllib2.build_opener(authinfo, urllib2.CacheFTPHandler)

# install it
@@ -73,7 +73,7 @@ f = urllib2.urlopen('http://www.python.org/')
# authentication for some reason but fails, how should the error be
# signalled? The client needs to know the HTTP error code. But if
# the handler knows that the problem was, e.g., that it didn't know
# that hash algo that requested in the challenge, it would be good to
# pass that information along to the client, too.

# XXX to do:
@@ -141,7 +141,7 @@ def install_opener(opener):
    _opener = opener

# do these error classes make sense?
# make sure all of the IOError stuff is overridden. we just want to be
# subtypes.

class URLError(IOError):
@@ -165,7 +165,7 @@ class HTTPError(URLError, addinfourl):
        self.fp = fp
        # XXX
        self.filename = url

    def __str__(self):
        return 'HTTP Error %s: %s' % (self.code, self.msg)
@@ -192,7 +192,7 @@ class Request:
    def __getattr__(self, attr):
        # XXX this is a fallback mechanism to guard against these
        # methods getting called in a non-standard order. this may be
        # too complicated and/or unnecessary.
        # XXX should the __r_XXX attributes be public?
        if attr[:12] == '_Request__r_':
@@ -259,7 +259,7 @@ class OpenerDirector:
        for meth in get_methods(handler):
            if meth[-5:] == '_open':
                protocol = meth[:-5]
                if self.handle_open.has_key(protocol):
                    self.handle_open[protocol].append(handler)
                else:
                    self.handle_open[protocol] = [handler]
@@ -285,7 +285,7 @@ class OpenerDirector:
        if added:
            self.handlers.append(handler)
            handler.add_parent(self)

    def __del__(self):
        self.close()
@@ -314,9 +314,9 @@ class OpenerDirector:
        if data is not None:
            req.add_data(data)
        assert isinstance(req, Request) # really only care about interface

        result = self._call_chain(self.handle_open, 'default',
                                  'default_open', req)
        if result:
            return result
@@ -381,7 +381,7 @@ def get_methods(inst):
# XXX probably also want an abstract factory that knows things like
# the fact that a ProxyHandler needs to get inserted first.
# would also know when it makes sense to skip a superclass in favor of
# a subclass and when it might make sense to include both

def build_opener(*handlers):
    """Create an opener object from a list of handlers.
@@ -393,7 +393,7 @@ def build_opener(*handlers):
    If any of the handlers passed as arguments are subclasses of the
    default handlers, the default handlers will not be used.
    """

    opener = OpenerDirector()
    default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
                       HTTPDefaultErrorHandler, HTTPRedirectHandler,
@@ -472,7 +472,7 @@ class ProxyHandler(BaseHandler):
        assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
        self.proxies = proxies
        for type, url in proxies.items():
            setattr(self, '%s_open' % type,
                    lambda r, proxy=url, type=type, meth=self.proxy_open: \
                    meth(r, proxy, type))
@@ -574,7 +574,7 @@ class HTTPPasswordMgr:
        if len(common) == len(base[1]):
            return 1
        return 0

class HTTPBasicAuthHandler(BaseHandler):
    rx = re.compile('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"')
@@ -590,8 +590,8 @@ class HTTPBasicAuthHandler(BaseHandler):
    # if __current_realm is not None, then the server must have
    # refused our name/password and is asking for authorization
    # again. must be careful to set it to None on successful
    # return.

    def http_error_401(self, req, fp, code, msg, headers):
        # XXX could be mult. headers
        authreq = headers.get('www-authenticate', None)
@@ -674,7 +674,7 @@ class HTTPDigestAuthHandler(BaseHandler):
            return None
        user, pw = self.passwd.find_user_password(realm,
                                                  req.get_full_url())
        if user is None:
            return None
@@ -724,8 +724,8 @@ def encode_digest(digest):
        n = ord(c) & 0xf
        hexrep.append(hex(n)[-1])
    return string.join(hexrep, '')

class HTTPHandler(BaseHandler):
    def http_open(self, req):
        # XXX devise a new mechanism to specify user/password
@@ -745,7 +745,7 @@ class HTTPHandler(BaseHandler):
            h.putrequest('GET', req.get_selector())
        except socket.error, err:
            raise URLError(err)

        # XXX proxies would have different host here
        h.putheader('Host', host)
        for args in self.parent.addheaders:
@@ -813,7 +813,7 @@ def parse_http_list(s):
                start = i
                inquote = 0
            else:
                i = i + q
        else:
            if c < q:
                list.append(s[start:i+c])
@@ -838,7 +838,7 @@ class FileHandler(BaseHandler):
    names = None
    def get_names(self):
        if FileHandler.names is None:
            FileHandler.names = (socket.gethostbyname('localhost'),
                                 socket.gethostbyname(socket.gethostname()))
        return FileHandler.names
@@ -967,7 +967,7 @@ class GopherHandler(BaseHandler):
class OpenerFactory:

    default_handlers = [UnknownHandler, HTTPHandler,
                        HTTPDefaultErrorHandler, HTTPRedirectHandler,
                        FTPHandler, FileHandler]
    proxy_handlers = [ProxyHandler]
    handlers = []
@@ -990,7 +990,7 @@ class OpenerFactory:
            opener.add_handler(ph)

if __name__ == "__main__":
    # XXX some of the test code depends on machine configurations that
    # are internal to CNRI. Need to set up a public server with the
    # right authentication configuration for test purposes.
    if socket.gethostname() == 'bitdiddle':
@@ -1030,11 +1030,11 @@ if __name__ == "__main__":
    bauth = HTTPBasicAuthHandler()
    bauth.add_password('basic_test_realm', localhost, 'jhylton',
                       'password')
    dauth = HTTPDigestAuthHandler()
    dauth.add_password('digest_test_realm', localhost, 'jhylton',
                       'password')

    cfh = CacheFTPHandler()
    cfh.setTimeout(1)

Lib/urlparse.py

@@ -6,25 +6,25 @@ UC Irvine, June 1995.
# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'wais', 'file',
                 'https', 'shttp',
                 'prospero', 'rtsp', 'rtspu', '']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet', 'wais',
               'file',
               'https', 'shttp', 'snews',
               'prospero', 'rtsp', 'rtspu', '']
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', 'telnet', 'wais',
                    'snews', 'sip',
                    ]
uses_params = ['ftp', 'hdl', 'prospero', 'http',
               'https', 'shttp', 'rtsp', 'rtspu', 'sip',
               '']
uses_query = ['http', 'wais',
              'https', 'shttp',
              'gopher', 'rtsp', 'rtspu', 'sip',
              '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news', 'nntp', 'wais',
                 'https', 'shttp', 'snews',
                 'file', 'prospero', '']

# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
@@ -36,158 +36,158 @@ MAX_CACHE_SIZE = 20
_parse_cache = {}

def clear_cache():
    """Clear the parse cache."""
    global _parse_cache
    _parse_cache = {}

def urlparse(url, scheme = '', allow_fragments = 1):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    key = url, scheme, allow_fragments
    cached = _parse_cache.get(key, None)
    if cached:
        return cached
    if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
        clear_cache()
    netloc = path = params = query = fragment = ''
    i = url.find(':')
    if i > 0:
        if url[:i] == 'http': # optimize the common case
            scheme = url[:i].lower()
            url = url[i+1:]
            if url[:2] == '//':
                i = url.find('/', 2)
                if i < 0:
                    i = len(url)
                netloc = url[2:i]
                url = url[i:]
            if allow_fragments:
                i = url.rfind('#')
                if i >= 0:
                    fragment = url[i+1:]
                    url = url[:i]
            i = url.find('?')
            if i >= 0:
                query = url[i+1:]
                url = url[:i]
            i = url.find(';')
            if i >= 0:
                params = url[i+1:]
                url = url[:i]
            tuple = scheme, netloc, url, params, query, fragment
            _parse_cache[key] = tuple
            return tuple
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            scheme, url = url[:i].lower(), url[i+1:]
    if scheme in uses_netloc:
        if url[:2] == '//':
            i = url.find('/', 2)
            if i < 0:
                i = len(url)
            netloc, url = url[2:i], url[i:]
    if allow_fragments and scheme in uses_fragment:
        i = url.rfind('#')
        if i >= 0:
            url, fragment = url[:i], url[i+1:]
    if scheme in uses_query:
        i = url.find('?')
        if i >= 0:
            url, query = url[:i], url[i+1:]
    if scheme in uses_params:
        i = url.find(';')
        if i >= 0:
            url, params = url[:i], url[i+1:]
    tuple = scheme, netloc, url, params, query, fragment
    _parse_cache[key] = tuple
    return tuple

def urlunparse((scheme, netloc, url, params, query, fragment)):
    """Put a parsed URL back together again. This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    if netloc or (scheme in uses_netloc and url[:2] == '//'):
        if url and url[:1] != '/': url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = scheme + ':' + url
    if params:
        url = url + ';' + params
    if query:
        url = url + '?' + query
    if fragment:
        url = url + '#' + fragment
    return url

def urljoin(base, url, allow_fragments = 1):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    if not base:
        return url
    if not url:
        return base
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
            urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
            urlparse(url, bscheme, allow_fragments)
    if scheme != bscheme or scheme not in uses_relative:
        return url
    if scheme in uses_netloc:
        if netloc:
            return urlunparse((scheme, netloc, path,
                               params, query, fragment))
        netloc = bnetloc
    if path[:1] == '/':
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    if not path:
        if not params:
            params = bparams
            if not query:
                query = bquery
        return urlunparse((scheme, netloc, bpath,
                           params, query, fragment))
    segments = bpath.split('/')[:-1] + path.split('/')
    # XXX The stuff below is bogus in various ways...
    if segments[-1] == '.':
        segments[-1] = ''
    while '.' in segments:
        segments.remove('.')
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if (segments[i] == '..'
                and segments[i-1] not in ('', '..')):
                del segments[i-1:i+1]
                break
            i = i+1
        else:
            break
    if segments == ['', '..']:
        segments[-1] = ''
    elif len(segments) >= 2 and segments[-1] == '..':
        segments[-2:] = ['']
    return urlunparse((scheme, netloc, '/'.join(segments),
                       params, query, fragment))

def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment. If
    the URL contained no fragments, the second element is the
    empty string.
    """
    s, n, p, a, q, frag = urlparse(url)
    defrag = urlunparse((s, n, p, a, q, ''))
    return defrag, frag

test_input = """
@@ -226,34 +226,34 @@ test_input = """
# XXX The result for //g is actually http://g/; is this a problem?

def test():
    import sys
    base = ''
    if sys.argv[1:]:
        fn = sys.argv[1]
        if fn == '-':
            fp = sys.stdin
        else:
            fp = open(fn)
    else:
        import StringIO
        fp = StringIO.StringIO(test_input)
    while 1:
        line = fp.readline()
        if not line: break
        words = line.split()
        if not words:
            continue
        url = words[0]
        parts = urlparse(url)
        print '%-10s : %s' % (url, parts)
        abs = urljoin(base, url)
        if not base:
            base = abs
        wrapped = '<URL:%s>' % abs
        print '%-10s = %s' % (url, wrapped)
        if len(words) == 3 and words[1] == '=':
            if wrapped != words[2]:
                print 'EXPECTED', words[2], '!!!!!!!!!!'

if __name__ == '__main__':
    test()
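
A quick usage note for the functions in this diff (a minimal sketch against the urlparse API above; the example URLs are made up):

import urlparse

parts = urlparse.urlparse('http://www.python.org/doc/index.html;v1?x=1#intro')
# ('http', 'www.python.org', '/doc/index.html', 'v1', 'x=1', 'intro')

url = urlparse.urlunparse(parts)     # round-trips to an equivalent URL

# urljoin resolves a relative reference against a base URL
joined = urlparse.urljoin('http://www.python.org/doc/', 'lib/urlparse.html')
# 'http://www.python.org/doc/lib/urlparse.html'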

Lib/uu.py

@@ -3,12 +3,12 @@
# Copyright 1994 by Lance Ellinghouse
# Cathedral City, California Republic, United States of America.
# All Rights Reserved
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Lance Ellinghouse
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
@@ -154,7 +154,7 @@ def test():
        print ' -d: Decode (in stead of encode)'
        print ' -t: data is text, encoded format unix-compatible text'
        sys.exit(1)

    for o, a in optlist:
        if o == '-d': dopt = 1
        if o == '-t': topt = 1

Lib/warnings.py

@@ -131,29 +131,29 @@ def _processoptions(args):
# Helper for _processoptions()
def _setoption(arg):
    parts = arg.split(':')
    if len(parts) > 5:
        raise _OptionError("too many fields (max 5): %s" % `arg`)
    while len(parts) < 5:
        parts.append('')
    action, message, category, module, lineno = [s.strip()
                                                 for s in parts]
    action = _getaction(action)
    message = re.escape(message)
    category = _getcategory(category)
    module = re.escape(module)
    if module:
        module = module + '$'
    if lineno:
        try:
            lineno = int(lineno)
            if lineno < 0:
                raise ValueError
        except (ValueError, OverflowError):
            raise _OptionError("invalid lineno %s" % `lineno`)
    else:
        lineno = 0
    filterwarnings(action, message, category, module, lineno)

# Helper for _setoption()
def _getaction(action):
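
For context, the colon-separated fields parsed by _setoption above are the same ones accepted by Python's -W command-line option. The lines below are a hedged sketch of an equivalent programmatic filter; the warning category is an arbitrary example.

# python -W "error::DeprecationWarning" script.py
# has roughly the same effect as running script.py after:
import warnings
warnings.filterwarnings("error", category=DeprecationWarning)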

Lib/wave.py

@@ -395,7 +395,7 @@ class Wave_write:
    def getmarkers(self):
        return None

    def tell(self):
        return self._nframeswritten

Lib/webbrowser.py

@@ -122,7 +122,7 @@ class Konqueror:
        return not rc

    def open(self, url, new=1):
        # XXX currently I know no way to prevent KFM from opening a new win.
        self.open_new(url)

    def open_new(self, url):

Lib/whrandom.py

@@ -1,21 +1,21 @@
"""Wichman-Hill random number generator.

Wichmann, B. A. & Hill, I. D. (1982)
Algorithm AS 183:
An efficient and portable pseudo-random number generator
Applied Statistics 31 (1982) 188-190

see also:
Correction to Algorithm AS 183
Applied Statistics 33 (1984) 123

McLeod, A. I. (1985)
A remark on Algorithm AS 183
Applied Statistics 34 (1985),198-200

USE:
whrandom.random() yields double precision random numbers
uniformly distributed between 0 and 1.

whrandom.seed(x, y, z) must be called before whrandom.random()
@@ -38,96 +38,96 @@ down in the serial case by using a lock here.)
class whrandom:
    def __init__(self, x = 0, y = 0, z = 0):
        """Initialize an instance.
        Without arguments, initialize from current time.
        With arguments (x, y, z), initialize from them."""
        self.seed(x, y, z)

    def seed(self, x = 0, y = 0, z = 0):
        """Set the seed from (x, y, z).
        These must be integers in the range [0, 256)."""
        if not type(x) == type(y) == type(z) == type(0):
            raise TypeError, 'seeds must be integers'
        if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
            raise ValueError, 'seeds must be in range(0, 256)'
        if 0 == x == y == z:
            # Initialize from current time
            import time
            t = long(time.time() * 256)
            t = int((t&0xffffff) ^ (t>>24))
            t, x = divmod(t, 256)
            t, y = divmod(t, 256)
            t, z = divmod(t, 256)
        # Zero is a poor seed, so substitute 1
        self._seed = (x or 1, y or 1, z or 1)

    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # This part is thread-unsafe:
        # BEGIN CRITICAL SECTION
        x, y, z = self._seed
        #
        x = (171 * x) % 30269
        y = (172 * y) % 30307
        z = (170 * z) % 30323
        #
        self._seed = x, y, z
        # END CRITICAL SECTION
        #
        return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0

    def uniform(self, a, b):
        """Get a random number in the range [a, b)."""
        return a + (b-a) * self.random()

    def randint(self, a, b):
        """Get a random integer in the range [a, b] including
        both end points.
        (Deprecated; use randrange below.)"""
        return self.randrange(a, b+1)

    def choice(self, seq):
        """Choose a random element from a non-empty sequence."""
        return seq[int(self.random() * len(seq))]

    def randrange(self, start, stop=None, step=1, int=int, default=None):
        """Choose a random item from range(start, stop[, step]).
        This fixes the problem with randint() which includes the
        endpoint; in Python this is usually not what you want.
        Do not supply the 'int' and 'default' arguments."""
        # This code is a bit messy to make it fast for the
        # common case while still doing adequate error checking
        istart = int(start)
        if istart != start:
            raise ValueError, "non-integer arg 1 for randrange()"
        if stop is default:
            if istart > 0:
                return int(self.random() * istart)
            raise ValueError, "empty range for randrange()"
        istop = int(stop)
        if istop != stop:
            raise ValueError, "non-integer stop for randrange()"
        if step == 1:
            if istart < istop:
                return istart + int(self.random() *
                                    (istop - istart))
            raise ValueError, "empty range for randrange()"
        istep = int(step)
        if istep != step:
            raise ValueError, "non-integer step for randrange()"
        if istep > 0:
            n = (istop - istart + istep - 1) / istep
        elif istep < 0:
            n = (istop - istart + istep + 1) / istep
        else:
            raise ValueError, "zero step for randrange()"
        if n <= 0:
            raise ValueError, "empty range for randrange()"
        return istart + istep*int(self.random() * n)

# Initialize from the current time
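
The generator shown above can be driven directly; a small usage sketch:

import whrandom

gen = whrandom.whrandom()     # seeded from the current time
gen.seed(1, 2, 3)             # or reseed with three integers in [0, 256)
r = gen.random()              # double precision float in [0.0, 1.0)
n = gen.randrange(10)         # integer in [0, 10)
assert 0.0 <= r < 1.0 and 0 <= n < 10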

Lib/xdrlib.py

@@ -29,7 +29,7 @@ class ConversionError(Error):
    pass

class Packer:
    """Pack various data representations into a buffer."""
@@ -106,7 +106,7 @@ class Packer:
        self.pack_farray(n, list, pack_item)

class Unpacker:
    """Unpacks various data representations from the given buffer."""
@@ -220,7 +220,7 @@ class Unpacker:
        n = self.unpack_uint()
        return self.unpack_farray(n, unpack_item)

# test suite
def _test():
    p = Packer()
@@ -274,6 +274,6 @@ def _test():
        print 'ConversionError:', var.msg
        count = count + 1

if __name__ == '__main__':
    _test()

Lib/xmllib.py

@@ -250,9 +250,9 @@ class XMLParser:
                break
            res = interesting.search(rawdata, i)
            if res:
                j = res.start(0)
            else:
                j = n
            if i < j:
                data = rawdata[i:j]
                if self.__at_start and space.match(data) is None:

Lib/zipfile.py

@@ -6,13 +6,13 @@ import struct, os, time
import binascii

try:
    import zlib # We may need its compression method
except:
    zlib = None

class BadZipfile(Exception):
    pass
error = BadZipfile # The exception raised by this module

# constants for Zip file compression methods
ZIP_STORED = 0
@@ -35,11 +35,11 @@ def is_zipfile(filename):
    """
    try:
        fpin = open(filename, "rb")
        fpin.seek(-22, 2) # Seek to end-of-file record
        endrec = fpin.read()
        fpin.close()
        if endrec[0:4] == "PK\005\006" and endrec[-2:] == "\000\000":
            return 1 # file has correct magic number
    except:
        pass
@@ -48,26 +48,26 @@ class ZipInfo:
    """Class with attributes describing each file in the ZIP archive."""

    def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
        self.filename = filename # Name of the file in the archive
        self.date_time = date_time # year, month, day, hour, min, sec
        # Standard values:
        self.compress_type = ZIP_STORED # Type of compression for the file
        self.comment = "" # Comment for each file
        self.extra = "" # ZIP extra data
        self.create_system = 0 # System which created ZIP archive
        self.create_version = 20 # Version which created ZIP archive
        self.extract_version = 20 # Version needed to extract archive
        self.reserved = 0 # Must be zero
        self.flag_bits = 0 # ZIP flag bits
        self.volume = 0 # Volume number of file header
        self.internal_attr = 0 # Internal attributes
        self.external_attr = 0 # External file attributes
        # Other attributes are set by class ZipFile:
        # header_offset Byte offset to the file header
        # file_offset Byte offset to the start of the file data
        # CRC CRC-32 of the uncompressed file
        # compress_size Size of the compressed file
        # file_size Size of the uncompressed file

    def FileHeader(self):
        """Return the per-file header as a string."""
@@ -75,12 +75,12 @@ class ZipInfo:
        dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
        dostime = dt[3] << 11 | dt[4] << 5 | dt[5] / 2
        if self.flag_bits & 0x08:
            # Set these to zero because we write them after the file data
            CRC = compress_size = file_size = 0
        else:
            CRC = self.CRC
            compress_size = self.compress_size
            file_size = self.file_size
        header = struct.pack(structFileHeader, stringFileHeader,
                 self.extract_version, self.reserved, self.flag_bits,
                 self.compress_type, dostime, dosdate, CRC,
@@ -102,10 +102,10 @@ class ZipFile:
                "Compression requires the (missing) zlib module"
        else:
            raise RuntimeError, "That compression method is not supported"
        self.debug = 0 # Level of printing: 0 through 3
        self.NameToInfo = {} # Find file info given name
        self.filelist = [] # List of ZipInfo instances for archive
        self.compression = compression # Method of compression
        self.filename = filename
        self.mode = key = mode[0]
        if key == 'r':
@@ -115,14 +115,14 @@ class ZipFile:
            self.fp = open(filename, "wb")
        elif key == 'a':
            fp = self.fp = open(filename, "r+b")
            fp.seek(-22, 2) # Seek to end-of-file record
            endrec = fp.read()
            if endrec[0:4] == stringEndArchive and \
               endrec[-2:] == "\000\000":
                self._GetContents() # file is a zip file
                # seek to start of directory and overwrite
                fp.seek(self.start_dir, 0)
            else: # file is not a zip file, just append
                fp.seek(0, 2)
        else:
            raise RuntimeError, 'Mode must be "r", "w" or "a"'
@@ -130,16 +130,16 @@ class ZipFile:
    def _GetContents(self):
        """Read in the table of contents for the ZIP file."""
        fp = self.fp
        fp.seek(-22, 2) # Start of end-of-archive record
        filesize = fp.tell() + 22 # Get file size
        endrec = fp.read(22) # Archive must not end with a comment!
        if endrec[0:4] != stringEndArchive or endrec[-2:] != "\000\000":
            raise BadZipfile, "File is not a zip file, or ends with a comment"
        endrec = struct.unpack(structEndArchive, endrec)
        if self.debug > 1:
            print endrec
        size_cd = endrec[5] # bytes in central directory
        offset_cd = endrec[6] # offset of central directory
        x = filesize - 22 - size_cd
        # "concat" is zero, unless zip was concatenated to another file
        concat = x - offset_cd
@@ -211,7 +211,7 @@ class ZipFile:
        """Read all the files and check the CRC."""
        for zinfo in self.filelist:
            try:
                self.read(zinfo.filename) # Check CRC-32
            except:
                return zinfo.filename
@@ -256,7 +256,7 @@ class ZipFile:
    def _writecheck(self, zinfo):
        """Check for errors before writing a file to the archive."""
        if self.NameToInfo.has_key(zinfo.filename):
            if self.debug: # Warning for duplicate names
                print "Duplicate name:", zinfo.filename
        if self.mode not in ("w", "a"):
            raise RuntimeError, 'write() requires mode "w" or "a"'
@@ -278,20 +278,20 @@ class ZipFile:
        date_time = mtime[0:6]
        # Create ZipInfo instance to store file information
        if arcname is None:
            zinfo = ZipInfo(filename, date_time)
        else:
            zinfo = ZipInfo(arcname, date_time)
        zinfo.external_attr = st[0] << 16 # Unix attributes
        if compress_type is None:
            zinfo.compress_type = self.compression
        else:
            zinfo.compress_type = compress_type
        self._writecheck(zinfo)
        fp = open(filename, "rb")
        zinfo.flag_bits = 0x08
        zinfo.header_offset = self.fp.tell() # Start of header bytes
        self.fp.write(zinfo.FileHeader())
        zinfo.file_offset = self.fp.tell() # Start of file bytes
        CRC = 0
        compress_size = 0
        file_size = 0
@@ -330,23 +330,23 @@ class ZipFile:
        """Write a file into the archive. The contents is the string
        'bytes'."""
        self._writecheck(zinfo)
        zinfo.file_size = len(bytes) # Uncompressed size
        zinfo.CRC = binascii.crc32(bytes) # CRC-32 checksum
        if zinfo.compress_type == ZIP_DEFLATED:
            co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                 zlib.DEFLATED, -15)
            bytes = co.compress(bytes) + co.flush()
            zinfo.compress_size = len(bytes) # Compressed size
        else:
            zinfo.compress_size = zinfo.file_size
        zinfo.header_offset = self.fp.tell() # Start of header bytes
        self.fp.write(zinfo.FileHeader())
        zinfo.file_offset = self.fp.tell() # Start of file bytes
        self.fp.write(bytes)
        if zinfo.flag_bits & 0x08:
            # Write CRC and file sizes after the file data
            self.fp.write(struct.pack("<lll", zinfo.CRC, zinfo.compress_size,
                  zinfo.file_size))
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo
@@ -359,10 +359,10 @@ class ZipFile:
    def close(self):
        """Close the file, and for mode "w" and "a" write the ending
        records."""
        if self.mode in ("w", "a"): # write ending records
            count = 0
            pos1 = self.fp.tell()
            for zinfo in self.filelist: # write central directory
                count = count + 1
                dt = zinfo.date_time
                dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
@@ -468,7 +468,7 @@ class PyZipFile(ZipFile):
            file_pyo = pathname + ".pyo"
            if os.path.isfile(file_pyo) and \
               os.stat(file_pyo)[8] >= os.stat(file_py)[8]:
                fname = file_pyo # Use .pyo file
            elif not os.path.isfile(file_pyc) or \
                 os.stat(file_pyc)[8] < os.stat(file_py)[8]:
                import py_compile
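
To close, a small usage sketch for the zipfile API shown above; the archive name and member file are made-up examples, and the member must exist where noted.

import zipfile

z = zipfile.ZipFile("example.zip", "w", zipfile.ZIP_STORED)
z.write("README")                 # assumes a file named README exists here
z.close()

assert zipfile.is_zipfile("example.zip")
z = zipfile.ZipFile("example.zip", "r")
data = z.read("README")           # member contents as a string
z.close()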