Whitespace normalization.

parent 0822ff7cca
commit 0eadaac7dc

@@ -22,7 +22,7 @@ from SimpleXMLRPCServer import SimpleXMLRPCServer,\

class ServerHTMLDoc(pydoc.HTMLDoc):
"""Class used to generate pydoc HTML document for a server"""

def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""

@@ -63,7 +63,7 @@ class ServerHTMLDoc(pydoc.HTMLDoc):
here = end
results.append(escape(text[here:]))
return ''.join(results)

def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""

@@ -72,7 +72,7 @@ class ServerHTMLDoc(pydoc.HTMLDoc):
note = ''

title = '<a name="%s"><strong>%s</strong></a>' % (anchor, name)

if inspect.ismethod(object):
args, varargs, varkw, defaults = inspect.getargspec(object.im_func)
# exclude the argument bound to the instance, it will be

@@ -96,7 +96,7 @@ class ServerHTMLDoc(pydoc.HTMLDoc):
docstring = object[1] or ""
else:
docstring = pydoc.getdoc(object)

decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))

@@ -112,10 +112,10 @@ class ServerHTMLDoc(pydoc.HTMLDoc):
for key, value in methods.items():
fdict[key] = '#-' + key
fdict[value] = fdict[key]

head = '<big><big><strong>%s</strong></big></big>' % server_name
result = self.heading(head, '#ffffff', '#7799ee')

doc = self.markup(package_documentation, self.preformat, fdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc

@@ -136,7 +136,7 @@ class XMLRPCDocGenerator:
This class is designed as mix-in and should not
be constructed directly.
"""

def __init__(self):
# setup variables used for HTML documentation
self.server_name = 'XML-RPC Server Documentation'

@@ -170,7 +170,7 @@ class XMLRPCDocGenerator:
argument string used in the documentation and the
_methodHelp(method_name) method to provide the help text used
in the documentation."""

methods = {}

for method_name in self.system_listMethods():

@@ -208,7 +208,7 @@ class XMLRPCDocGenerator:
self.server_documentation,
methods
)

return documenter.page(self.server_title, documentation)

class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):

@@ -227,7 +227,7 @@ class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
Interpret all HTTP GET requests as requests for server
documentation.
"""

response = self.server.generate_html_documentation()
self.send_response(200)
self.send_header("Content-type", "text/html")

@@ -251,7 +251,7 @@ class DocXMLRPCServer( SimpleXMLRPCServer,
logRequests=1):
SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests)
XMLRPCDocGenerator.__init__(self)

class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler,
XMLRPCDocGenerator):
"""Handler for XML-RPC data and documentation requests passed through

@@ -281,8 +281,8 @@ if __name__ == '__main__':

Converts an angle in degrees to an angle in radians"""
import math
return deg * math.pi / 180

server = DocXMLRPCServer(("localhost", 8000))

server.set_server_title("Math Server")

@@ -299,4 +299,4 @@ You can use it from Python as follows:
server.register_function(deg_to_rad)
server.register_introspection_functions()

server.serve_forever()
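
Not part of the diff: a minimal sketch of how the documentation that do_GET serves could be fetched once the __main__ example above is running (assumes the example server is listening on localhost:8000).

# Sketch only -- fetch the generated pydoc-style HTML page.
import urllib

page = urllib.urlopen("http://localhost:8000/").read()
print page[:200]
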
@@ -374,10 +374,10 @@ class TimeRE(dict):

def pattern(self, format):
"""Return re pattern for the format string.

Need to make sure that any characters that might be interpreted as
regex syntax is escaped.

"""
processed_format = ''
# The sub() call escapes all characters that might be misconstrued
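
Not part of the diff: a small illustration of why the escaping matters -- literal characters such as '.' in a format string are regex metacharacters, and time.strptime only handles formats like the one below because TimeRE.pattern escapes them first.

import time

t = time.strptime("25.12.2002", "%d.%m.%Y")
print t.tm_year, t.tm_mon, t.tm_mday   # 2002 12 25
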
@@ -528,4 +528,3 @@ def _insensitiveindex(lst, findme):
return key
else:
raise ValueError("value not in list")

@@ -43,7 +43,7 @@ except ImportError:
import sys
del sys.modules[__name__]
raise

# bsddb3 calls it db, but provide _db for backwards compatibility
db = _db = _bsddb
__version__ = db.__version__

@@ -192,4 +192,3 @@ class DB(DictMixin):
if db.version() >= (4,1):
def set_encrypt(self, *args, **kwargs):
return apply(self._cobj.set_encrypt, args, kwargs)

@@ -296,6 +296,3 @@ class DBShelfCursor:


#---------------------------------------------------------------------------



@@ -1,2 +1 @@
from csv import *

@@ -116,7 +116,7 @@ class DictWriter:
self.restval = restval # for writing short dicts
if extrasaction.lower() not in ("raise", "ignore"):
raise ValueError, \
("extrasaction (%s) must be 'raise' or 'ignore'" %
extrasaction)
self.extrasaction = extrasaction
self.writer = writer(f, dialect, *args)

@@ -126,7 +126,7 @@ class DictWriter:
for k in rowdict.keys():
if k not in self.fieldnames:
raise ValueError, "dict contains fields not in fieldnames"
return [rowdict.get(key, self.restval) for key in self.fieldnames]

def writerow(self, rowdict):
return self.writer.writerow(self._dict_to_list(rowdict))
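
Not part of the diff: a usage sketch of DictWriter with the extrasaction behaviour validated above (field names and data are made up).

import csv, StringIO

out = StringIO.StringIO()
writer = csv.DictWriter(out, fieldnames=["name", "size"], extrasaction="ignore")
writer.writerow({"name": "spam", "size": 42, "colour": "pink"})  # extra key is ignored
print out.getvalue()   # spam,42
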
@@ -26,9 +26,9 @@ class Sniffer:
"""
Takes a file-like object and returns a dialect (or None)
"""

self.fileobj = fileobj

data = fileobj.read(self.sample)

quotechar, delimiter, skipinitialspace = self._guessQuoteAndDelimiter(data)

@@ -51,11 +51,11 @@ class Sniffer:

def hasHeaders(self):
return self._hasHeaders(self.fileobj, self.dialect)


def register_dialect(self, name = 'sniffed'):
csv.register_dialect(name, self.dialect)


def _guessQuoteAndDelimiter(self, data):
"""

@@ -78,7 +78,7 @@ class Sniffer:
matches = regexp.findall(data)
if matches:
break

if not matches:
return ('', None, 0) # (quotechar, delimiter, skipinitialspace)

@@ -117,7 +117,7 @@ class Sniffer:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0

return (quotechar, delim, skipinitialspace)


@@ -132,14 +132,14 @@ class Sniffer:
e.g. "x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows"
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""

data = filter(None, data.split('\n'))

ascii = [chr(c) for c in range(127)] # 7-bit ASCII
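
Not part of the diff: a rough sketch of the frequency/meta-frequency idea the docstring above describes. It is a simplification, not the Sniffer implementation; guess_delimiter and its candidate set are made up for illustration.

def guess_delimiter(rows, candidates=",;\t|"):
    best, best_score = None, -1
    for ch in candidates:
        freqs = [row.count(ch) for row in rows]    # frequency of ch per row
        mode = max(freqs, key=freqs.count)         # most common per-row frequency
        if not mode:
            continue
        score = freqs.count(mode)                  # rows that meet that expected frequency
        if score > best_score:
            best, best_score = ch, score
    return best

print guess_delimiter(["a,b,c", "1,2,3", "4,5,6"])   # ','
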
@@ -218,7 +218,7 @@ class Sniffer:
# be a string in which case the length of the string is the determining factor: if
# all of the rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or subtracting from
# the likelihood of the first row being a header.

def seval(item):
"""

@@ -227,7 +227,7 @@ class Sniffer:
return eval(item.replace('(', '').replace(')', ''))

fileobj.seek(0) # rewind the fileobj - this might not work for some file-like objects...

reader = csv.reader(fileobj,
delimiter = dialect.delimiter,
quotechar = dialect.quotechar,

@@ -284,6 +284,3 @@ class Sniffer:
hasHeader -= 1

return hasHeader > 0




@@ -120,4 +120,3 @@ def search_function(encoding):

# Register the search_function in the Python codec registry
codecs.register(search_function)

@@ -19,10 +19,10 @@ def nameprep(label):
continue
newlabel.append(stringprep.map_table_b2(c))
label = u"".join(newlabel)

# Normalize
label = unicodedata.normalize("NFKC", label)

# Prohibit
for c in label:
if stringprep.in_table_c12(c) or \

@@ -139,7 +139,7 @@ def ToUnicode(label):

# Step 8: return the result of step 5
return result

### Codec APIs

class Codec(codecs.Codec):

@@ -156,7 +156,7 @@ class Codec(codecs.Codec):
return ".".join(result), len(input)

def decode(self,input,errors='strict'):

if errors != 'strict':
raise UnicodeError, "Unsupported error handling "+errors

@@ -9,7 +9,7 @@ import codecs
##################### Encoding #####################################

def segregate(str):
"""3.1 Basic code point segregation"""
base = []
extended = {}
for c in str:

@@ -66,7 +66,7 @@ def insertion_unsort(str, extended):
oldindex = index
delta = 0
oldchar = char

return result

def T(j, bias):

@@ -103,7 +103,7 @@ def adapt(delta, first, numchars):
divisions += 36
bias = divisions + (36 * delta // (delta + 38))
return bias


def generate_integers(baselen, deltas):
"""3.4 Bias adaptation"""

@@ -155,7 +155,7 @@ def decode_generalized_number(extended, extpos, bias, errors):
return extpos, result
w = w * (36 - t)
j += 1


def insertion_sort(base, extended, errors):
"""3.2 Insertion unsort coding"""

@@ -193,7 +193,7 @@ def punycode_decode(text, errors):
base = unicode(base, "ascii", errors)
extended = extended.upper()
return insertion_sort(base, extended, errors)

### Codec APIs

class Codec(codecs.Codec):
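
Not part of the diff: a round-trip through the punycode codec built from the helpers above.

label = u"b\u00fccher"                       # u"bücher"
encoded = label.encode("punycode")
print encoded                                # bcher-kva
print encoded.decode("punycode") == label    # True
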
@@ -60,12 +60,12 @@ class ParserBase:
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
# ISO 8879:1986, however, has more complex
# declaration syntax for elements in <!...>, including:
# --comment--
# [marked section]
# name in the following list: ENTITY, DOCTYPE, ELEMENT,
# ATTLIST, NOTATION, SHORTREF, USEMAP,
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
rawdata = self.rawdata
j = i + 2

@@ -151,7 +151,7 @@ class ParserBase:
j = match.start(0)
self.unknown_decl(rawdata[i+3: j])
return match.end(0)

# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i, report=1):
rawdata = self.rawdata

@@ -1380,4 +1380,3 @@ def _match_abbrev (s, wordmap):
# which will become a factory function when there are many Option
# classes.
make_option = Option

@@ -110,8 +110,8 @@ import sys,string,os,re

_libc_search = re.compile(r'(__libc_init)'
'|'
'(GLIBC_([0-9.]+))'
'|'
'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)')

def libc_ver(executable=sys.executable,lib='',version='',

@@ -126,7 +126,7 @@ def libc_ver(executable=sys.executable,lib='',version='',

Note that the function has intimate knowledge of how different
libc versions add symbols to the executable is probably only
useable for executables compiled using gcc.

The file is read and scanned in chunks of chunksize bytes.

@@ -164,7 +164,7 @@ def libc_ver(executable=sys.executable,lib='',version='',

def _dist_try_harder(distname,version,id):

""" Tries some special tricks to get the distribution
information in case the default method fails.

Currently supports older SuSE Linux, Caldera OpenLinux and

@@ -376,7 +376,7 @@ def _syscmd_ver(system='',release='',version='',

""" Tries to figure out the OS version used and returns
a tuple (system,release,version).

It uses the "ver" shell command for this which is known
to exists on Windows, DOS and OS/2. XXX Others too ?

@@ -501,7 +501,7 @@ def win32_ver(release='',version='',csd='',ptype=''):
RegQueryValueEx(keyCurVer,'SystemRoot')
except:
return release,version,csd,ptype

# Parse values
#subversion = _win32_getvalue(keyCurVer,
#                             'SubVersionNumber',

@@ -581,7 +581,7 @@ def mac_ver(release='',versioninfo=('','',''),machine=''):
0x80:'final'}.get(stage,'')
versioninfo = (version,stage,nonrel)
if sysa:
machine = {0x1: '68k',
0x2: 'PowerPC'}.get(sysa,'')
return release,versioninfo,machine

@@ -594,7 +594,7 @@ def _java_getprop(self,name,default):
return default

def java_ver(release='',vendor='',vminfo=('','',''),osinfo=('','','')):

""" Version interface for JPython.

Returns a tuple (release,vendor,vminfo,osinfo) with vminfo being

@@ -623,7 +623,7 @@ def java_ver(release='',vendor='',vminfo=('','',''),osinfo=('','','')):
os_name = _java_getprop('java.os.name',os_name)
os_version = _java_getprop('java.os.version',os_version)
osinfo = os_name,os_version,os_arch

return release,vendor,vminfo,osinfo

### System name aliasing

@@ -843,14 +843,14 @@ def architecture(executable=sys.executable,bits='',linkage=''):
# Older installations can only query longs
size = struct.calcsize('l')
bits = str(size*8) + 'bit'

# Get data from the 'file' system command
output = _syscmd_file(executable,'')

if not output and \
executable == sys.executable:
# "file" command did not return anything; we'll try to provide
# some sensible defaults then...
if _default_architecture.has_key(sys.platform):
b,l = _default_architecture[sys.platform]
if b:

@@ -861,7 +861,7 @@ def architecture(executable=sys.executable,bits='',linkage=''):

# Split the output into a list of strings omitting the filename
fileout = _architecture_split(output)[1:]

if 'executable' not in fileout:
# Format not supported
return bits,linkage

@@ -895,7 +895,7 @@ def architecture(executable=sys.executable,bits='',linkage=''):
return bits,linkage

### Portable uname() interface

_uname_cache = None

def uname():

@@ -934,7 +934,7 @@ def uname():
release,version,csd,ptype = win32_ver()
if release and version:
use_syscmd_ver = 0

# Try the 'ver' system command available on some
# platforms
if use_syscmd_ver:

@@ -1140,7 +1140,7 @@ def platform(aliased=0, terse=0):

""" Returns a single string identifying the underlying platform
with as much useful information as possible (but no more :).

The output is intended to be human readable rather than
machine parseable. It may look different on different
platforms and this is intended.

@@ -1215,7 +1215,7 @@ def platform(aliased=0, terse=0):
else:
bits,linkage = architecture(sys.executable)
platform = _platform(system,release,machine,processor,bits,linkage)

if aliased:
_platform_aliased_cache = platform
elif terse:

@@ -1228,7 +1228,7 @@ def platform(aliased=0, terse=0):

if __name__ == '__main__':
# Default is to print the aliased verbose platform string
terse = ('terse' in sys.argv or '--terse' in sys.argv)
aliased = (not 'nonaliased' in sys.argv and not '--nonaliased' in sys.argv)
print platform(aliased,terse)
sys.exit(0)
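
Not part of the diff: the public entry points of the platform module touched above; the output shown is only indicative and differs per machine.

import platform

print platform.platform()        # e.g. 'Linux-2.4.x-i686-with-glibc2.2'
print platform.architecture()    # e.g. ('32bit', 'ELF')
print platform.uname()[:3]       # (system, node, release)
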
@@ -144,10 +144,10 @@ class SGMLParser(markupbase.ParserBase):
break
continue
if rawdata.startswith("<!--", i):
# Strictly speaking, a comment is --.*--
# within a declaration tag <!...>.
# This should be removed,
# and comments handled only in parse_declaration.
k = self.parse_comment(i)
if k < 0: break
i = k

@@ -15,7 +15,7 @@ object):

d[key] = data # store data at key (overwrites old data if
# using an existing key)
data = d[key] # retrieve a COPY of the data at key (raise
# KeyError if no such key) -- NOTE that this
# access returns a *copy* of the entry!
del d[key] # delete data stored at key (raises KeyError
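
Not part of the diff: a runnable version of the docstring usage above (the shelf filename is arbitrary).

import shelve

d = shelve.open("spam_shelf")    # creates the file(s) if needed
d["key"] = {"eggs": 2}           # store pickled data at key
print d["key"]                   # a *copy* of the stored entry
del d["key"]                     # raises KeyError if no such key
d.close()
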
@@ -270,4 +270,3 @@ def in_table_d1(code):

def in_table_d2(code):
return unicodedata.bidirectional(code) == "L"

@@ -631,4 +631,3 @@ class MixinStrUserStringTest:

self.checkraises(TypeError, 'xyz', 'decode', 42)
self.checkraises(TypeError, 'xyz', 'encode', 42)

@@ -27,7 +27,7 @@ def TestThreadState():

def callback():
idents.append(thread.get_ident())

_testcapi._test_thread_state(callback)
time.sleep(1)
# Check our main thread is in the list exactly 3 times.

@@ -40,6 +40,6 @@ try:
have_thread_state = True
except AttributeError:
have_thread_state = False

if have_thread_state:
TestThreadState()

@@ -105,7 +105,7 @@ punycode_testcases = [
#(L) 3<nen>B<gumi><kinpachi><sensei>
(u"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
"3B-ww4c5e180e575a65lsy2b"),

# (M) <amuro><namie>-with-SUPER-MONKEYS
(u"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
u"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"

@@ -264,7 +264,7 @@ nameprep_tests = [
('\xe2\xbf\xb5',
None),
# 3.33 Display property character U+0341.
('\xcd\x81',
'\xcc\x81'),
# 3.34 Left-to-right mark U+200E.
('\xe2\x80\x8e',

@@ -10,7 +10,7 @@ from test.test_support import verbose

class Test_Csv(unittest.TestCase):
"""
Test the underlying C csv parser in ways that are not appropriate
from the high level interface. Further tests of this nature are done
in TestDialectRegistry.
"""

@@ -38,7 +38,7 @@ class Test_Csv(unittest.TestCase):
obj.dialect.delimiter = '\t'
self.assertEqual(obj.dialect.delimiter, '\t')
self.assertRaises(TypeError, delattr, obj.dialect, 'delimiter')
self.assertRaises(TypeError, setattr, obj.dialect,
'lineterminator', None)
obj.dialect.escapechar = None
self.assertEqual(obj.dialect.escapechar, None)

@@ -57,14 +57,14 @@ class Test_Csv(unittest.TestCase):
fileobj = StringIO()
writer = csv.writer(fileobj, **kwargs)
writer.writerow(fields)
self.assertEqual(fileobj.getvalue(),
expect + writer.dialect.lineterminator)

def test_write_arg_valid(self):
self.assertRaises(csv.Error, self._write_test, None, '')
self._write_test((), '')
self._write_test([None], '""')
self.assertRaises(csv.Error, self._write_test,
[None], None, quoting = csv.QUOTE_NONE)
# Check that exceptions are passed up the chain
class BadList:

@@ -87,7 +87,7 @@ class Test_Csv(unittest.TestCase):

def test_write_quoting(self):
self._write_test(['a','1','p,q'], 'a,1,"p,q"')
self.assertRaises(csv.Error,
self._write_test,
['a','1','p,q'], 'a,1,"p,q"',
quoting = csv.QUOTE_NONE)

@@ -311,15 +311,15 @@ class TestDialectExcel(TestCsvBase):
self.readerAssertEqual(' "a"', [[' "a"']])

def test_quoted(self):
self.readerAssertEqual('1,2,3,"I think, therefore I am",5,6',
[['1', '2', '3',
'I think, therefore I am',
'5', '6']])

def test_quoted_quote(self):
self.readerAssertEqual('1,2,3,"""I see,"" said the blind man","as he picked up his hammer and saw"',
[['1', '2', '3',
'"I see," said the blind man',
'as he picked up his hammer and saw']])

def test_quoted_nl(self):

@@ -329,8 +329,8 @@ said the blind man","as he picked up his
hammer and saw"
9,8,7,6'''
self.readerAssertEqual(input,
[['1', '2', '3',
'"I see,"\nsaid the blind man',
'as he picked up his\nhammer and saw'],
['9','8','7','6']])
@@ -199,7 +199,7 @@ class LongLong_TestCase(unittest.TestCase):
self.failUnlessEqual(42, ll_convert("L", 42))
self.failUnlessEqual(42, ll_convert("L", 42L))
self.assertRaises(OverflowError, ll_convert, "L", VERY_LARGE)

def test_K(self):
# K return 'unsigned long long', no range checking
self.assertRaises(TypeError, ull_convert, "K", 3.14)

@@ -296,7 +296,7 @@ def suite():
suite.addTest(unittest.makeSuite(PluralFormsTestCase))
suite.addTest(unittest.makeSuite(UnicodeTranslationsTest))
return suite


def test_main():
run_suite(suite())

@@ -29,7 +29,7 @@ class TestMacfs(unittest.TestCase):
def test_fsref(self):
fsr = macfs.FSRef(test_support.TESTFN)
self.assertEqual(os.path.realpath(test_support.TESTFN), fsr.as_pathname())

def test_fsref_unicode(self):
if sys.getfilesystemencoding():
testfn_unicode = unicode(test_support.TESTFN)

@@ -92,4 +92,3 @@ def test_main():

if __name__ == "__main__":
test_main()

@@ -8,7 +8,7 @@ from test import test_support
import aetools

class TestScriptpackages(unittest.TestCase):

def _test_scriptpackage(self, package, testobject=1):
# Check that we can import the package
mod = __import__(package)

@@ -19,28 +19,28 @@ class TestScriptpackages(unittest.TestCase):
if testobject:
# Test that we can get an application object
obj = mod.application(0)

def test__builtinSuites(self):
self._test_scriptpackage('_builtinSuites', testobject=0)

def test_StdSuites(self):
self._test_scriptpackage('StdSuites')

def test_SystemEvents(self):
self._test_scriptpackage('SystemEvents')

def test_Finder(self):
self._test_scriptpackage('Finder')

def test_Terminal(self):
self._test_scriptpackage('Terminal')

def test_Netscape(self):
self._test_scriptpackage('Netscape')

def test_Explorer(self):
self._test_scriptpackage('Explorer')

def test_CodeWarrior(self):
self._test_scriptpackage('CodeWarrior')

@@ -166,10 +166,10 @@ class ShlexTest(unittest.TestCase):
ret.append(tok)
tok = lex.get_token()
return ret

def testSplitPosix(self):
"""Test data splitting with posix parser"""
self.splitTest(self.posix_data, comments=True)

def testCompat(self):
"""Test compatibility interface"""

@@ -63,4 +63,3 @@ try:
except KeyboardInterrupt:
if verbose:
print "KeyboardInterrupt (assume the alarm() went off)"

@@ -86,5 +86,3 @@ verify(not in_table_d2(u"\u0040"))
# h = sha.sha()
# h.update(data)
# print p,h.hexdigest()


@@ -58,16 +58,16 @@ class TimeTestCase(unittest.TestCase):

from os import environ

# Epoch time of midnight Dec 25th 2002. Never DST in northern
# hemisphere.
xmas2002 = 1040774400.0

# These formats are correct for 2002, and possibly future years
# This format is the 'standard' as documented at:
# http://www.opengroup.org/onlinepubs/007904975/basedefs/xbd_chap08.html
# They are also documented in the tzset(3) man page on most Unix
# systems.
eastern = 'EST+05EDT,M4.1.0,M10.5.0'
victoria = 'AEST-10AEDT-11,M10.5.0,M3.5.0'
utc='UTC+0'
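
Not part of the diff: what the test exercises, shown standalone. time.tzset() exists on Unix only, so this is illustrative rather than portable.

import os, time

os.environ["TZ"] = "EST+05EDT,M4.1.0,M10.5.0"   # the 'eastern' string above
time.tzset()
print time.timezone / 3600                      # 5 (hours west of UTC)
print time.localtime(1040774400.0).tm_isdst     # 0 -- no DST at Christmas
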
@@ -83,7 +83,7 @@ class TimeTestCase(unittest.TestCase):
self.failUnlessEqual(
time.gmtime(xmas2002), time.localtime(xmas2002)
)
self.failUnlessEqual(time.daylight, 0)
self.failUnlessEqual(time.timezone, 0)
self.failUnlessEqual(time.localtime(xmas2002).tm_isdst, 0)

@@ -119,7 +119,7 @@ class TimeTestCase(unittest.TestCase):
elif environ.has_key('TZ'):
del environ['TZ']
time.tzset()


def test_main():
test_support.run_unittest(TimeTestCase)

@@ -6,7 +6,7 @@ import os, glob
from test.test_support import verify, TestSkipped, TESTFN_UNICODE
from test.test_support import TESTFN_ENCODING
try:
TESTFN_ENCODED = TESTFN_UNICODE.encode(TESTFN_ENCODING)
except (UnicodeError, TypeError):
# Either the file system encoding is None, or the file name
# cannot be encoded in the file system encoding.

@@ -268,7 +268,7 @@ class CoverageResults:
coverpath = os.path.join(dir, modulename + ".cover")
n_hits, n_lines = self.write_results_file(coverpath, source,
lnotab, count)

if summary and n_lines:
percent = int(100 * n_hits / n_lines)
sums[modulename] = n_lines, percent, modulename, filename

@@ -467,7 +467,7 @@ class Trace:

def globaltrace_countfuncs(self, frame, why, arg):
"""Handler for call events.

Adds (filename, modulename, funcname) to the self._calledfuncs dict.
"""
if why == 'call':

@@ -26,7 +26,7 @@ def registerDOMImplementation(name, factory):
interface. The factory function can either return the same object,
or a new one (e.g. if that implementation supports some
customization)."""

registered[name] = factory

def _good_enough(dom, features):

@@ -48,7 +48,7 @@ def getDOMImplementation(name = None, features = ()):
find one with the required feature set. If no implementation can
be found, raise an ImportError. The features list must be a sequence
of (feature, version) pairs which are passed to hasFeature."""

import os
creator = None
mod = well_known_implementations.get(name)
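
Not part of the diff: a usage sketch of getDOMImplementation(); with no name and no feature requirements it typically hands back the minidom implementation.

import xml.dom

impl = xml.dom.getDOMImplementation()
doc = impl.createDocument(None, "root", None)
print doc.documentElement.tagName    # root
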
@@ -236,7 +236,7 @@ class DOMEventStream:

def __iter__(self):
return self

def expandNode(self, node):
event = self.getEvent()
parents = [node]

@@ -326,7 +326,7 @@ property_encoding = "http://www.python.org/sax/properties/encoding"
# processing a META tag)
# read: return the current encoding (possibly established through
# auto-detection.
# initial value: UTF-8
#

property_interning_dict = "http://www.python.org/sax/properties/interning-dict"

@@ -20,7 +20,7 @@ def __dict_replace(s, d):

def escape(data, entities={}):
"""Escape &, <, and > in a string of data.

You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
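
Not part of the diff: escape() as the docstring above describes, with and without an extra entities mapping.

from xml.sax.saxutils import escape

print escape('a < b & "c"')                  # a &lt; b &amp; "c"
print escape('"quoted"', {'"': "&quot;"})    # &quot;quoted&quot;
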
@@ -103,7 +103,7 @@ def main(args):
sys.stderr.write("Unable to open %s. " % dbfile)
sys.stderr.write("Check for format or version mismatch.\n")
return 1

for k in db.keys():
pickle.dump((k, db[k]), pfile, 1==1)

@@ -37,7 +37,7 @@ except ImportError:
prog = sys.argv[0]

def usage():
sys.stderr.write(__doc__ % globals())

def main(args):
try:

@@ -146,7 +146,7 @@ class Ignore:

class CoverageResults:
def __init__(self, counts=None, calledfuncs=None, infile=None,
outfile=None):
self.counts = counts
if self.counts is None:
self.counts = {}

@@ -164,7 +164,7 @@ class CoverageResults:
if type(thingie) is types.DictType:
# backwards compatibility for old trace.py after
# Zooko touched it but before calledfuncs --Zooko
# 2001-10-24
self.update(self.__class__(thingie))
elif type(thingie) is types.TupleType and len(thingie) == 2:
counts, calledfuncs = thingie

@@ -173,7 +173,7 @@ class CoverageResults:
pass
except pickle.UnpicklingError:
# backwards compatibility for old trace.py before
# Zooko touched it --Zooko 2001-10-24
self.update(self.__class__(marshal.load(open(self.infile))))

def update(self, other):

@@ -187,7 +187,7 @@ class CoverageResults:
if key != 'calledfuncs':
# backwards compatibility for abortive attempt to
# stuff calledfuncs into self.counts, by Zooko
# --Zooko 2001-10-24
counts[key] = counts.get(key, 0) + other_counts[key]

for key in other_calledfuncs.keys():

@@ -406,18 +406,18 @@ class Trace:
ignoredirs=(), infile=None, outfile=None):
"""
@param count true iff it should count number of times each
line is executed
@param trace true iff it should print out each line that is
being counted
@param countfuncs true iff it should just output a list of
(filename, modulename, funcname,) for functions
that were called at least once; This overrides
`count' and `trace'
@param ignoremods a list of the names of modules to ignore
@param ignoredirs a list of the names of directories to ignore
all of the (recursive) contents of
@param infile file from which to read stored counts to be
added into the results
@param outfile file in which to write the results
"""
self.infile = infile
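
Not part of the diff: a usage sketch of the Trace options documented above; the traced statement is a placeholder and the .cover output goes to the current directory.

import trace

tracer = trace.Trace(count=1, trace=0)   # count executed lines, don't echo them
tracer.run('sum(range(10))')
results = tracer.results()               # a CoverageResults instance
results.write_results(show_missing=True)
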
@@ -516,19 +516,19 @@ class Trace:
# XXX I wish inspect offered me an optimized
# `getfilename(frame)' to use in place of the presumably
# heavier `getframeinfo()'. --Zooko 2001-10-14

filename, lineno, funcname, context, lineindex = \
inspect.getframeinfo(frame, 1)
key = filename, lineno
self.counts[key] = self.counts.get(key, 0) + 1

# XXX not convinced that this memoizing is a performance
# win -- I don't know enough about Python guts to tell.
# --Zooko 2001-10-14

bname = self.pathtobasename.get(filename)
if bname is None:

# Using setdefault faster than two separate lines?
# --Zooko 2001-10-14
bname = self.pathtobasename.setdefault(filename,

@@ -553,7 +553,7 @@ class Trace:
# heavier `getframeinfo()'. --Zooko 2001-10-14
filename, lineno, funcname, context, lineindex = \
inspect.getframeinfo(frame)

# XXX not convinced that this memoizing is a performance
# win -- I don't know enough about Python guts to tell.
# --Zooko 2001-10-14