Whitespace normalization.

Tim Peters 2001-01-14 23:47:14 +00:00
parent 88869f9787
commit 07e99cb774
18 changed files with 1933 additions and 1936 deletions


@ -30,7 +30,7 @@ class MimeWriter:
amounts of buffer space, so you have to write the parts in the
order they should occur on the output file. It does buffer the
headers you add, allowing you to rearrange their order.
General usage is:
f = <open the output file>
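
For reference, the "general usage" pattern the docstring describes looks roughly like this (not part of this commit; the output file name and header values are made-up examples):

from MimeWriter import MimeWriter

f = open('out.msg', 'w')            # the <open the output file> step
w = MimeWriter(f)
w.addheader('MIME-Version', '1.0')  # headers are buffered and may be added in any order
w.addheader('Subject', 'demo')
body = w.startbody('text/plain')    # flushes the headers, returns a file-like object for the body
body.write('hello\n')
f.close()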


@ -14,106 +14,105 @@ On the Mac EasyDialogs.AskPassword is used, if available.
import sys
def unix_getpass(prompt='Password: '):
    """Prompt for a password, with echo turned off.
    Restore terminal settings at end.
    """
    try:
        fd = sys.stdin.fileno()
    except:
        return default_getpass(prompt)
    getpass = default_getpass
    old = termios.tcgetattr(fd)     # a copy to save
    new = old[:]
    new[3] = new[3] & ~TERMIOS.ECHO # 3 == 'lflags'
    try:
        termios.tcsetattr(fd, TERMIOS.TCSADRAIN, new)
        passwd = _raw_input(prompt)
    finally:
        termios.tcsetattr(fd, TERMIOS.TCSADRAIN, old)
    sys.stdout.write('\n')
    return passwd


def win_getpass(prompt='Password: '):
    """Prompt for password with echo off, using Windows getch()."""
    import msvcrt
    for c in prompt:
        msvcrt.putch(c)
    pw = ""
    while 1:
        c = msvcrt.getch()
        if c == '\r' or c == '\n':
            break
        if c == '\003':
            raise KeyboardInterrupt
        if c == '\b':
            pw = pw[:-1]
        else:
            pw = pw + c
    msvcrt.putch('\r')
    msvcrt.putch('\n')
    return pw


def default_getpass(prompt='Password: '):
    print "Warning: Problem with getpass. Passwords may be echoed."
    return _raw_input(prompt)


def _raw_input(prompt=""):
    # A raw_input() replacement that doesn't save the string in the
    # GNU readline history.
    import sys
    prompt = str(prompt)
    if prompt:
        sys.stdout.write(prompt)
    line = sys.stdin.readline()
    if not line:
        raise EOFError
    if line[-1] == '\n':
        line = line[:-1]
    return line


def getuser():
    """Get the username from the environment or password database.
    First try various environment variables, then the password
    database. This works on Windows as long as USERNAME is set.
    """
    import os
    for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
        user = os.environ.get(name)
        if user:
            return user
    # If this fails, the exception will "explain" why
    import pwd
    return pwd.getpwuid(os.getuid())[0]


# Bind the name getpass to the appropriate function
try:
    import termios, TERMIOS
except ImportError:
    try:
        import msvcrt
    except ImportError:
        try:
            from EasyDialogs import AskPassword
        except ImportError:
            getpass = default_getpass
        else:
            getpass = AskPassword
    else:
        getpass = win_getpass
else:
    getpass = unix_getpass
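
A minimal usage sketch of the module above (not part of the commit; the prompt text is just an example):

import getpass

pw = getpass.getpass('Password: ')   # unix_getpass, win_getpass or a fallback, chosen at import time
print 'user:', getpass.getuser()     # environment variables first, then the pwd database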


@ -51,7 +51,7 @@ from errno import ENOENT
_default_localedir = os.path.join(sys.prefix, 'share', 'locale')
def _expand_lang(locale):
from locale import normalize
locale = normalize(locale)
@ -94,7 +94,7 @@ def _expand_lang(locale):
return ret
class NullTranslations:
def __init__(self, fp=None):
self._info = {}
@ -192,7 +192,7 @@ class GNUTranslations(NullTranslations):
return unicode(tmsg, self._charset)
# Locate a .mo file using the gettext strategy
def find(domain, localedir=None, languages=None):
# Get some reasonable defaults for arguments that were not supplied
@ -223,7 +223,7 @@ def find(domain, localedir=None, languages=None):
return None
# a mapping between absolute .mo file path and Translation object
_translations = {}
@ -243,12 +243,12 @@ def translation(domain, localedir=None, languages=None, class_=None):
return t
def install(domain, localedir=None, unicode=0):
translation(domain, localedir).install(unicode)
# a mapping b/w domains and locale directories
_localedirs = {}
# current global domain, `messages' used for compatibility w/ GNU gettext
@ -275,7 +275,7 @@ def dgettext(domain, message):
except IOError:
return message
return t.gettext(message)
def gettext(message):
return dgettext(_current_domain, message)
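
The find/translation/install machinery shown in these hunks is typically driven like this (a sketch, not part of the commit; 'myapp', the locale directory and the 'de' catalog are assumed to exist):

import gettext

t = gettext.translation('myapp', '/usr/share/locale', languages=['de'])
t.install()              # binds _ as a builtin
print _('Hello world')   # looked up in the .mo catalog; unknown strings pass through unchanged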


@ -6,51 +6,51 @@ import re
def glob(pathname):
"""Return a list of paths matching a pathname pattern.
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la fnmatch.
The pattern may contain simple shell-style wildcards a la fnmatch.
"""
if not has_magic(pathname):
if os.path.exists(pathname):
return [pathname]
else:
return []
dirname, basename = os.path.split(pathname)
if has_magic(dirname):
list = glob(dirname)
else:
list = [dirname]
if not has_magic(basename):
result = []
for dirname in list:
if basename or os.path.isdir(dirname):
name = os.path.join(dirname, basename)
if os.path.exists(name):
result.append(name)
else:
result = []
for dirname in list:
sublist = glob1(dirname, basename)
for name in sublist:
result.append(os.path.join(dirname, name))
return result
"""
if not has_magic(pathname):
if os.path.exists(pathname):
return [pathname]
else:
return []
dirname, basename = os.path.split(pathname)
if has_magic(dirname):
list = glob(dirname)
else:
list = [dirname]
if not has_magic(basename):
result = []
for dirname in list:
if basename or os.path.isdir(dirname):
name = os.path.join(dirname, basename)
if os.path.exists(name):
result.append(name)
else:
result = []
for dirname in list:
sublist = glob1(dirname, basename)
for name in sublist:
result.append(os.path.join(dirname, name))
return result
def glob1(dirname, pattern):
if not dirname: dirname = os.curdir
try:
names = os.listdir(dirname)
except os.error:
return []
result = []
for name in names:
if name[0] != '.' or pattern[0] == '.':
if fnmatch.fnmatch(name, pattern):
result.append(name)
return result
if not dirname: dirname = os.curdir
try:
names = os.listdir(dirname)
except os.error:
return []
result = []
for name in names:
if name[0] != '.' or pattern[0] == '.':
if fnmatch.fnmatch(name, pattern):
result.append(name)
return result
magic_check = re.compile('[*?[]')
def has_magic(s):
return magic_check.search(s) is not None
return magic_check.search(s) is not None
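
Usage sketch for the functions above (not part of the commit; the patterns and listed results are only illustrative):

import glob

print glob.glob('*.py')            # shell-style wildcards, e.g. ['getpass.py', 'glob.py']
print glob.glob('Lib/[a-m]*.py')   # character ranges work too; hidden files need an explicit leading dot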


@ -15,7 +15,7 @@ READ, WRITE = 1, 2
def write32(output, value):
output.write(struct.pack("<l", value))
def write32u(output, value):
output.write(struct.pack("<L", value))
@ -29,7 +29,7 @@ class GzipFile:
myfileobj = None
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None):
if fileobj is None:
fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
@ -42,8 +42,8 @@ class GzipFile:
if mode[0:1] == 'r':
self.mode = READ
# Set flag indicating start of a new member
self._new_member = 1
self.extrabuf = ""
self.extrasize = 0
self.filename = filename
@ -52,7 +52,7 @@ class GzipFile:
self.mode = WRITE
self._init_write(filename)
self.compress = zlib.compressobj(compresslevel,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
@ -110,7 +110,7 @@ class GzipFile:
if flag & FEXTRA:
# Read & discard the extra field, if present
xlen=ord(self.fileobj.read(1))
xlen=xlen+256*ord(self.fileobj.read(1))
self.fileobj.read(xlen)
if flag & FNAME:
@ -158,7 +158,7 @@ class GzipFile:
except EOFError:
if size > self.extrasize:
size = self.extrasize
chunk = self.extrabuf[:size]
self.extrabuf = self.extrabuf[size:]
self.extrasize = self.extrasize - size
@ -171,11 +171,11 @@ class GzipFile:
def _read(self, size=1024):
if self.fileobj is None: raise EOFError, "Reached EOF"
if self._new_member:
# If the _new_member flag is set, we have to
# jump to the next member, if there is one.
#
# First, check if we're at the end of the file;
# if so, it's time to stop; no more members to read.
pos = self.fileobj.tell() # Save current position
@ -183,27 +183,27 @@ class GzipFile:
if pos == self.fileobj.tell():
self.fileobj = None
raise EOFError, "Reached EOF"
else:
self.fileobj.seek( pos ) # Return to original position
self._init_read()
self._read_gzip_header()
self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
self._new_member = 0
# Read a chunk of data from the file
buf = self.fileobj.read(size)
# If the EOF has been reached, flush the decompression object
# and mark this object as finished.
if buf == "":
uncompress = self.decompress.flush()
self._read_eof()
self.fileobj = None
self._add_read_data( uncompress )
raise EOFError, 'Reached EOF'
uncompress = self.decompress.decompress(buf)
self._add_read_data( uncompress )
@ -216,11 +216,11 @@ class GzipFile:
self.fileobj.seek( -len(self.decompress.unused_data)+8, 1)
# Check the CRC and file size, and set the flag so we read
# a new member on the next call
self._read_eof()
self._new_member = 1
def _add_read_data(self, data):
self.crc = zlib.crc32(data, self.crc)
self.extrabuf = self.extrabuf + data
self.extrasize = self.extrasize + len(data)
@ -228,7 +228,7 @@ class GzipFile:
def _read_eof(self):
# We've read to the end of the file, so we have to rewind in order
# to reread the 8 bytes containing the CRC and the file size.
# We check that the computed CRC and size of the
# uncompressed data match the stored values.
self.fileobj.seek(-8, 1)
@ -238,7 +238,7 @@ class GzipFile:
raise ValueError, "CRC check failed"
elif isize != self.size:
raise ValueError, "Incorrect length of data produced"
def close(self):
if self.mode == WRITE:
self.fileobj.write(self.compress.flush())
@ -259,7 +259,7 @@ class GzipFile:
except AttributeError:
return
self.close()
def flush(self):
self.fileobj.flush()
@ -285,7 +285,7 @@ class GzipFile:
i = string.find(c, '\n')
if size is not None:
# We set i=size to break out of the loop under two
# conditions: 1) there's no newline, and the chunk is
# larger than size, or 2) there is a newline, but the
# resulting line would be longer than 'size'.
if i==-1 and len(c) > size: i=size-1
@ -300,7 +300,7 @@ class GzipFile:
bufs.append(c)
size = size - len(c)
readsize = min(size, readsize * 2)
def readlines(self, sizehint=0):
# Negative numbers result in reading all the lines
if sizehint <= 0: sizehint = sys.maxint
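
A minimal round trip through GzipFile as defined above (not part of the commit; the file name is an example):

import gzip

f = gzip.GzipFile('example.txt.gz', 'wb')   # compresslevel defaults to 9
f.write('hello world\n')
f.close()

f = gzip.GzipFile('example.txt.gz', 'rb')
print f.read()                              # decompresses, checking CRC and size at EOF
f.close()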


@ -1,257 +1,257 @@
"""HTML character entity references."""
entitydefs = {
'AElig': '\306', # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
'Aacute': '\301', # latin capital letter A with acute, U+00C1 ISOlat1
'Acirc': '\302', # latin capital letter A with circumflex, U+00C2 ISOlat1
'Agrave': '\300', # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1
'Alpha': '&#913;', # greek capital letter alpha, U+0391
'Aring': '\305', # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1
'Atilde': '\303', # latin capital letter A with tilde, U+00C3 ISOlat1
'Auml': '\304', # latin capital letter A with diaeresis, U+00C4 ISOlat1
'Beta': '&#914;', # greek capital letter beta, U+0392
'Ccedil': '\307', # latin capital letter C with cedilla, U+00C7 ISOlat1
'Chi': '&#935;', # greek capital letter chi, U+03A7
'Dagger': '&#8225;', # double dagger, U+2021 ISOpub
'Delta': '&#916;', # greek capital letter delta, U+0394 ISOgrk3
'ETH': '\320', # latin capital letter ETH, U+00D0 ISOlat1
'Eacute': '\311', # latin capital letter E with acute, U+00C9 ISOlat1
'Ecirc': '\312', # latin capital letter E with circumflex, U+00CA ISOlat1
'Egrave': '\310', # latin capital letter E with grave, U+00C8 ISOlat1
'Epsilon': '&#917;', # greek capital letter epsilon, U+0395
'Eta': '&#919;', # greek capital letter eta, U+0397
'Euml': '\313', # latin capital letter E with diaeresis, U+00CB ISOlat1
'Gamma': '&#915;', # greek capital letter gamma, U+0393 ISOgrk3
'Iacute': '\315', # latin capital letter I with acute, U+00CD ISOlat1
'Icirc': '\316', # latin capital letter I with circumflex, U+00CE ISOlat1
'Igrave': '\314', # latin capital letter I with grave, U+00CC ISOlat1
'Iota': '&#921;', # greek capital letter iota, U+0399
'Iuml': '\317', # latin capital letter I with diaeresis, U+00CF ISOlat1
'Kappa': '&#922;', # greek capital letter kappa, U+039A
'Lambda': '&#923;', # greek capital letter lambda, U+039B ISOgrk3
'Mu': '&#924;', # greek capital letter mu, U+039C
'Ntilde': '\321', # latin capital letter N with tilde, U+00D1 ISOlat1
'Nu': '&#925;', # greek capital letter nu, U+039D
'OElig': '&#338;', # latin capital ligature OE, U+0152 ISOlat2
'Oacute': '\323', # latin capital letter O with acute, U+00D3 ISOlat1
'Ocirc': '\324', # latin capital letter O with circumflex, U+00D4 ISOlat1
'Ograve': '\322', # latin capital letter O with grave, U+00D2 ISOlat1
'Omega': '&#937;', # greek capital letter omega, U+03A9 ISOgrk3
'Omicron': '&#927;', # greek capital letter omicron, U+039F
'Oslash': '\330', # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1
'Otilde': '\325', # latin capital letter O with tilde, U+00D5 ISOlat1
'Ouml': '\326', # latin capital letter O with diaeresis, U+00D6 ISOlat1
'Phi': '&#934;', # greek capital letter phi, U+03A6 ISOgrk3
'Pi': '&#928;', # greek capital letter pi, U+03A0 ISOgrk3
'Prime': '&#8243;', # double prime = seconds = inches, U+2033 ISOtech
'Psi': '&#936;', # greek capital letter psi, U+03A8 ISOgrk3
'Rho': '&#929;', # greek capital letter rho, U+03A1
'Scaron': '&#352;', # latin capital letter S with caron, U+0160 ISOlat2
'Sigma': '&#931;', # greek capital letter sigma, U+03A3 ISOgrk3
'THORN': '\336', # latin capital letter THORN, U+00DE ISOlat1
'Tau': '&#932;', # greek capital letter tau, U+03A4
'Theta': '&#920;', # greek capital letter theta, U+0398 ISOgrk3
'Uacute': '\332', # latin capital letter U with acute, U+00DA ISOlat1
'Ucirc': '\333', # latin capital letter U with circumflex, U+00DB ISOlat1
'Ugrave': '\331', # latin capital letter U with grave, U+00D9 ISOlat1
'Upsilon': '&#933;', # greek capital letter upsilon, U+03A5 ISOgrk3
'Uuml': '\334', # latin capital letter U with diaeresis, U+00DC ISOlat1
'Xi': '&#926;', # greek capital letter xi, U+039E ISOgrk3
'Yacute': '\335', # latin capital letter Y with acute, U+00DD ISOlat1
'Yuml': '&#376;', # latin capital letter Y with diaeresis, U+0178 ISOlat2
'Zeta': '&#918;', # greek capital letter zeta, U+0396
'aacute': '\341', # latin small letter a with acute, U+00E1 ISOlat1
'acirc': '\342', # latin small letter a with circumflex, U+00E2 ISOlat1
'acute': '\264', # acute accent = spacing acute, U+00B4 ISOdia
'aelig': '\346', # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1
'agrave': '\340', # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1
'alefsym': '&#8501;', # alef symbol = first transfinite cardinal, U+2135 NEW
'alpha': '&#945;', # greek small letter alpha, U+03B1 ISOgrk3
'amp': '\46', # ampersand, U+0026 ISOnum
'and': '&#8743;', # logical and = wedge, U+2227 ISOtech
'ang': '&#8736;', # angle, U+2220 ISOamso
'aring': '\345', # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1
'asymp': '&#8776;', # almost equal to = asymptotic to, U+2248 ISOamsr
'atilde': '\343', # latin small letter a with tilde, U+00E3 ISOlat1
'auml': '\344', # latin small letter a with diaeresis, U+00E4 ISOlat1
'bdquo': '&#8222;', # double low-9 quotation mark, U+201E NEW
'beta': '&#946;', # greek small letter beta, U+03B2 ISOgrk3
'brvbar': '\246', # broken bar = broken vertical bar, U+00A6 ISOnum
'bull': '&#8226;', # bullet = black small circle, U+2022 ISOpub
'cap': '&#8745;', # intersection = cap, U+2229 ISOtech
'ccedil': '\347', # latin small letter c with cedilla, U+00E7 ISOlat1
'cedil': '\270', # cedilla = spacing cedilla, U+00B8 ISOdia
'cent': '\242', # cent sign, U+00A2 ISOnum
'chi': '&#967;', # greek small letter chi, U+03C7 ISOgrk3
'circ': '&#710;', # modifier letter circumflex accent, U+02C6 ISOpub
'clubs': '&#9827;', # black club suit = shamrock, U+2663 ISOpub
'cong': '&#8773;', # approximately equal to, U+2245 ISOtech
'copy': '\251', # copyright sign, U+00A9 ISOnum
'crarr': '&#8629;', # downwards arrow with corner leftwards = carriage return, U+21B5 NEW
'cup': '&#8746;', # union = cup, U+222A ISOtech
'curren': '\244', # currency sign, U+00A4 ISOnum
'dArr': '&#8659;', # downwards double arrow, U+21D3 ISOamsa
'dagger': '&#8224;', # dagger, U+2020 ISOpub
'darr': '&#8595;', # downwards arrow, U+2193 ISOnum
'deg': '\260', # degree sign, U+00B0 ISOnum
'delta': '&#948;', # greek small letter delta, U+03B4 ISOgrk3
'diams': '&#9830;', # black diamond suit, U+2666 ISOpub
'divide': '\367', # division sign, U+00F7 ISOnum
'eacute': '\351', # latin small letter e with acute, U+00E9 ISOlat1
'ecirc': '\352', # latin small letter e with circumflex, U+00EA ISOlat1
'egrave': '\350', # latin small letter e with grave, U+00E8 ISOlat1
'empty': '&#8709;', # empty set = null set = diameter, U+2205 ISOamso
'emsp': '&#8195;', # em space, U+2003 ISOpub
'ensp': '&#8194;', # en space, U+2002 ISOpub
'epsilon': '&#949;', # greek small letter epsilon, U+03B5 ISOgrk3
'equiv': '&#8801;', # identical to, U+2261 ISOtech
'eta': '&#951;', # greek small letter eta, U+03B7 ISOgrk3
'eth': '\360', # latin small letter eth, U+00F0 ISOlat1
'euml': '\353', # latin small letter e with diaeresis, U+00EB ISOlat1
'euro': '&#8364;', # euro sign, U+20AC NEW
'exist': '&#8707;', # there exists, U+2203 ISOtech
'fnof': '&#402;', # latin small f with hook = function = florin, U+0192 ISOtech
'forall': '&#8704;', # for all, U+2200 ISOtech
'frac12': '\275', # vulgar fraction one half = fraction one half, U+00BD ISOnum
'frac14': '\274', # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum
'frac34': '\276', # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum
'frasl': '&#8260;', # fraction slash, U+2044 NEW
'gamma': '&#947;', # greek small letter gamma, U+03B3 ISOgrk3
'ge': '&#8805;', # greater-than or equal to, U+2265 ISOtech
'gt': '\76', # greater-than sign, U+003E ISOnum
'hArr': '&#8660;', # left right double arrow, U+21D4 ISOamsa
'harr': '&#8596;', # left right arrow, U+2194 ISOamsa
'hearts': '&#9829;', # black heart suit = valentine, U+2665 ISOpub
'hellip': '&#8230;', # horizontal ellipsis = three dot leader, U+2026 ISOpub
'iacute': '\355', # latin small letter i with acute, U+00ED ISOlat1
'icirc': '\356', # latin small letter i with circumflex, U+00EE ISOlat1
'iexcl': '\241', # inverted exclamation mark, U+00A1 ISOnum
'igrave': '\354', # latin small letter i with grave, U+00EC ISOlat1
'image': '&#8465;', # blackletter capital I = imaginary part, U+2111 ISOamso
'infin': '&#8734;', # infinity, U+221E ISOtech
'int': '&#8747;', # integral, U+222B ISOtech
'iota': '&#953;', # greek small letter iota, U+03B9 ISOgrk3
'iquest': '\277', # inverted question mark = turned question mark, U+00BF ISOnum
'isin': '&#8712;', # element of, U+2208 ISOtech
'iuml': '\357', # latin small letter i with diaeresis, U+00EF ISOlat1
'kappa': '&#954;', # greek small letter kappa, U+03BA ISOgrk3
'lArr': '&#8656;', # leftwards double arrow, U+21D0 ISOtech
'lambda': '&#955;', # greek small letter lambda, U+03BB ISOgrk3
'lang': '&#9001;', # left-pointing angle bracket = bra, U+2329 ISOtech
'laquo': '\253', # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum
'larr': '&#8592;', # leftwards arrow, U+2190 ISOnum
'lceil': '&#8968;', # left ceiling = apl upstile, U+2308 ISOamsc
'ldquo': '&#8220;', # left double quotation mark, U+201C ISOnum
'le': '&#8804;', # less-than or equal to, U+2264 ISOtech
'lfloor': '&#8970;', # left floor = apl downstile, U+230A ISOamsc
'lowast': '&#8727;', # asterisk operator, U+2217 ISOtech
'loz': '&#9674;', # lozenge, U+25CA ISOpub
'lrm': '&#8206;', # left-to-right mark, U+200E NEW RFC 2070
'lsaquo': '&#8249;', # single left-pointing angle quotation mark, U+2039 ISO proposed
'lsquo': '&#8216;', # left single quotation mark, U+2018 ISOnum
'lt': '\74', # less-than sign, U+003C ISOnum
'macr': '\257', # macron = spacing macron = overline = APL overbar, U+00AF ISOdia
'mdash': '&#8212;', # em dash, U+2014 ISOpub
'micro': '\265', # micro sign, U+00B5 ISOnum
'middot': '\267', # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum
'minus': '&#8722;', # minus sign, U+2212 ISOtech
'mu': '&#956;', # greek small letter mu, U+03BC ISOgrk3
'nabla': '&#8711;', # nabla = backward difference, U+2207 ISOtech
'nbsp': '\240', # no-break space = non-breaking space, U+00A0 ISOnum
'ndash': '&#8211;', # en dash, U+2013 ISOpub
'ne': '&#8800;', # not equal to, U+2260 ISOtech
'ni': '&#8715;', # contains as member, U+220B ISOtech
'not': '\254', # not sign, U+00AC ISOnum
'notin': '&#8713;', # not an element of, U+2209 ISOtech
'nsub': '&#8836;', # not a subset of, U+2284 ISOamsn
'ntilde': '\361', # latin small letter n with tilde, U+00F1 ISOlat1
'nu': '&#957;', # greek small letter nu, U+03BD ISOgrk3
'oacute': '\363', # latin small letter o with acute, U+00F3 ISOlat1
'ocirc': '\364', # latin small letter o with circumflex, U+00F4 ISOlat1
'oelig': '&#339;', # latin small ligature oe, U+0153 ISOlat2
'ograve': '\362', # latin small letter o with grave, U+00F2 ISOlat1
'oline': '&#8254;', # overline = spacing overscore, U+203E NEW
'omega': '&#969;', # greek small letter omega, U+03C9 ISOgrk3
'omicron': '&#959;', # greek small letter omicron, U+03BF NEW
'oplus': '&#8853;', # circled plus = direct sum, U+2295 ISOamsb
'or': '&#8744;', # logical or = vee, U+2228 ISOtech
'ordf': '\252', # feminine ordinal indicator, U+00AA ISOnum
'ordm': '\272', # masculine ordinal indicator, U+00BA ISOnum
'oslash': '\370', # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1
'otilde': '\365', # latin small letter o with tilde, U+00F5 ISOlat1
'otimes': '&#8855;', # circled times = vector product, U+2297 ISOamsb
'ouml': '\366', # latin small letter o with diaeresis, U+00F6 ISOlat1
'para': '\266', # pilcrow sign = paragraph sign, U+00B6 ISOnum
'part': '&#8706;', # partial differential, U+2202 ISOtech
'permil': '&#8240;', # per mille sign, U+2030 ISOtech
'perp': '&#8869;', # up tack = orthogonal to = perpendicular, U+22A5 ISOtech
'phi': '&#966;', # greek small letter phi, U+03C6 ISOgrk3
'pi': '&#960;', # greek small letter pi, U+03C0 ISOgrk3
'piv': '&#982;', # greek pi symbol, U+03D6 ISOgrk3
'plusmn': '\261', # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum
'pound': '\243', # pound sign, U+00A3 ISOnum
'prime': '&#8242;', # prime = minutes = feet, U+2032 ISOtech
'prod': '&#8719;', # n-ary product = product sign, U+220F ISOamsb
'prop': '&#8733;', # proportional to, U+221D ISOtech
'psi': '&#968;', # greek small letter psi, U+03C8 ISOgrk3
'quot': '\42', # quotation mark = APL quote, U+0022 ISOnum
'rArr': '&#8658;', # rightwards double arrow, U+21D2 ISOtech
'radic': '&#8730;', # square root = radical sign, U+221A ISOtech
'rang': '&#9002;', # right-pointing angle bracket = ket, U+232A ISOtech
'raquo': '\273', # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum
'rarr': '&#8594;', # rightwards arrow, U+2192 ISOnum
'rceil': '&#8969;', # right ceiling, U+2309 ISOamsc
'rdquo': '&#8221;', # right double quotation mark, U+201D ISOnum
'real': '&#8476;', # blackletter capital R = real part symbol, U+211C ISOamso
'reg': '\256', # registered sign = registered trade mark sign, U+00AE ISOnum
'rfloor': '&#8971;', # right floor, U+230B ISOamsc
'rho': '&#961;', # greek small letter rho, U+03C1 ISOgrk3
'rlm': '&#8207;', # right-to-left mark, U+200F NEW RFC 2070
'rsaquo': '&#8250;', # single right-pointing angle quotation mark, U+203A ISO proposed
'rsquo': '&#8217;', # right single quotation mark, U+2019 ISOnum
'sbquo': '&#8218;', # single low-9 quotation mark, U+201A NEW
'scaron': '&#353;', # latin small letter s with caron, U+0161 ISOlat2
'sdot': '&#8901;', # dot operator, U+22C5 ISOamsb
'sect': '\247', # section sign, U+00A7 ISOnum
'shy': '\255', # soft hyphen = discretionary hyphen, U+00AD ISOnum
'sigma': '&#963;', # greek small letter sigma, U+03C3 ISOgrk3
'sigmaf': '&#962;', # greek small letter final sigma, U+03C2 ISOgrk3
'sim': '&#8764;', # tilde operator = varies with = similar to, U+223C ISOtech
'spades': '&#9824;', # black spade suit, U+2660 ISOpub
'sub': '&#8834;', # subset of, U+2282 ISOtech
'sube': '&#8838;', # subset of or equal to, U+2286 ISOtech
'sum': '&#8721;', # n-ary summation, U+2211 ISOamsb
'sup': '&#8835;', # superset of, U+2283 ISOtech
'sup1': '\271', # superscript one = superscript digit one, U+00B9 ISOnum
'sup2': '\262', # superscript two = superscript digit two = squared, U+00B2 ISOnum
'sup3': '\263', # superscript three = superscript digit three = cubed, U+00B3 ISOnum
'supe': '&#8839;', # superset of or equal to, U+2287 ISOtech
'szlig': '\337', # latin small letter sharp s = ess-zed, U+00DF ISOlat1
'tau': '&#964;', # greek small letter tau, U+03C4 ISOgrk3
'there4': '&#8756;', # therefore, U+2234 ISOtech
'theta': '&#952;', # greek small letter theta, U+03B8 ISOgrk3
'thetasym': '&#977;', # greek small letter theta symbol, U+03D1 NEW
'thinsp': '&#8201;', # thin space, U+2009 ISOpub
'thorn': '\376', # latin small letter thorn with, U+00FE ISOlat1
'tilde': '&#732;', # small tilde, U+02DC ISOdia
'times': '\327', # multiplication sign, U+00D7 ISOnum
'trade': '&#8482;', # trade mark sign, U+2122 ISOnum
'uArr': '&#8657;', # upwards double arrow, U+21D1 ISOamsa
'uacute': '\372', # latin small letter u with acute, U+00FA ISOlat1
'uarr': '&#8593;', # upwards arrow, U+2191 ISOnum
'ucirc': '\373', # latin small letter u with circumflex, U+00FB ISOlat1
'ugrave': '\371', # latin small letter u with grave, U+00F9 ISOlat1
'uml': '\250', # diaeresis = spacing diaeresis, U+00A8 ISOdia
'upsih': '&#978;', # greek upsilon with hook symbol, U+03D2 NEW
'upsilon': '&#965;', # greek small letter upsilon, U+03C5 ISOgrk3
'uuml': '\374', # latin small letter u with diaeresis, U+00FC ISOlat1
'weierp': '&#8472;', # script capital P = power set = Weierstrass p, U+2118 ISOamso
'xi': '&#958;', # greek small letter xi, U+03BE ISOgrk3
'yacute': '\375', # latin small letter y with acute, U+00FD ISOlat1
'yen': '\245', # yen sign = yuan sign, U+00A5 ISOnum
'yuml': '\377', # latin small letter y with diaeresis, U+00FF ISOlat1
'zeta': '&#950;', # greek small letter zeta, U+03B6 ISOgrk3
'zwj': '&#8205;', # zero width joiner, U+200D NEW RFC 2070
'zwnj': '&#8204;', # zero width non-joiner, U+200C NEW RFC 2070
}
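
The table is used as a plain dictionary lookup (a sketch, not part of the commit):

from htmlentitydefs import entitydefs

print entitydefs['copy']    # '\251', a Latin-1 character
print entitydefs['mdash']   # '&#8212;', a numeric character reference for non-Latin-1 entities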


@ -411,7 +411,7 @@ def test(args = None):
if f is not sys.stdin:
f.close()
if silent:
f = formatter.NullFormatter()
else:
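
The NullFormatter branch above discards formatting output while the parser still collects document structure; a sketch of that use (not part of the commit; the HTML string is made up):

import htmllib, formatter

p = htmllib.HTMLParser(formatter.NullFormatter())
p.feed('<html><body><a href="http://example.com/">link</a></body></html>')
p.close()
print p.anchorlist          # ['http://example.com/']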


@ -93,14 +93,14 @@ class HTTPResponse:
self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def begin(self):
if self.msg is not None:
@ -130,7 +130,7 @@ class HTTPResponse:
if version == 'HTTP/1.0':
self.version = 10
elif version.startswith('HTTP/1.'):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
elif version == 'HTTP/0.9':
self.version = 9
else:
@ -186,9 +186,9 @@ class HTTPResponse:
self.length = None
# does the body have a fixed length? (of zero)
if (status == 204 or # No Content
status == 304 or # Not Modified
100 <= status < 200): # 1xx codes
self.length = 0
# if the connection remains open, and we aren't using chunked, and
@ -225,7 +225,7 @@ class HTTPResponse:
line = self.fp.readline()
i = line.find(';')
if i >= 0:
line = line[:i] # strip chunk-extensions
chunk_left = int(line, 16)
if chunk_left == 0:
break
@ -237,7 +237,7 @@ class HTTPResponse:
return value
elif amt == chunk_left:
value = value + self._safe_read(amt)
self._safe_read(2) # toss the CRLF at the end of the chunk
self.chunk_left = None
return value
else:
@ -245,7 +245,7 @@ class HTTPResponse:
amt = amt - chunk_left
# we read the whole chunk, get another
self._safe_read(2) # toss the CRLF at the end of the chunk
chunk_left = None
# read and discard trailer up to the CRLF terminator
@ -266,7 +266,7 @@ class HTTPResponse:
s = self.fp.read()
else:
s = self._safe_read(self.length)
self.close() # we read everything
return s
if self.length is not None:
@ -355,7 +355,7 @@ class HTTPConnection:
def close(self):
"""Close the connection to the HTTP server."""
if self.sock:
self.sock.close() # close it manually... there may be other refs
self.sock = None
if self.__response:
self.__response.close()
@ -380,7 +380,7 @@ class HTTPConnection:
try:
self.sock.send(str)
except socket.error, v:
if v[0] == 32: # Broken pipe
self.close()
raise
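
A minimal client-side sketch of the HTTPConnection/HTTPResponse pair shown above (not part of the commit; the host is an example):

import httplib

conn = httplib.HTTPConnection('www.example.com')
conn.request('GET', '/')
r = conn.getresponse()       # an HTTPResponse; parses the status line and headers
print r.status, r.reason
data = r.read()              # honors Content-Length and chunked transfer-coding
conn.close()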


@ -109,7 +109,7 @@ class BasicModuleLoader(_Verbose):
"""
def find_module(self, name, path = None):
if path is None:
path = [None] + self.default_path()
for dir in path:
stuff = self.find_module_in_dir(name, dir)
@ -390,7 +390,7 @@ class BasicModuleImporter(_Verbose):
class ModuleImporter(BasicModuleImporter):
"""A module importer that supports packages."""
def import_module(self, name, globals=None, locals=None, fromlist=None):
parent = self.determine_parent(globals)
q, tail = self.find_head_package(parent, name)

File diff suppressed because it is too large.


@ -14,7 +14,7 @@ def what(file, h=None):
location = file.tell()
h = file.read(32)
file.seek(location)
f = None
else:
f = None
try:
@ -103,7 +103,7 @@ tests.append(test_jpeg)
def test_bmp(h, f):
if h[:2] == 'BM':
return 'bmp'
tests.append(test_bmp)
def test_png(h, f):
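
what() is the module's whole public interface; a sketch of calling it (not part of the commit; the path is hypothetical):

import imghdr

print imghdr.what('sample.png')   # 'png', 'jpeg', 'bmp', ... or None if unrecognized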


@ -5,7 +5,7 @@
### docco needed here and in Docs/ ...
# note: avoid importing non-builtin modules
import imp ### not available in JPython?
import sys
import strop
import __builtin__
@ -15,7 +15,7 @@ import struct
import marshal
_StringType = type('')
_ModuleType = type(sys) ### doesn't work in JPython...
class ImportManager:
"Manage the import process."
@ -663,7 +663,7 @@ def _test_revamp():
#
# Guido's comments on sys.path caching:
#
# We could cache this in a dictionary: the ImportManager can have a
# cache dict mapping pathnames to importer objects, and a separate
# method for coming up with an importer given a pathname that's not yet
@ -679,16 +679,16 @@ def _test_revamp():
# My/Guido's comments on factoring ImportManager and Importer:
#
# > However, we still have a tension occurring here:
# >
# > 1) implementing policy in ImportManager assists in single-point policy
# > changes for app/rexec situations
# > 2) implementing policy in Importer assists in package-private policy
# > changes for normal, operating conditions
# >
# > I'll see if I can sort out a way to do this. Maybe the Importer class will
# > implement the methods (which can be overridden to change policy) by
# > delegating to ImportManager.
#
# Maybe also think about what kind of policies an Importer would be
# likely to want to change. I have a feeling that a lot of the code
# there is actually not so much policy but a *necessity* to get things


@ -16,7 +16,7 @@ def url2pathname(pathname):
raise RuntimeError, 'Cannot convert non-local URL to pathname'
# Turn starting /// into /, an empty hostname means current host
if pathname[:3] == '///':
pathname = pathname[2:]
elif pathname[:2] == '//':
raise RuntimeError, 'Cannot convert non-local URL to pathname'
components = string.split(pathname, '/')
@ -68,11 +68,11 @@ def pathname2url(pathname):
return '/' + string.join(components, '/')
else:
return string.join(components, '/')
def _pncomp2url(component):
component = urllib.quote(component[:31], safe='') # We want to quote slashes
return component
def test():
for url in ["index.html",
"bar/index.html",


@ -8,7 +8,7 @@ import string
def getcaps():
"""Return a dictionary containing the mailcap database.
The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain')
to a list of dictionaries corresponding to mailcap entries. The list
collects all the entries for that MIME type from all available mailcap
@ -137,7 +137,7 @@ def parsefield(line, i, n):
def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=[]):
"""Find a match for a mailcap entry.
Return a tuple containing the command line, and the mailcap entry
used; (None, None) if no match is found. This may invoke the
'test' command of several matching entries before deciding which
@ -145,7 +145,7 @@ def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=[]):
"""
entries = lookup(caps, MIMEtype, key)
# XXX This code should somehow check for the needsterminal flag.
for e in entries:
if e.has_key('test'):
test = subst(e['test'], filename, plist)
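
The two entry points described in these docstrings combine like this (a sketch, not part of the commit; the file name is an example and the result depends on the user's mailcap files):

import mailcap

caps = mailcap.getcaps()
command, entry = mailcap.findmatch(caps, 'text/plain', 'view', '/tmp/note.txt')
print command                # e.g. 'more /tmp/note.txt', or None if nothing matched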


@ -7,85 +7,85 @@ import tempfile
class Message(rfc822.Message):
"""A derived class of rfc822.Message that knows about MIME headers and
contains some hooks for decoding encoded and multipart messages."""
"""A derived class of rfc822.Message that knows about MIME headers and
contains some hooks for decoding encoded and multipart messages."""
def __init__(self, fp, seekable = 1):
rfc822.Message.__init__(self, fp, seekable)
self.encodingheader = \
self.getheader('content-transfer-encoding')
self.typeheader = \
self.getheader('content-type')
self.parsetype()
self.parseplist()
def __init__(self, fp, seekable = 1):
rfc822.Message.__init__(self, fp, seekable)
self.encodingheader = \
self.getheader('content-transfer-encoding')
self.typeheader = \
self.getheader('content-type')
self.parsetype()
self.parseplist()
def parsetype(self):
str = self.typeheader
if str is None:
str = 'text/plain'
if ';' in str:
i = str.index(';')
self.plisttext = str[i:]
str = str[:i]
else:
self.plisttext = ''
fields = str.split('/')
for i in range(len(fields)):
fields[i] = fields[i].strip().lower()
self.type = '/'.join(fields)
self.maintype = fields[0]
self.subtype = '/'.join(fields[1:])
def parsetype(self):
str = self.typeheader
if str is None:
str = 'text/plain'
if ';' in str:
i = str.index(';')
self.plisttext = str[i:]
str = str[:i]
else:
self.plisttext = ''
fields = str.split('/')
for i in range(len(fields)):
fields[i] = fields[i].strip().lower()
self.type = '/'.join(fields)
self.maintype = fields[0]
self.subtype = '/'.join(fields[1:])
def parseplist(self):
str = self.plisttext
self.plist = []
while str[:1] == ';':
str = str[1:]
if ';' in str:
# XXX Should parse quotes!
end = str.index(';')
else:
end = len(str)
f = str[:end]
if '=' in f:
i = f.index('=')
f = f[:i].strip().lower() + \
'=' + f[i+1:].strip()
self.plist.append(f.strip())
str = str[end:]
def parseplist(self):
str = self.plisttext
self.plist = []
while str[:1] == ';':
str = str[1:]
if ';' in str:
# XXX Should parse quotes!
end = str.index(';')
else:
end = len(str)
f = str[:end]
if '=' in f:
i = f.index('=')
f = f[:i].strip().lower() + \
'=' + f[i+1:].strip()
self.plist.append(f.strip())
str = str[end:]
def getplist(self):
return self.plist
def getplist(self):
return self.plist
def getparam(self, name):
name = name.lower() + '='
n = len(name)
for p in self.plist:
if p[:n] == name:
return rfc822.unquote(p[n:])
return None
def getparam(self, name):
name = name.lower() + '='
n = len(name)
for p in self.plist:
if p[:n] == name:
return rfc822.unquote(p[n:])
return None
def getparamnames(self):
result = []
for p in self.plist:
i = p.find('=')
if i >= 0:
result.append(p[:i].lower())
return result
def getparamnames(self):
result = []
for p in self.plist:
i = p.find('=')
if i >= 0:
result.append(p[:i].lower())
return result
def getencoding(self):
if self.encodingheader is None:
return '7bit'
return self.encodingheader.lower()
def getencoding(self):
if self.encodingheader is None:
return '7bit'
return self.encodingheader.lower()
def gettype(self):
return self.type
def gettype(self):
return self.type
def getmaintype(self):
return self.maintype
def getmaintype(self):
return self.maintype
def getsubtype(self):
return self.subtype
def getsubtype(self):
return self.subtype
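
A short sketch of how the Message class above is typically used (not part of the commit; message.txt is a hypothetical RFC 822 message file):

import mimetools

fp = open('message.txt')
m = mimetools.Message(fp)
print m.gettype(), m.getencoding()   # e.g. 'text/plain' '7bit'
print m.getparam('charset')          # a content-type parameter, or None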
@ -97,74 +97,74 @@ class Message(rfc822.Message):
_prefix = None
def choose_boundary():
"""Return a random string usable as a multipart boundary.
The method used is so that it is *very* unlikely that the same
string of characters will every occur again in the Universe,
so the caller needn't check the data it is packing for the
occurrence of the boundary.
"""Return a random string usable as a multipart boundary.
The method used is so that it is *very* unlikely that the same
string of characters will every occur again in the Universe,
so the caller needn't check the data it is packing for the
occurrence of the boundary.
The boundary contains dots so you have to quote it in the header."""
The boundary contains dots so you have to quote it in the header."""
global _prefix
import time
import random
if _prefix is None:
import socket
import os
hostid = socket.gethostbyname(socket.gethostname())
try:
uid = `os.getuid()`
except:
uid = '1'
try:
pid = `os.getpid()`
except:
pid = '1'
_prefix = hostid + '.' + uid + '.' + pid
timestamp = '%.3f' % time.time()
seed = `random.randint(0, 32767)`
return _prefix + '.' + timestamp + '.' + seed
global _prefix
import time
import random
if _prefix is None:
import socket
import os
hostid = socket.gethostbyname(socket.gethostname())
try:
uid = `os.getuid()`
except:
uid = '1'
try:
pid = `os.getpid()`
except:
pid = '1'
_prefix = hostid + '.' + uid + '.' + pid
timestamp = '%.3f' % time.time()
seed = `random.randint(0, 32767)`
return _prefix + '.' + timestamp + '.' + seed
# Subroutines for decoding some common content-transfer-types
def decode(input, output, encoding):
"""Decode common content-transfer-encodings (base64, quopri, uuencode)."""
if encoding == 'base64':
import base64
return base64.decode(input, output)
if encoding == 'quoted-printable':
import quopri
return quopri.decode(input, output)
if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
import uu
return uu.decode(input, output)
if encoding in ('7bit', '8bit'):
return output.write(input.read())
if decodetab.has_key(encoding):
pipethrough(input, decodetab[encoding], output)
else:
raise ValueError, \
'unknown Content-Transfer-Encoding: %s' % encoding
"""Decode common content-transfer-encodings (base64, quopri, uuencode)."""
if encoding == 'base64':
import base64
return base64.decode(input, output)
if encoding == 'quoted-printable':
import quopri
return quopri.decode(input, output)
if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
import uu
return uu.decode(input, output)
if encoding in ('7bit', '8bit'):
return output.write(input.read())
if decodetab.has_key(encoding):
pipethrough(input, decodetab[encoding], output)
else:
raise ValueError, \
'unknown Content-Transfer-Encoding: %s' % encoding
def encode(input, output, encoding):
"""Encode common content-transfer-encodings (base64, quopri, uuencode)."""
if encoding == 'base64':
import base64
return base64.encode(input, output)
if encoding == 'quoted-printable':
import quopri
return quopri.encode(input, output, 0)
if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
import uu
return uu.encode(input, output)
if encoding in ('7bit', '8bit'):
return output.write(input.read())
if encodetab.has_key(encoding):
pipethrough(input, encodetab[encoding], output)
else:
raise ValueError, \
'unknown Content-Transfer-Encoding: %s' % encoding
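A minimal round-trip sketch with the two helpers above; the file names are hypothetical:

# Hypothetical round trip: encode a file as base64, then decode it back.
plain = open('body.txt')
coded = open('body.b64', 'w')
encode(plain, coded, 'base64')
coded.close()

coded = open('body.b64')
restored = open('body.out', 'w')
decode(coded, restored, 'base64')
restored.close()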
"""Encode common content-transfer-encodings (base64, quopri, uuencode)."""
if encoding == 'base64':
import base64
return base64.encode(input, output)
if encoding == 'quoted-printable':
import quopri
return quopri.encode(input, output, 0)
if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
import uu
return uu.encode(input, output)
if encoding in ('7bit', '8bit'):
return output.write(input.read())
if encodetab.has_key(encoding):
pipethrough(input, encodetab[encoding], output)
else:
raise ValueError, \
'unknown Content-Transfer-Encoding: %s' % encoding
# The following is no longer used for standard encodings
@@ -178,51 +178,51 @@ rm $TEMP
)'''
decodetab = {
'uuencode': uudecode_pipe,
'x-uuencode': uudecode_pipe,
'uue': uudecode_pipe,
'x-uue': uudecode_pipe,
'quoted-printable': 'mmencode -u -q',
'base64': 'mmencode -u -b',
}
encodetab = {
'x-uuencode': 'uuencode tempfile',
'uuencode': 'uuencode tempfile',
'x-uue': 'uuencode tempfile',
'uue': 'uuencode tempfile',
'quoted-printable': 'mmencode -q',
'base64': 'mmencode -b',
}
def pipeto(input, command):
pipe = os.popen(command, 'w')
copyliteral(input, pipe)
pipe.close()
def pipethrough(input, command, output):
tempname = tempfile.mktemp()
try:
temp = open(tempname, 'w')
except IOError:
print '*** Cannot create temp file', `tempname`
return
copyliteral(input, temp)
temp.close()
pipe = os.popen(command + ' <' + tempname, 'r')
copybinary(pipe, output)
pipe.close()
os.unlink(tempname)
def copyliteral(input, output):
while 1:
line = input.readline()
if not line: break
output.write(line)
def copybinary(input, output):
BUFSIZE = 8192
while 1:
line = input.read(BUFSIZE)
if not line: break
output.write(line)

View File

@@ -6,8 +6,8 @@ Decode quoted-printable parts of a mail message or encode using
quoted-printable.
Usage:
mimify(input, output)
unmimify(input, output, decode_base64 = 0)
to encode and decode respectively. Input and output may be the name
of a file or an open file object. Only a readline() method is used
on the input file; only a write() method is used on the output file.
@@ -15,16 +15,16 @@ When using file names, the input and output file names may be the
same.
Interactive usage:
mimify.py -e [infile [outfile]]
mimify.py -d [infile [outfile]]
to encode and decode respectively. Infile defaults to standard
input and outfile to standard output.
"""
# Configure
MAXLEN = 200 # if lines longer than this, encode as quoted-printable
CHARSET = 'ISO-8859-1' # default charset for non-US-ASCII mail
QUOTE = '> ' # string replies are quoted with
# End configure
import re, string
@@ -39,425 +39,424 @@ mime_head = re.compile('=\\?iso-8859-1\\?q\\?([^? \t\n]+)\\?=', re.I)
repl = re.compile('^subject:\\s+re: ', re.I)
class File:
"""A simple fake file object that knows about limited read-ahead and
boundaries. The only supported method is readline()."""
"""A simple fake file object that knows about limited read-ahead and
boundaries. The only supported method is readline()."""
def __init__(self, file, boundary):
self.file = file
self.boundary = boundary
self.peek = None
def readline(self):
if self.peek is not None:
return ''
line = self.file.readline()
if not line:
return line
if self.boundary:
if line == self.boundary + '\n':
self.peek = line
return ''
if line == self.boundary + '--\n':
self.peek = line
return ''
return line
class HeaderFile:
def __init__(self, file):
self.file = file
self.peek = None
def readline(self):
if self.peek is not None:
line = self.peek
self.peek = None
else:
line = self.file.readline()
if not line:
return line
if he.match(line):
return line
while 1:
self.peek = self.file.readline()
if len(self.peek) == 0 or \
(self.peek[0] != ' ' and self.peek[0] != '\t'):
return line
line = line + self.peek
self.peek = None
def mime_decode(line):
"""Decode a single line of quoted-printable text to 8bit."""
newline = ''
pos = 0
while 1:
res = mime_code.search(line, pos)
if res is None:
break
newline = newline + line[pos:res.start(0)] + \
chr(string.atoi(res.group(1), 16))
pos = res.end(0)
return newline + line[pos:]
"""Decode a single line of quoted-printable text to 8bit."""
newline = ''
pos = 0
while 1:
res = mime_code.search(line, pos)
if res is None:
break
newline = newline + line[pos:res.start(0)] + \
chr(string.atoi(res.group(1), 16))
pos = res.end(0)
return newline + line[pos:]
def mime_decode_header(line):
"""Decode a header line to 8bit."""
newline = ''
pos = 0
while 1:
res = mime_head.search(line, pos)
if res is None:
break
match = res.group(1)
# convert underscores to spaces (before =XX conversion!)
match = string.join(string.split(match, '_'), ' ')
newline = newline + line[pos:res.start(0)] + mime_decode(match)
pos = res.end(0)
return newline + line[pos:]
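Two small hedged examples of the decoders above, assuming the usual '=XX' quoted-printable convention (non-ASCII bytes written as Python escapes):

# Hypothetical examples.
mime_decode('caf=E9')     # -> 'caf\xe9'
mime_decode_header('Subject: =?iso-8859-1?q?caf=E9_au_lait?=\n')
                          # -> 'Subject: caf\xe9 au lait\n'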
"""Decode a header line to 8bit."""
newline = ''
pos = 0
while 1:
res = mime_head.search(line, pos)
if res is None:
break
match = res.group(1)
# convert underscores to spaces (before =XX conversion!)
match = string.join(string.split(match, '_'), ' ')
newline = newline + line[pos:res.start(0)] + mime_decode(match)
pos = res.end(0)
return newline + line[pos:]
def unmimify_part(ifile, ofile, decode_base64 = 0):
"""Convert a quoted-printable part of a MIME mail message to 8bit."""
multipart = None
quoted_printable = 0
is_base64 = 0
is_repl = 0
if ifile.boundary and ifile.boundary[:2] == QUOTE:
prefix = QUOTE
else:
prefix = ''
"""Convert a quoted-printable part of a MIME mail message to 8bit."""
multipart = None
quoted_printable = 0
is_base64 = 0
is_repl = 0
if ifile.boundary and ifile.boundary[:2] == QUOTE:
prefix = QUOTE
else:
prefix = ''
# read header
hfile = HeaderFile(ifile)
while 1:
line = hfile.readline()
if not line:
return
if prefix and line[:len(prefix)] == prefix:
line = line[len(prefix):]
pref = prefix
else:
pref = ''
line = mime_decode_header(line)
if qp.match(line):
quoted_printable = 1
continue # skip this header
if decode_base64 and base64_re.match(line):
is_base64 = 1
continue
ofile.write(pref + line)
if not prefix and repl.match(line):
# we're dealing with a reply message
is_repl = 1
mp_res = mp.match(line)
if mp_res:
multipart = '--' + mp_res.group(1)
if he.match(line):
break
if is_repl and (quoted_printable or multipart):
is_repl = 0
# read body
while 1:
line = ifile.readline()
if not line:
return
line = re.sub(mime_head, '\\1', line)
if prefix and line[:len(prefix)] == prefix:
line = line[len(prefix):]
pref = prefix
else:
pref = ''
## if is_repl and len(line) >= 4 and line[:4] == QUOTE+'--' and line[-3:] != '--\n':
## multipart = line[:-1]
while multipart:
if line == multipart + '--\n':
ofile.write(pref + line)
multipart = None
line = None
break
if line == multipart + '\n':
ofile.write(pref + line)
nifile = File(ifile, multipart)
unmimify_part(nifile, ofile, decode_base64)
line = nifile.peek
if not line:
# premature end of file
break
continue
# not a boundary between parts
break
if line and quoted_printable:
while line[-2:] == '=\n':
line = line[:-2]
newline = ifile.readline()
if newline[:len(QUOTE)] == QUOTE:
newline = newline[len(QUOTE):]
line = line + newline
line = mime_decode(line)
if line and is_base64 and not pref:
import base64
line = base64.decodestring(line)
if line:
ofile.write(pref + line)
def unmimify(infile, outfile, decode_base64 = 0):
"""Convert quoted-printable parts of a MIME mail message to 8bit."""
if type(infile) == type(''):
ifile = open(infile)
if type(outfile) == type('') and infile == outfile:
import os
d, f = os.path.split(infile)
os.rename(infile, os.path.join(d, ',' + f))
else:
ifile = infile
if type(outfile) == type(''):
ofile = open(outfile, 'w')
else:
ofile = outfile
nifile = File(ifile, None)
unmimify_part(nifile, ofile, decode_base64)
ofile.flush()
"""Convert quoted-printable parts of a MIME mail message to 8bit."""
if type(infile) == type(''):
ifile = open(infile)
if type(outfile) == type('') and infile == outfile:
import os
d, f = os.path.split(infile)
os.rename(infile, os.path.join(d, ',' + f))
else:
ifile = infile
if type(outfile) == type(''):
ofile = open(outfile, 'w')
else:
ofile = outfile
nifile = File(ifile, None)
unmimify_part(nifile, ofile, decode_base64)
ofile.flush()
mime_char = re.compile('[=\177-\377]') # quote these chars in body
mime_header_char = re.compile('[=?\177-\377]') # quote these in header
def mime_encode(line, header):
"""Code a single line as quoted-printable.
If header is set, quote some extra characters."""
if header:
reg = mime_header_char
else:
reg = mime_char
newline = ''
pos = 0
if len(line) >= 5 and line[:5] == 'From ':
# quote 'From ' at the start of a line for stupid mailers
newline = string.upper('=%02x' % ord('F'))
pos = 1
while 1:
res = reg.search(line, pos)
if res is None:
break
newline = newline + line[pos:res.start(0)] + \
string.upper('=%02x' % ord(res.group(0)))
pos = res.end(0)
line = newline + line[pos:]
"""Code a single line as quoted-printable.
If header is set, quote some extra characters."""
if header:
reg = mime_header_char
else:
reg = mime_char
newline = ''
pos = 0
if len(line) >= 5 and line[:5] == 'From ':
# quote 'From ' at the start of a line for stupid mailers
newline = string.upper('=%02x' % ord('F'))
pos = 1
while 1:
res = reg.search(line, pos)
if res is None:
break
newline = newline + line[pos:res.start(0)] + \
string.upper('=%02x' % ord(res.group(0)))
pos = res.end(0)
line = newline + line[pos:]
newline = ''
while len(line) >= 75:
i = 73
while line[i] == '=' or line[i-1] == '=':
i = i - 1
i = i + 1
newline = newline + line[:i] + '=\n'
line = line[i:]
return newline + line
mime_header = re.compile('([ \t(]|^)([-a-zA-Z0-9_+]*[\177-\377][-a-zA-Z0-9_+\177-\377]*)([ \t)]|\n)')
def mime_encode_header(line):
"""Code a single header line as quoted-printable."""
newline = ''
pos = 0
while 1:
res = mime_header.search(line, pos)
if res is None:
break
newline = '%s%s%s=?%s?Q?%s?=%s' % \
(newline, line[pos:res.start(0)], res.group(1),
CHARSET, mime_encode(res.group(2), 1), res.group(3))
pos = res.end(0)
return newline + line[pos:]
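The matching hedged examples for the encoders, with CHARSET left at the configured 'ISO-8859-1':

# Hypothetical examples; non-ASCII bytes written as Python escapes.
mime_encode('caf\xe9 au lait\n', 0)       # -> 'caf=E9 au lait\n'
mime_encode_header('Subject: caf\xe9\n')  # -> 'Subject: =?ISO-8859-1?Q?caf=E9?=\n'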
"""Code a single header line as quoted-printable."""
newline = ''
pos = 0
while 1:
res = mime_header.search(line, pos)
if res is None:
break
newline = '%s%s%s=?%s?Q?%s?=%s' % \
(newline, line[pos:res.start(0)], res.group(1),
CHARSET, mime_encode(res.group(2), 1), res.group(3))
pos = res.end(0)
return newline + line[pos:]
mv = re.compile('^mime-version:', re.I)
cte = re.compile('^content-transfer-encoding:', re.I)
iso_char = re.compile('[\177-\377]')
def mimify_part(ifile, ofile, is_mime):
"""Convert an 8bit part of a MIME mail message to quoted-printable."""
has_cte = is_qp = is_base64 = 0
multipart = None
must_quote_body = must_quote_header = has_iso_chars = 0
"""Convert an 8bit part of a MIME mail message to quoted-printable."""
has_cte = is_qp = is_base64 = 0
multipart = None
must_quote_body = must_quote_header = has_iso_chars = 0
header = []
header_end = ''
message = []
message_end = ''
# read header
hfile = HeaderFile(ifile)
while 1:
line = hfile.readline()
if not line:
break
if not must_quote_header and iso_char.search(line):
must_quote_header = 1
if mv.match(line):
is_mime = 1
if cte.match(line):
has_cte = 1
if qp.match(line):
is_qp = 1
elif base64_re.match(line):
is_base64 = 1
mp_res = mp.match(line)
if mp_res:
multipart = '--' + mp_res.group(1)
if he.match(line):
header_end = line
break
header.append(line)
# read body
while 1:
line = ifile.readline()
if not line:
break
if multipart:
if line == multipart + '--\n':
message_end = line
break
if line == multipart + '\n':
message_end = line
break
if is_base64:
message.append(line)
continue
if is_qp:
while line[-2:] == '=\n':
line = line[:-2]
newline = ifile.readline()
if newline[:len(QUOTE)] == QUOTE:
newline = newline[len(QUOTE):]
line = line + newline
line = mime_decode(line)
message.append(line)
if not has_iso_chars:
if iso_char.search(line):
has_iso_chars = must_quote_body = 1
if not must_quote_body:
if len(line) > MAXLEN:
must_quote_body = 1
# convert and output header and body
for line in header:
if must_quote_header:
line = mime_encode_header(line)
chrset_res = chrset.match(line)
if chrset_res:
if has_iso_chars:
# change us-ascii into iso-8859-1
if string.lower(chrset_res.group(2)) == 'us-ascii':
line = '%s%s%s' % (chrset_res.group(1),
CHARSET,
chrset_res.group(3))
else:
# change iso-8859-* into us-ascii
line = '%sus-ascii%s' % chrset_res.group(1, 3)
if has_cte and cte.match(line):
line = 'Content-Transfer-Encoding: '
if is_base64:
line = line + 'base64\n'
elif must_quote_body:
line = line + 'quoted-printable\n'
else:
line = line + '7bit\n'
ofile.write(line)
if (must_quote_header or must_quote_body) and not is_mime:
ofile.write('Mime-Version: 1.0\n')
ofile.write('Content-Type: text/plain; ')
if has_iso_chars:
ofile.write('charset="%s"\n' % CHARSET)
else:
ofile.write('charset="us-ascii"\n')
if must_quote_body and not has_cte:
ofile.write('Content-Transfer-Encoding: quoted-printable\n')
ofile.write(header_end)
for line in message:
if must_quote_body:
line = mime_encode(line, 0)
ofile.write(line)
ofile.write(message_end)
line = message_end
while multipart:
if line == multipart + '--\n':
# read bit after the end of the last part
while 1:
line = ifile.readline()
if not line:
return
if must_quote_body:
line = mime_encode(line, 0)
ofile.write(line)
if line == multipart + '\n':
nifile = File(ifile, multipart)
mimify_part(nifile, ofile, 1)
line = nifile.peek
if not line:
# premature end of file
break
ofile.write(line)
continue
# unexpectedly no multipart separator--copy rest of file
while 1:
line = ifile.readline()
if not line:
return
if must_quote_body:
line = mime_encode(line, 0)
ofile.write(line)
def mimify(infile, outfile):
"""Convert 8bit parts of a MIME mail message to quoted-printable."""
if type(infile) == type(''):
ifile = open(infile)
if type(outfile) == type('') and infile == outfile:
import os
d, f = os.path.split(infile)
os.rename(infile, os.path.join(d, ',' + f))
else:
ifile = infile
if type(outfile) == type(''):
ofile = open(outfile, 'w')
else:
ofile = outfile
nifile = File(ifile, None)
mimify_part(nifile, ofile, 0)
ofile.flush()
"""Convert 8bit parts of a MIME mail message to quoted-printable."""
if type(infile) == type(''):
ifile = open(infile)
if type(outfile) == type('') and infile == outfile:
import os
d, f = os.path.split(infile)
os.rename(infile, os.path.join(d, ',' + f))
else:
ifile = infile
if type(outfile) == type(''):
ofile = open(outfile, 'w')
else:
ofile = outfile
nifile = File(ifile, None)
mimify_part(nifile, ofile, 0)
ofile.flush()
import sys
if __name__ == '__main__' or (len(sys.argv) > 0 and sys.argv[0] == 'mimify'):
import getopt
usage = 'Usage: mimify [-l len] -[ed] [infile [outfile]]'
decode_base64 = 0
opts, args = getopt.getopt(sys.argv[1:], 'l:edb')
if len(args) not in (0, 1, 2):
print usage
sys.exit(1)
if (('-e', '') in opts) == (('-d', '') in opts) or \
((('-b', '') in opts) and (('-d', '') not in opts)):
print usage
sys.exit(1)
for o, a in opts:
if o == '-e':
encode = mimify
elif o == '-d':
encode = unmimify
elif o == '-l':
try:
MAXLEN = string.atoi(a)
except:
print usage
sys.exit(1)
elif o == '-b':
decode_base64 = 1
if len(args) == 0:
encode_args = (sys.stdin, sys.stdout)
elif len(args) == 1:
encode_args = (args[0], sys.stdout)
else:
encode_args = (args[0], args[1])
if decode_base64:
encode_args = encode_args + (decode_base64,)
apply(encode, encode_args)

View File

@@ -13,8 +13,8 @@ fp = MultiFile(real_fp)
"read some lines from fp"
fp.push(separator)
while 1:
"read lines from fp until it returns an empty string" (A)
if not fp.next(): break
"read lines from fp until it returns an empty string" (A)
if not fp.next(): break
fp.pop()
"read remaining lines from fp until it returns an empty string"
@@ -31,134 +31,134 @@ import sys
import string
class Error(Exception):
pass
class MultiFile:
seekable = 0
def __init__(self, fp, seekable=1):
self.fp = fp
self.stack = [] # Grows down
self.level = 0
self.last = 0
if seekable:
self.seekable = 1
self.start = self.fp.tell()
self.posstack = [] # Grows down
def tell(self):
if self.level > 0:
return self.lastpos
return self.fp.tell() - self.start
def seek(self, pos, whence=0):
here = self.tell()
if whence:
if whence == 1:
pos = pos + here
elif whence == 2:
if self.level > 0:
pos = pos + self.lastpos
else:
raise Error, "can't use whence=2 yet"
if not 0 <= pos <= here or \
self.level > 0 and pos > self.lastpos:
raise Error, 'bad MultiFile.seek() call'
self.fp.seek(pos + self.start)
self.level = 0
self.last = 0
def readline(self):
if self.level > 0:
return ''
line = self.fp.readline()
# Real EOF?
if not line:
self.level = len(self.stack)
self.last = (self.level > 0)
if self.last:
raise Error, 'sudden EOF in MultiFile.readline()'
return ''
assert self.level == 0
# Fast check to see if this is just data
if self.is_data(line):
return line
else:
# Ignore trailing whitespace on marker lines
k = len(line) - 1
while line[k] in string.whitespace:
k = k - 1
marker = line[:k+1]
# No? OK, try to match a boundary.
# Return the line (unstripped) if we don't.
for i in range(len(self.stack)):
sep = self.stack[i]
if marker == self.section_divider(sep):
self.last = 0
break
elif marker == self.end_marker(sep):
self.last = 1
break
else:
return line
# We only get here if we see a section divider or EOM line
if self.seekable:
self.lastpos = self.tell() - len(line)
self.level = i+1
if self.level > 1:
raise Error,'Missing endmarker in MultiFile.readline()'
return ''
def readlines(self):
list = []
while 1:
line = self.readline()
if not line: break
list.append(line)
return list
def read(self): # Note: no size argument -- read until EOF only!
return string.joinfields(self.readlines(), '')
def next(self):
while self.readline(): pass
if self.level > 1 or self.last:
return 0
self.level = 0
self.last = 0
if self.seekable:
self.start = self.fp.tell()
return 1
def push(self, sep):
if self.level > 0:
raise Error, 'bad MultiFile.push() call'
self.stack.insert(0, sep)
if self.seekable:
self.posstack.insert(0, self.start)
self.start = self.fp.tell()
def pop(self):
if self.stack == []:
raise Error, 'bad MultiFile.pop() call'
if self.level <= 1:
self.last = 0
else:
abslastpos = self.lastpos + self.start
self.level = max(0, self.level - 1)
del self.stack[0]
if self.seekable:
self.start = self.posstack[0]
del self.posstack[0]
if self.level > 0:
self.lastpos = abslastpos - self.start
def is_data(self, line):
return line[:2] != '--'
def section_divider(self, str):
return "--" + str
def end_marker(self, str):
return "--" + str + "--"

View File

@@ -13,39 +13,39 @@ for lock, where a function is called once the lock is acquired.
"""
class mutex:
def __init__(self):
"""Create a new mutex -- initially unlocked."""
self.locked = 0
self.queue = []
def test(self):
"""Test the locked bit of the mutex."""
return self.locked
def testandset(self):
"""Atomic test-and-set -- grab the lock if it is not set,
return true if it succeeded."""
if not self.locked:
self.locked = 1
return 1
else:
return 0
def lock(self, function, argument):
"""Lock a mutex, call the function with supplied argument
when it is acquired. If the mutex is already locked, place
function and argument in the queue."""
if self.testandset():
function(argument)
else:
self.queue.append((function, argument))
def unlock(self):
"""Unlock a mutex. If the queue is not empty, call the next
function with its argument."""
if self.queue:
function, argument = self.queue[0]
del self.queue[0]
function(argument)
else:
self.locked = 0
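A hedged sketch of the queueing behaviour the docstrings describe; the callback name is made up:

# Hypothetical usage sketch: the second request is queued until the
# current holder releases the mutex.
m = mutex()

def job(name):
    print 'running', name       # deliberately does not unlock here

m.lock(job, 'first')             # mutex was free: 'first' runs at once, mutex stays locked
m.lock(job, 'second')            # mutex held: ('second' goes on the queue)
m.unlock()                       # dequeues and runs 'second'
m.unlock()                       # queue empty: the mutex is unlocked again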