diff --git a/Doc/README b/Doc/README index 484fce61537..a426ba2615f 100644 --- a/Doc/README +++ b/Doc/README @@ -229,7 +229,7 @@ The Python source is copyrighted, but you can freely use and copy it as long as you don't change or remove the copyright notice: ---------------------------------------------------------------------- -Copyright (c) 2000-2006 Python Software Foundation. +Copyright (c) 2000-2007 Python Software Foundation. All rights reserved. Copyright (c) 2000 BeOpen.com. diff --git a/Doc/api/newtypes.tex b/Doc/api/newtypes.tex index 43d2f8839ab..e5c5aaced2f 100644 --- a/Doc/api/newtypes.tex +++ b/Doc/api/newtypes.tex @@ -103,8 +103,6 @@ defining new object types. the value for the \var{methods} argument]{2.3} \end{cfuncdesc} -DL_IMPORT - \begin{cvardesc}{PyObject}{_Py_NoneStruct} Object which is visible in Python as \code{None}. This should only be accessed using the \code{Py_None} macro, which evaluates to a diff --git a/Doc/howto/functional.rst b/Doc/howto/functional.rst index 2e5a6a9202d..124dd01bbc6 100644 --- a/Doc/howto/functional.rst +++ b/Doc/howto/functional.rst @@ -1398,10 +1398,10 @@ Python documentation ''''''''''''''''''''''''''' http://docs.python.org/lib/module-itertools.html: -Documentation ``for the itertools`` module. +Documentation for the ``itertools`` module. http://docs.python.org/lib/module-operator.html: -Documentation ``for the operator`` module. +Documentation for the ``operator`` module. http://www.python.org/dev/peps/pep-0289/: PEP 289: "Generator Expressions" diff --git a/Doc/lib/libbsddb.tex b/Doc/lib/libbsddb.tex index 85ea824db08..e9d7e217b8f 100644 --- a/Doc/lib/libbsddb.tex +++ b/Doc/lib/libbsddb.tex @@ -16,7 +16,7 @@ serialize them somehow, typically using \function{marshal.dumps()} or \function{pickle.dumps()}. The \module{bsddb} module requires a Berkeley DB library version from -3.3 thru 4.4. +3.3 thru 4.5. \begin{seealso} \seeurl{http://pybsddb.sourceforge.net/} diff --git a/Doc/lib/libbz2.tex b/Doc/lib/libbz2.tex index 11801fe7e6f..36bc0d2e7d8 100644 --- a/Doc/lib/libbz2.tex +++ b/Doc/lib/libbz2.tex @@ -81,10 +81,10 @@ is an approximate bound on the total number of bytes in the lines returned. \begin{methoddesc}[BZ2File]{seek}{offset\optional{, whence}} Move to new file position. Argument \var{offset} is a byte count. Optional -argument \var{whence} defaults to \code{0} (offset from start of file, -offset should be \code{>= 0}); other values are \code{1} (move relative to -current position, positive or negative), and \code{2} (move relative to end -of file, usually negative, although many platforms allow seeking beyond +argument \var{whence} defaults to \code{os.SEEK_SET} or \code{0} (offset from start of file; +offset should be \code{>= 0}); other values are \code{os.SEEK_CUR} or \code{1} (move relative to +current position; offset can be positive or negative), and \code{os.SEEK_END} or \code{2} (move relative to end +of file; offset is usually negative, although many platforms allow seeking beyond the end of a file). Note that seeking of bz2 files is emulated, and depending on the parameters diff --git a/Doc/lib/libctypes.tex b/Doc/lib/libctypes.tex index c0e23101bab..2f880f23fc8 100755 --- a/Doc/lib/libctypes.tex +++ b/Doc/lib/libctypes.tex @@ -2085,10 +2085,10 @@ classmethod, normally it returns \code{obj} if that is an instance of the type. Some types accept other objects as well. 
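The seek() documentation hunks above (libbz2.tex here, with matching changes to libmmap.tex and libstdtypes.tex further down) switch from the bare 0/1/2 whence values to the symbolic os.SEEK_* names; a minimal sketch of the equivalence, assuming an existing data.bz2 file (not part of the patch):

    import os
    from bz2 import BZ2File

    f = BZ2File('data.bz2')        # hypothetical input file
    f.seek(10, os.SEEK_SET)        # whence=0: absolute position
    f.seek(-5, os.SEEK_CUR)        # whence=1: relative to the current position
    f.seek(0, os.SEEK_END)         # whence=2: relative to the end (emulated for bz2)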
\end{methoddesc} -\begin{methoddesc}{in_dll}{name, library} +\begin{methoddesc}{in_dll}{library, name} This method returns a ctypes type instance exported by a shared library. \var{name} is the name of the symbol that exports the data, -\code{library} is the loaded shared library. +\var{library} is the loaded shared library. \end{methoddesc} Common instance variables of ctypes data types: diff --git a/Doc/lib/libfuncs.tex b/Doc/lib/libfuncs.tex index 7e0b88d0959..02cca83ebbf 100644 --- a/Doc/lib/libfuncs.tex +++ b/Doc/lib/libfuncs.tex @@ -237,11 +237,11 @@ class C: \code{del \var{x}.\var{foobar}}. \end{funcdesc} -\begin{funcdesc}{dict}{\optional{mapping-or-sequence}} +\begin{funcdesc}{dict}{\optional{arg}} Return a new dictionary initialized from an optional positional argument or from a set of keyword arguments. If no arguments are given, return a new empty dictionary. - If the positional argument is a mapping object, return a dictionary + If the positional argument \var{arg} is a mapping object, return a dictionary mapping the same keys to the same values as does the mapping object. Otherwise the positional argument must be a sequence, a container that supports iteration, or an iterator object. The elements of the argument @@ -448,18 +448,18 @@ class C: \versionadded{2.2} \end{funcdesc} -\begin{funcdesc}{filter}{function, list} - Construct a list from those elements of \var{list} for which - \var{function} returns true. \var{list} may be either a sequence, a - container which supports iteration, or an iterator, If \var{list} +\begin{funcdesc}{filter}{function, iterable} + Construct a list from those elements of \var{iterable} for which + \var{function} returns true. \var{iterable} may be either a sequence, a + container which supports iteration, or an iterator, If \var{iterable} is a string or a tuple, the result also has that type; otherwise it is always a list. If \var{function} is \code{None}, the identity function is assumed, that is, all elements of - \var{list} that are false are removed. + \var{iterable} that are false are removed. - Note that \code{filter(function, \var{list})} is equivalent to - \code{[item for item in \var{list} if function(item)]} if function is - not \code{None} and \code{[item for item in \var{list} if item]} if + Note that \code{filter(function, \var{iterable})} is equivalent to + \code{[item for item in \var{iterable} if function(item)]} if function is + not \code{None} and \code{[item for item in \var{iterable} if item]} if function is \code{None}. \end{funcdesc} @@ -608,12 +608,12 @@ class C: may be a sequence (string, tuple or list) or a mapping (dictionary). \end{funcdesc} -\begin{funcdesc}{list}{\optional{sequence}} +\begin{funcdesc}{list}{\optional{iterable}} Return a list whose items are the same and in the same order as - \var{sequence}'s items. \var{sequence} may be either a sequence, a + \var{iterable}'s items. \var{iterable} may be either a sequence, a container that supports iteration, or an iterator object. If - \var{sequence} is already a list, a copy is made and returned, - similar to \code{\var{sequence}[:]}. For instance, + \var{iterable} is already a list, a copy is made and returned, + similar to \code{\var{iterable}[:]}. For instance, \code{list('abc')} returns \code{['a', 'b', 'c']} and \code{list( (1, 2, 3) )} returns \code{[1, 2, 3]}. If no argument is given, returns a new empty list, \code{[]}. @@ -639,22 +639,22 @@ class C: are given, returns \code{0L}. 
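The libfuncs.tex hunks above reword dict(), filter() and list() in terms of an arbitrary iterable argument; a short illustration of the documented behaviour (not part of the patch):

    # dict() accepts a mapping, an iterable of key/value pairs, or keywords
    print dict([('a', 1), ('b', 2)])        # {'a': 1, 'b': 2}
    print dict(a=1, b=2)                    # {'a': 1, 'b': 2}

    # filter() keeps the input type for strings and tuples, otherwise a list
    print filter(str.isdigit, 'a1b2c3')     # '123'
    print filter(None, (0, 1, '', 'x'))     # (1, 'x')

    # list() copies any iterable, including a generator
    print list(c for c in 'abc')            # ['a', 'b', 'c']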
\end{funcdesc} -\begin{funcdesc}{map}{function, list, ...} - Apply \var{function} to every item of \var{list} and return a list - of the results. If additional \var{list} arguments are passed, +\begin{funcdesc}{map}{function, iterable, ...} + Apply \var{function} to every item of \var{iterable} and return a list + of the results. If additional \var{iterable} arguments are passed, \var{function} must take that many arguments and is applied to the - items of all lists in parallel; if a list is shorter than another it + items from all iterables in parallel. If one iterable is shorter than another it is assumed to be extended with \code{None} items. If \var{function} is \code{None}, the identity function is assumed; if there are - multiple list arguments, \function{map()} returns a list consisting - of tuples containing the corresponding items from all lists (a kind - of transpose operation). The \var{list} arguments may be any kind - of sequence; the result is always a list. + multiple arguments, \function{map()} returns a list consisting + of tuples containing the corresponding items from all iterables (a kind + of transpose operation). The \var{iterable} arguments may be a sequence + or any iterable object; the result is always a list. \end{funcdesc} -\begin{funcdesc}{max}{s\optional{, args...}\optional{key}} - With a single argument \var{s}, return the largest item of a - non-empty sequence (such as a string, tuple or list). With more +\begin{funcdesc}{max}{iterable\optional{, args...}\optional{key}} + With a single argument \var{iterable}, return the largest item of a + non-empty iterable (such as a string, tuple or list). With more than one argument, return the largest of the arguments. The optional \var{key} argument specifies a one-argument ordering @@ -664,16 +664,16 @@ class C: \versionchanged[Added support for the optional \var{key} argument]{2.5} \end{funcdesc} -\begin{funcdesc}{min}{s\optional{, args...}\optional{key}} - With a single argument \var{s}, return the smallest item of a - non-empty sequence (such as a string, tuple or list). With more +\begin{funcdesc}{min}{iterable\optional{, args...}\optional{key}} + With a single argument \var{iterable}, return the smallest item of a + non-empty iterable (such as a string, tuple or list). With more than one argument, return the smallest of the arguments. The optional \var{key} argument specifies a one-argument ordering function like that used for \method{list.sort()}. The \var{key} argument, if supplied, must be in keyword form (for example, \samp{min(a,b,c,key=func)}). - \versionchanged[Added support for the optional \var{key} argument]{2.5} + \versionchanged[Added support for the optional \var{key} argument]{2.5} \end{funcdesc} \begin{funcdesc}{object}{} @@ -1073,11 +1073,11 @@ class C: string, \code{''}. \end{funcdesc} -\begin{funcdesc}{sum}{sequence\optional{, start}} - Sums \var{start} and the items of a \var{sequence}, from left to - right, and returns the total. \var{start} defaults to \code{0}. - The \var{sequence}'s items are normally numbers, and are not allowed - to be strings. The fast, correct way to concatenate sequence of +\begin{funcdesc}{sum}{iterable\optional{, start}} + Sums \var{start} and the items of an \var{iterable} from left to + right and returns the total. \var{start} defaults to \code{0}. + The \var{iterable}'s items are normally numbers, and are not allowed + to be strings. The fast, correct way to concatenate a sequence of strings is by calling \code{''.join(\var{sequence})}. 
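map(), max(), min() and sum() above get the same iterable-based wording; a few one-liners matching the descriptions, including the key= argument added in 2.5 (not part of the patch):

    print map(None, [1, 2, 3], 'ab')        # [(1, 'a'), (2, 'b'), (3, None)]
    print map(pow, [2, 3], [10, 2])         # [1024, 9]
    print max(['apple', 'fig'], key=len)    # 'apple'
    print min(3, -7, 2, key=abs)            # 2
    print sum(xrange(5), 100)               # 110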
\versionadded{2.3} \end{funcdesc} @@ -1105,11 +1105,11 @@ class C(B): \versionadded{2.2} \end{funcdesc} -\begin{funcdesc}{tuple}{\optional{sequence}} +\begin{funcdesc}{tuple}{\optional{iterable}} Return a tuple whose items are the same and in the same order as - \var{sequence}'s items. \var{sequence} may be a sequence, a + \var{iterable}'s items. \var{iterable} may be a sequence, a container that supports iteration, or an iterator object. - If \var{sequence} is already a tuple, it + If \var{iterable} is already a tuple, it is returned unchanged. For instance, \code{tuple('abc')} returns \code{('a', 'b', 'c')} and \code{tuple([1, 2, 3])} returns \code{(1, 2, 3)}. If no argument is given, returns a new empty diff --git a/Doc/lib/liblogging.tex b/Doc/lib/liblogging.tex index e01fe0beb72..b97854d4e72 100644 --- a/Doc/lib/liblogging.tex +++ b/Doc/lib/liblogging.tex @@ -516,8 +516,10 @@ Removes the specified handler \var{hdlr} from this logger. \end{methoddesc} \begin{methoddesc}{findCaller}{} -Finds the caller's source filename and line number. Returns the filename -and line number as a 2-element tuple. +Finds the caller's source filename and line number. Returns the filename, +line number and function name as a 3-element tuple. +\versionchanged[The function name was added. In earlier versions, the +filename and line number were returned as a 2-element tuple.]{2.5} \end{methoddesc} \begin{methoddesc}{handle}{record} diff --git a/Doc/lib/libmmap.tex b/Doc/lib/libmmap.tex index 3dca40f6c4b..3763d4f84b1 100644 --- a/Doc/lib/libmmap.tex +++ b/Doc/lib/libmmap.tex @@ -140,8 +140,9 @@ Memory-mapped file objects support the following methods: \begin{methoddesc}{seek}{pos\optional{, whence}} Set the file's current position. \var{whence} argument is optional - and defaults to \code{0} (absolute file positioning); other values - are \code{1} (seek relative to the current position) and \code{2} + and defaults to \code{os.SEEK_SET} or \code{0} (absolute file + positioning); other values are \code{os.SEEK_CUR} or \code{1} (seek + relative to the current position) and \code{os.SEEK_END} or \code{2} (seek relative to the file's end). \end{methoddesc} diff --git a/Doc/lib/libsimplexmlrpc.tex b/Doc/lib/libsimplexmlrpc.tex index 7a9786125d5..6b458558cd5 100644 --- a/Doc/lib/libsimplexmlrpc.tex +++ b/Doc/lib/libsimplexmlrpc.tex @@ -15,7 +15,7 @@ CGI environment, using \class{CGIXMLRPCRequestHandler}. \begin{classdesc}{SimpleXMLRPCServer}{addr\optional{, requestHandler\optional{, - logRequests\optional{allow_none\optional{, encoding}}}}} + logRequests\optional{, allow_none\optional{, encoding}}}}} Create a new server instance. This class provides methods for registration of functions that can be called by diff --git a/Doc/lib/libsocket.tex b/Doc/lib/libsocket.tex index f510fd40c0a..f20c56c5a3c 100644 --- a/Doc/lib/libsocket.tex +++ b/Doc/lib/libsocket.tex @@ -569,11 +569,32 @@ at once is specified by \var{bufsize}. See the \UNIX{} manual page Receive data from the socket. The return value is a pair \code{(\var{string}, \var{address})} where \var{string} is a string representing the data received and \var{address} is the address of the -socket sending the data. The optional \var{flags} argument has the -same meaning as for \method{recv()} above. +socket sending the data. See the \UNIX{} manual page +\manpage{recv}{2} for the meaning of the optional argument +\var{flags}; it defaults to zero. (The format of \var{address} depends on the address family --- see above.) 
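The liblogging.tex hunk above records that Logger.findCaller() now returns a 3-element tuple; a hedged sketch of unpacking the new return value (not part of the patch):

    import logging

    logger = logging.getLogger('demo')
    # since 2.5 the function name is returned alongside filename and line number
    filename, lineno, funcname = logger.findCaller()
    print filename, lineno, funcname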
\end{methoddesc} +\begin{methoddesc}[socket]{recvfrom_into}{buffer\optional{, nbytes\optional{, flags}}} +Receive data from the socket, writing it into \var{buffer} instead of +creating a new string. The return value is a pair +\code{(\var{nbytes}, \var{address})} where \var{nbytes} is the number +of bytes received and \var{address} is the address of the socket +sending the data. See the \UNIX{} manual page +\manpage{recv}{2} for the meaning of the optional argument +\var{flags}; it defaults to zero. (The format of \var{address} +depends on the address family --- see above.) +\end{methoddesc} + +\begin{methoddesc}[socket]{recv_into}{buffer\optional{, nbytes\optional{, flags}}} +Receive up to \var{nbytes} bytes from the socket, +storing the data into a buffer rather than creating a new string. +If \var{nbytes} is not specified (or 0), +receive up to the size available in the given buffer. +See the \UNIX{} manual page \manpage{recv}{2} for the meaning of the +optional argument \var{flags}; it defaults to zero. +\end{methoddesc} + \begin{methoddesc}[socket]{send}{string\optional{, flags}} Send data to the socket. The socket must be connected to a remote socket. The optional \var{flags} argument has the same meaning as for diff --git a/Doc/lib/libsqlite3.tex b/Doc/lib/libsqlite3.tex index 82416fa53d9..aeb60c1ac68 100644 --- a/Doc/lib/libsqlite3.tex +++ b/Doc/lib/libsqlite3.tex @@ -187,12 +187,12 @@ int, long, float, str (UTF-8 encoded), unicode or buffer. \end{funcdesc} \begin{funcdesc}{complete_statement}{sql} -Returns \constant{True} if the string \var{sql} one or more complete SQL -statements terminated by semicolons. It does not verify if the SQL is -syntactically correct, only if there are no unclosed string literals and if the +Returns \constant{True} if the string \var{sql} contains one or more complete SQL +statements terminated by semicolons. It does not verify that the SQL is +syntactically correct, only that there are no unclosed string literals and the statement is terminated by a semicolon. -This can be used to build a shell for SQLite, like in the following example: +This can be used to build a shell for SQLite, as in the following example: \verbatiminput{sqlite3/complete_statement.py} \end{funcdesc} diff --git a/Doc/lib/libstdtypes.tex b/Doc/lib/libstdtypes.tex index b433bc44861..8a527f60497 100644 --- a/Doc/lib/libstdtypes.tex +++ b/Doc/lib/libstdtypes.tex @@ -1660,9 +1660,12 @@ flush the read-ahead buffer. \begin{methoddesc}[file]{seek}{offset\optional{, whence}} Set the file's current position, like \code{stdio}'s \cfunction{fseek()}. - The \var{whence} argument is optional and defaults to \code{0} - (absolute file positioning); other values are \code{1} (seek - relative to the current position) and \code{2} (seek relative to the + The \var{whence} argument is optional and defaults to + \code{os.SEEK_SET} or \code{0} + (absolute file positioning); other values are \code{os.SEEK_CUR} or \code{1} + (seek + relative to the current position) and \code{os.SEEK_END} or \code{2} + (seek relative to the file's end). There is no return value. Note that if the file is opened for appending (mode \code{'a'} or \code{'a+'}), any \method{seek()} operations will be undone at the next write. If the diff --git a/Doc/lib/libtarfile.tex b/Doc/lib/libtarfile.tex index ca6e65a8ad5..5f277dafef4 100644 --- a/Doc/lib/libtarfile.tex +++ b/Doc/lib/libtarfile.tex @@ -124,6 +124,11 @@ Some facts and figures: only if \member{TarFile.errorlevel}\code{ == 2}. 
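The new socket.recv_into() and recvfrom_into() methods documented above read into a pre-allocated writable buffer instead of building a new string each time; a rough sketch, assuming a reachable echo server at HOST/PORT (both hypothetical):

    import array
    import socket

    HOST, PORT = 'localhost', 8888          # hypothetical echo server
    buf = array.array('c', '\0' * 1024)     # any writable buffer object

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((HOST, PORT))
    s.send('ping')
    nbytes = s.recv_into(buf)               # fills buf in place, returns the byte count
    print buf.tostring()[:nbytes]
    s.close()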
\end{excdesc} +\begin{excdesc}{HeaderError} + Is raised by \method{frombuf()} if the buffer it gets is invalid. + \versionadded{2.6} +\end{excdesc} + \begin{seealso} \seemodule{zipfile}{Documentation of the \refmodule{zipfile} standard module.} @@ -332,6 +337,8 @@ the file's data itself. \begin{methoddesc}{frombuf}{} Create and return a \class{TarInfo} object from a string buffer. + \versionadded[Raises \exception{HeaderError} if the buffer is + invalid.]{2.6} \end{methoddesc} \begin{methoddesc}{tobuf}{posix} diff --git a/Doc/lib/libtest.tex b/Doc/lib/libtest.tex index 54a24b1c98e..f89c70723c9 100644 --- a/Doc/lib/libtest.tex +++ b/Doc/lib/libtest.tex @@ -263,6 +263,12 @@ If no match is found \var{filename} is returned. This does not equal a failure since it could be the path to the file. \end{funcdesc} +\begin{funcdesc}{guard_warnings_filter}{} +Returns a context manager that guards the \module{warnings} module's +filter settings. +\versionadded{2.6} +\end{funcdesc} + \begin{funcdesc}{run_unittest}{*classes} Execute \class{unittest.TestCase} subclasses passed to the function. The function scans the classes for methods starting with the prefix @@ -275,4 +281,22 @@ Execute the \class{unittest.TestSuite} instance \var{suite}. The optional argument \var{testclass} accepts one of the test classes in the suite so as to print out more detailed information on where the testing suite originated from. + +The \module{test.test_support} module defines the following classes: + +\begin{classdesc}{EnvironmentVarGuard}{} +Class used to temporarily set or unset environment variables. Instances can be +used as a context manager. +\versionadded{2.6} +\end{classdesc} + +\begin{methoddesc}{set}{envvar, value} +Temporarily set the environment variable \code{envvar} to the value of +\code{value}. +\end{methoddesc} + +\begin{methoddesc}{unset}{envvar} +Temporarily unset the environment variable \code{envvar}. +\end{methoddesc} + \end{funcdesc} diff --git a/Doc/lib/liburlparse.tex b/Doc/lib/liburlparse.tex index 86036056e97..0473aed0198 100644 --- a/Doc/lib/liburlparse.tex +++ b/Doc/lib/liburlparse.tex @@ -89,7 +89,7 @@ information on the result object. \begin{funcdesc}{urlunparse}{parts} Construct a URL from a tuple as returned by \code{urlparse()}. -The \var{parts} argument be any six-item iterable. +The \var{parts} argument can be any six-item iterable. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had unnecessary delimiters (for example, a ? with an empty query; the RFC states that these are equivalent). @@ -133,7 +133,7 @@ information on the result object. \begin{funcdesc}{urlunsplit}{parts} Combine the elements of a tuple as returned by \function{urlsplit()} into a complete URL as a string. -The \var{parts} argument be any five-item iterable. +The \var{parts} argument can be any five-item iterable. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had unnecessary delimiters (for example, a ? with an empty query; the RFC states that these are equivalent). diff --git a/Doc/mac/toolbox.tex b/Doc/mac/toolbox.tex index 9fbcb8440d4..e7ce24f0a73 100644 --- a/Doc/mac/toolbox.tex +++ b/Doc/mac/toolbox.tex @@ -65,7 +65,7 @@ only partially. 
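The liburlparse.tex fixes above clarify that urlunparse() and urlunsplit() accept any six- or five-item iterable; a small round-trip check (not part of the patch):

    from urlparse import urlparse, urlunparse

    url = 'http://example.com/path;params?query#frag'
    print urlunparse(urlparse(url)) == url                    # True
    # any six-item iterable works, not just the tuple urlparse() returns
    print urlunparse(['http', 'example.com', '/idx', '', '', ''])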
\modulesynopsis{Interface to the Component Manager.} \section{\module{Carbon.CarbonEvt} --- Carbon Event Manager} -\declaremodule{standard}{Carbon.CaronEvt} +\declaremodule{standard}{Carbon.CarbonEvt} \platform{Mac} \modulesynopsis{Interface to the Carbon Event Manager.} diff --git a/Doc/whatsnew/whatsnew25.tex b/Doc/whatsnew/whatsnew25.tex index fb68acc03c7..fce392736e9 100644 --- a/Doc/whatsnew/whatsnew25.tex +++ b/Doc/whatsnew/whatsnew25.tex @@ -5,7 +5,7 @@ % Fix XXX comments \title{What's New in Python 2.5} -\release{1.0} +\release{1.01} \author{A.M. Kuchling} \authoraddress{\email{amk@amk.ca}} @@ -556,13 +556,14 @@ generators: where the generator's execution is paused. \item \method{close()} raises a new \exception{GeneratorExit} - exception inside the generator to terminate the iteration. - On receiving this - exception, the generator's code must either raise - \exception{GeneratorExit} or \exception{StopIteration}; catching the - exception and doing anything else is illegal and will trigger - a \exception{RuntimeError}. \method{close()} will also be called by - Python's garbage collector when the generator is garbage-collected. + exception inside the generator to terminate the iteration. On + receiving this exception, the generator's code must either raise + \exception{GeneratorExit} or \exception{StopIteration}. Catching + the \exception{GeneratorExit} exception and returning a value is + illegal and will trigger a \exception{RuntimeError}; if the function + raises some other exception, that exception is propagated to the + caller. \method{close()} will also be called by Python's garbage + collector when the generator is garbage-collected. If you need to run cleanup code when a \exception{GeneratorExit} occurs, I suggest using a \code{try: ... finally:} suite instead of @@ -1663,6 +1664,13 @@ single number as \file{pystone.py} does. \item The \module{pyexpat} module now uses version 2.0 of the Expat parser. (Contributed by Trent Mick.) +\item The \class{Queue} class provided by the \module{Queue} module +gained two new methods. \method{join()} blocks until all items in +the queue have been retrieved and all processing work on the items +have been completed. Worker threads call the other new method, +\method{task_done()}, to signal that processing for an item has been +completed. (Contributed by Raymond Hettinger.) + \item The old \module{regex} and \module{regsub} modules, which have been deprecated ever since Python 2.0, have finally been deleted. Other deleted modules: \module{statcache}, \module{tzparse}, diff --git a/LICENSE b/LICENSE index 5affefc9fcd..0ae414c9eb5 100644 --- a/LICENSE +++ b/LICENSE @@ -88,9 +88,9 @@ license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) -2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation; All Rights -Reserved" are retained in Python alone or in any derivative version -prepared by Licensee. +2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software Foundation; +All Rights Reserved" are retained in Python alone or in any derivative +version prepared by Licensee. 3. 
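The reworded whatsnew paragraph above spells out what a generator may do when close() raises GeneratorExit inside it; the try/finally pattern it recommends looks roughly like this (not part of the patch):

    def ticker():
        n = 0
        try:
            while True:
                yield n
                n += 1
        finally:
            # reached when close() raises GeneratorExit at the paused yield
            print 'cleanup'

    g = ticker()
    print g.next()      # 0
    g.close()           # prints 'cleanup'; catching GeneratorExit and yielding
                        # again would raise RuntimeError instead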
In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make diff --git a/Lib/CGIHTTPServer.py b/Lib/CGIHTTPServer.py index 7a5c8190d82..c119c9a6930 100644 --- a/Lib/CGIHTTPServer.py +++ b/Lib/CGIHTTPServer.py @@ -105,17 +105,36 @@ class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): def run_cgi(self): """Execute a CGI script.""" + path = self.path dir, rest = self.cgi_info + + i = path.find('/', len(dir) + 1) + while i >= 0: + nextdir = path[:i] + nextrest = path[i+1:] + + scriptdir = self.translate_path(nextdir) + if os.path.isdir(scriptdir): + dir, rest = nextdir, nextrest + i = path.find('/', len(dir) + 1) + else: + break + + # find an explicit query string, if present. i = rest.rfind('?') if i >= 0: rest, query = rest[:i], rest[i+1:] else: query = '' + + # dissect the part after the directory name into a script name & + # a possible additional path, to be stored in PATH_INFO. i = rest.find('/') if i >= 0: script, rest = rest[:i], rest[i:] else: script, rest = rest, '' + scriptname = dir + '/' + script scriptfile = self.translate_path(scriptname) if not os.path.exists(scriptfile): diff --git a/Lib/SimpleHTTPServer.py b/Lib/SimpleHTTPServer.py index fae551a5659..86c669ea409 100644 --- a/Lib/SimpleHTTPServer.py +++ b/Lib/SimpleHTTPServer.py @@ -66,6 +66,12 @@ class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): path = self.translate_path(self.path) f = None if os.path.isdir(path): + if not self.path.endswith('/'): + # redirect browser - doing basically what apache does + self.send_response(301) + self.send_header("Location", self.path + "/") + self.end_headers() + return None for index in "index.html", "index.htm": index = os.path.join(path, index) if os.path.exists(index): diff --git a/Lib/StringIO.py b/Lib/StringIO.py index 7d57d80947f..9394360fa78 100644 --- a/Lib/StringIO.py +++ b/Lib/StringIO.py @@ -139,7 +139,7 @@ class StringIO: return r def readline(self, length=None): - """Read one entire line from the file. + r"""Read one entire line from the file. A trailing newline character is kept in the string (but may be absent when a file ends with an incomplete line). If the size argument is diff --git a/Lib/bsddb/dbobj.py b/Lib/bsddb/dbobj.py index 346c1adc6fe..987f7735f50 100644 --- a/Lib/bsddb/dbobj.py +++ b/Lib/bsddb/dbobj.py @@ -55,8 +55,9 @@ class DBEnv: return self._cobj.set_lg_max(*args, **kwargs) def set_lk_detect(self, *args, **kwargs): return self._cobj.set_lk_detect(*args, **kwargs) - def set_lk_max(self, *args, **kwargs): - return self._cobj.set_lk_max(*args, **kwargs) + if db.version() < (4,5): + def set_lk_max(self, *args, **kwargs): + return self._cobj.set_lk_max(*args, **kwargs) def set_lk_max_locks(self, *args, **kwargs): return self._cobj.set_lk_max_locks(*args, **kwargs) def set_lk_max_lockers(self, *args, **kwargs): diff --git a/Lib/bsddb/test/test_1413192.py b/Lib/bsddb/test/test_1413192.py index 3c135365c95..436f407867d 100644 --- a/Lib/bsddb/test/test_1413192.py +++ b/Lib/bsddb/test/test_1413192.py @@ -14,7 +14,7 @@ except ImportError: env_name = '.' 
env = db.DBEnv() -env.open(env_name, db.DB_CREATE | db.DB_INIT_TXN) +env.open(env_name, db.DB_CREATE | db.DB_INIT_TXN | db.DB_INIT_MPOOL) the_txn = env.txn_begin() map = db.DB(env) diff --git a/Lib/bsddb/test/test_associate.py b/Lib/bsddb/test/test_associate.py index 33a78372450..7ae7c53ff80 100644 --- a/Lib/bsddb/test/test_associate.py +++ b/Lib/bsddb/test/test_associate.py @@ -91,7 +91,7 @@ musicdata = { class AssociateErrorTestCase(unittest.TestCase): def setUp(self): self.filename = self.__class__.__name__ + '.db' - homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + homeDir = os.path.join(tempfile.gettempdir(), 'db_home') self.homeDir = homeDir try: os.mkdir(homeDir) diff --git a/Lib/bsddb/test/test_basics.py b/Lib/bsddb/test/test_basics.py index e0452df92f9..48ecdb95c8f 100644 --- a/Lib/bsddb/test/test_basics.py +++ b/Lib/bsddb/test/test_basics.py @@ -54,7 +54,7 @@ class BasicTestCase(unittest.TestCase): def setUp(self): if self.useEnv: - homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + homeDir = os.path.join(tempfile.gettempdir(), 'db_home') self.homeDir = homeDir try: shutil.rmtree(homeDir) diff --git a/Lib/bsddb/test/test_dbobj.py b/Lib/bsddb/test/test_dbobj.py index bba6a5b6a1f..b15de2f0ef9 100644 --- a/Lib/bsddb/test/test_dbobj.py +++ b/Lib/bsddb/test/test_dbobj.py @@ -2,6 +2,7 @@ import sys, os, string import unittest import glob +import tempfile try: # For Pythons w/distutils pybsddb @@ -19,7 +20,7 @@ class dbobjTestCase(unittest.TestCase): db_name = 'test-dbobj.db' def setUp(self): - homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + homeDir = os.path.join(tempfile.gettempdir(), 'db_home') self.homeDir = homeDir try: os.mkdir(homeDir) except os.error: pass diff --git a/Lib/bsddb/test/test_dbshelve.py b/Lib/bsddb/test/test_dbshelve.py index 374ccd8c60f..1da6546a55a 100644 --- a/Lib/bsddb/test/test_dbshelve.py +++ b/Lib/bsddb/test/test_dbshelve.py @@ -242,7 +242,7 @@ class ThreadHashShelveTestCase(BasicShelveTestCase): class BasicEnvShelveTestCase(DBShelveTestCase): def do_open(self): self.homeDir = homeDir = os.path.join( - os.path.dirname(sys.argv[0]), 'db_home') + tempfile.gettempdir(), 'db_home') try: os.mkdir(homeDir) except os.error: pass self.env = db.DBEnv() diff --git a/Lib/bsddb/test/test_dbtables.py b/Lib/bsddb/test/test_dbtables.py index 2ff93a3ef12..a31fcec910c 100644 --- a/Lib/bsddb/test/test_dbtables.py +++ b/Lib/bsddb/test/test_dbtables.py @@ -26,6 +26,7 @@ try: pickle = cPickle except ImportError: import pickle +import tempfile import unittest from .test_all import verbose @@ -46,7 +47,7 @@ class TableDBTestCase(unittest.TestCase): db_name = 'test-table.db' def setUp(self): - homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + homeDir = os.path.join(tempfile.gettempdir(), 'db_home') self.homeDir = homeDir try: os.mkdir(homeDir) except os.error: pass diff --git a/Lib/bsddb/test/test_env_close.py b/Lib/bsddb/test/test_env_close.py index 43dcabe92b8..12e103746af 100644 --- a/Lib/bsddb/test/test_env_close.py +++ b/Lib/bsddb/test/test_env_close.py @@ -33,7 +33,7 @@ else: class DBEnvClosedEarlyCrash(unittest.TestCase): def setUp(self): - self.homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + self.homeDir = os.path.join(tempfile.gettempdir(), 'db_home') try: os.mkdir(self.homeDir) except os.error: pass tempfile.tempdir = self.homeDir diff --git a/Lib/bsddb/test/test_join.py b/Lib/bsddb/test/test_join.py index 6e98b0b621a..5e307aead75 100644 --- a/Lib/bsddb/test/test_join.py +++ 
b/Lib/bsddb/test/test_join.py @@ -49,7 +49,7 @@ class JoinTestCase(unittest.TestCase): def setUp(self): self.filename = self.__class__.__name__ + '.db' - homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + homeDir = os.path.join(tempfile.gettempdir(), 'db_home') self.homeDir = homeDir try: os.mkdir(homeDir) except os.error: pass diff --git a/Lib/bsddb/test/test_lock.py b/Lib/bsddb/test/test_lock.py index 53f11a82ff8..61bdae8b760 100644 --- a/Lib/bsddb/test/test_lock.py +++ b/Lib/bsddb/test/test_lock.py @@ -30,7 +30,7 @@ except ImportError: class LockingTestCase(unittest.TestCase): def setUp(self): - homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + homeDir = os.path.join(tempfile.gettempdir(), 'db_home') self.homeDir = homeDir try: os.mkdir(homeDir) except os.error: pass diff --git a/Lib/bsddb/test/test_misc.py b/Lib/bsddb/test/test_misc.py index 88f700b4697..6b2df073c9f 100644 --- a/Lib/bsddb/test/test_misc.py +++ b/Lib/bsddb/test/test_misc.py @@ -4,6 +4,7 @@ import os import sys import unittest +import tempfile try: # For Pythons w/distutils pybsddb @@ -17,7 +18,7 @@ except ImportError: class MiscTestCase(unittest.TestCase): def setUp(self): self.filename = self.__class__.__name__ + '.db' - homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + homeDir = os.path.join(tempfile.gettempdir(), 'db_home') self.homeDir = homeDir try: os.mkdir(homeDir) diff --git a/Lib/bsddb/test/test_recno.py b/Lib/bsddb/test/test_recno.py index e325aac89a7..35399b517b5 100644 --- a/Lib/bsddb/test/test_recno.py +++ b/Lib/bsddb/test/test_recno.py @@ -203,10 +203,10 @@ class SimpleRecnoTestCase(unittest.TestCase): just a line in the file, but you can set a different record delimiter if needed. """ - source = os.path.join(os.path.dirname(sys.argv[0]), - 'db_home/test_recno.txt') - if not os.path.isdir('db_home'): - os.mkdir('db_home') + homeDir = os.path.join(tempfile.gettempdir(), 'db_home') + source = os.path.join(homeDir, 'test_recno.txt') + if not os.path.isdir(homeDir): + os.mkdir(homeDir) f = open(source, 'w') # create the file f.close() diff --git a/Lib/bsddb/test/test_thread.py b/Lib/bsddb/test/test_thread.py index 6942aa222cc..bf19d21e6d6 100644 --- a/Lib/bsddb/test/test_thread.py +++ b/Lib/bsddb/test/test_thread.py @@ -53,7 +53,7 @@ class BaseThreadedTestCase(unittest.TestCase): if verbose: dbutils._deadlock_VerboseFile = sys.stdout - homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + homeDir = os.path.join(tempfile.gettempdir(), 'db_home') self.homeDir = homeDir try: os.mkdir(homeDir) diff --git a/Lib/cookielib.py b/Lib/cookielib.py index e8fee0ee6b4..ce037b0fcd2 100644 --- a/Lib/cookielib.py +++ b/Lib/cookielib.py @@ -1316,26 +1316,28 @@ class CookieJar: """ _debug("add_cookie_header") self._cookies_lock.acquire() + try: - self._policy._now = self._now = int(time.time()) + self._policy._now = self._now = int(time.time()) - cookies = self._cookies_for_request(request) + cookies = self._cookies_for_request(request) - attrs = self._cookie_attrs(cookies) - if attrs: - if not request.has_header("Cookie"): - request.add_unredirected_header( - "Cookie", "; ".join(attrs)) + attrs = self._cookie_attrs(cookies) + if attrs: + if not request.has_header("Cookie"): + request.add_unredirected_header( + "Cookie", "; ".join(attrs)) - # if necessary, advertise that we know RFC 2965 - if (self._policy.rfc2965 and not self._policy.hide_cookie2 and - not request.has_header("Cookie2")): - for cookie in cookies: - if cookie.version != 1: - 
request.add_unredirected_header("Cookie2", '$Version="1"') - break - - self._cookies_lock.release() + # if necessary, advertise that we know RFC 2965 + if (self._policy.rfc2965 and not self._policy.hide_cookie2 and + not request.has_header("Cookie2")): + for cookie in cookies: + if cookie.version != 1: + request.add_unredirected_header("Cookie2", '$Version="1"') + break + + finally: + self._cookies_lock.release() self.clear_expired_cookies() @@ -1602,12 +1604,15 @@ class CookieJar: def set_cookie_if_ok(self, cookie, request): """Set a cookie if policy says it's OK to do so.""" self._cookies_lock.acquire() - self._policy._now = self._now = int(time.time()) + try: + self._policy._now = self._now = int(time.time()) - if self._policy.set_ok(cookie, request): - self.set_cookie(cookie) + if self._policy.set_ok(cookie, request): + self.set_cookie(cookie) + - self._cookies_lock.release() + finally: + self._cookies_lock.release() def set_cookie(self, cookie): """Set a cookie, without checking whether or not it should be set.""" @@ -1626,13 +1631,15 @@ class CookieJar: """Extract cookies from response, where allowable given the request.""" _debug("extract_cookies: %s", response.info()) self._cookies_lock.acquire() - self._policy._now = self._now = int(time.time()) + try: + self._policy._now = self._now = int(time.time()) - for cookie in self.make_cookies(response, request): - if self._policy.set_ok(cookie, request): - _debug(" setting cookie: %s", cookie) - self.set_cookie(cookie) - self._cookies_lock.release() + for cookie in self.make_cookies(response, request): + if self._policy.set_ok(cookie, request): + _debug(" setting cookie: %s", cookie) + self.set_cookie(cookie) + finally: + self._cookies_lock.release() def clear(self, domain=None, path=None, name=None): """Clear some cookies. @@ -1669,10 +1676,12 @@ class CookieJar: """ self._cookies_lock.acquire() - for cookie in self: - if cookie.discard: - self.clear(cookie.domain, cookie.path, cookie.name) - self._cookies_lock.release() + try: + for cookie in self: + if cookie.discard: + self.clear(cookie.domain, cookie.path, cookie.name) + finally: + self._cookies_lock.release() def clear_expired_cookies(self): """Discard all expired cookies. 
@@ -1685,11 +1694,13 @@ class CookieJar: """ self._cookies_lock.acquire() - now = time.time() - for cookie in self: - if cookie.is_expired(now): - self.clear(cookie.domain, cookie.path, cookie.name) - self._cookies_lock.release() + try: + now = time.time() + for cookie in self: + if cookie.is_expired(now): + self.clear(cookie.domain, cookie.path, cookie.name) + finally: + self._cookies_lock.release() def __iter__(self): return deepvalues(self._cookies) @@ -1761,16 +1772,18 @@ class FileCookieJar(CookieJar): else: raise ValueError(MISSING_FILENAME_TEXT) self._cookies_lock.acquire() - - old_state = copy.deepcopy(self._cookies) - self._cookies = {} try: - self.load(filename, ignore_discard, ignore_expires) - except (LoadError, IOError): - self._cookies = old_state - raise - self._cookies_lock.release() + old_state = copy.deepcopy(self._cookies) + self._cookies = {} + try: + self.load(filename, ignore_discard, ignore_expires) + except (LoadError, IOError): + self._cookies = old_state + raise + + finally: + self._cookies_lock.release() from _LWPCookieJar import LWPCookieJar, lwp_cookie_str from _MozillaCookieJar import MozillaCookieJar diff --git a/Lib/difflib.py b/Lib/difflib.py index 408079b8038..831840d44fb 100644 --- a/Lib/difflib.py +++ b/Lib/difflib.py @@ -1310,7 +1310,7 @@ def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK): def _mdiff(fromlines, tolines, context=None, linejunk=None, charjunk=IS_CHARACTER_JUNK): - """Returns generator yielding marked up from/to side by side differences. + r"""Returns generator yielding marked up from/to side by side differences. Arguments: fromlines -- list of text lines to compared to tolines diff --git a/Lib/dumbdbm.py b/Lib/dumbdbm.py index e00d9e8382e..ee2f39e20bb 100644 --- a/Lib/dumbdbm.py +++ b/Lib/dumbdbm.py @@ -68,7 +68,8 @@ class _Database(UserDict.DictMixin): try: f = _open(self._datfile, 'r') except IOError: - f = _open(self._datfile, 'w', self._mode) + f = _open(self._datfile, 'w') + self._chmod(self._datfile) f.close() self._update() @@ -106,7 +107,8 @@ class _Database(UserDict.DictMixin): except self._os.error: pass - f = self._open(self._dirfile, 'w', self._mode) + f = self._open(self._dirfile, 'w') + self._chmod(self._dirfile) for key, pos_and_siz_pair in self._index.iteritems(): f.write("%r, %r\n" % (key, pos_and_siz_pair)) f.close() @@ -152,7 +154,8 @@ class _Database(UserDict.DictMixin): # the in-memory index dict, and append one to the directory file. 
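The cookielib hunks above all apply the same fix: the jar's lock is now released in a finally clause, so an exception inside the critical section can no longer leave the CookieJar locked. The pattern, reduced to a standalone sketch with hypothetical names (not part of the patch):

    import threading

    class Jar(object):
        def __init__(self):
            self._cookies_lock = threading.RLock()
            self._cookies = {}

        def clear_expired(self, now):
            self._cookies_lock.acquire()
            try:
                # work that may raise must not skip the release below
                for key, expires in self._cookies.items():
                    if expires < now:
                        del self._cookies[key]
            finally:
                self._cookies_lock.release()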
def _addkey(self, key, pos_and_siz_pair): self._index[key] = pos_and_siz_pair - f = _open(self._dirfile, 'a', self._mode) + f = _open(self._dirfile, 'a') + self._chmod(self._dirfile) f.write("%r, %r\n" % (key, pos_and_siz_pair)) f.close() @@ -211,6 +214,9 @@ class _Database(UserDict.DictMixin): __del__ = close + def _chmod (self, file): + if hasattr(self._os, 'chmod'): + self._os.chmod(file, self._mode) def open(file, flag=None, mode=0666): @@ -227,4 +233,15 @@ def open(file, flag=None, mode=0666): """ # flag argument is currently ignored + + # Modify mode depending on the umask + try: + um = _os.umask(0) + _os.umask(um) + except AttributeError: + pass + else: + # Turn off any bits that are set in the umask + mode = mode & (~um) + return _Database(file, mode) diff --git a/Lib/heapq.py b/Lib/heapq.py index 04725cdef46..753c3b7ec19 100644 --- a/Lib/heapq.py +++ b/Lib/heapq.py @@ -130,7 +130,7 @@ __all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'nlargest', 'nsmallest'] from itertools import islice, repeat, count, imap, izip, tee -from operator import itemgetter +from operator import itemgetter, neg import bisect def heappush(heap, item): @@ -315,8 +315,6 @@ def nsmallest(n, iterable, key=None): Equivalent to: sorted(iterable, key=key)[:n] """ - if key is None: - return _nsmallest(n, iterable) in1, in2 = tee(iterable) it = izip(imap(key, in1), count(), in2) # decorate result = _nsmallest(n, it) @@ -328,10 +326,8 @@ def nlargest(n, iterable, key=None): Equivalent to: sorted(iterable, key=key, reverse=True)[:n] """ - if key is None: - return _nlargest(n, iterable) in1, in2 = tee(iterable) - it = izip(imap(key, in1), count(), in2) # decorate + it = izip(imap(key, in1), imap(neg, count()), in2) # decorate result = _nlargest(n, it) return map(itemgetter(2), result) # undecorate diff --git a/Lib/hmac.py b/Lib/hmac.py index 41d6c6cbd71..88c3fd5377a 100644 --- a/Lib/hmac.py +++ b/Lib/hmac.py @@ -3,13 +3,11 @@ Implements the HMAC algorithm as described by RFC 2104. """ -def _strxor(s1, s2): - """Utility method. XOR the two strings s1 and s2 (must have same length). - """ - return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2)) +trans_5C = "".join ([chr (x ^ 0x5C) for x in xrange(256)]) +trans_36 = "".join ([chr (x ^ 0x36) for x in xrange(256)]) # The size of the digests returned by HMAC depends on the underlying -# hashing module used. +# hashing module used. Use digest_size from the instance of HMAC instead. digest_size = None # A unique object passed by HMAC.copy() to the HMAC constructor, in order @@ -22,6 +20,7 @@ class HMAC: This supports the API for Cryptographic Hash Functions (PEP 247). """ + blocksize = 64 # 512-bit HMAC; can be changed in subclasses. def __init__(self, key, msg = None, digestmod = None): """Create a new HMAC object. @@ -49,16 +48,13 @@ class HMAC: self.inner = self.digest_cons() self.digest_size = self.inner.digest_size - blocksize = 64 - ipad = "\x36" * blocksize - opad = "\x5C" * blocksize - + blocksize = self.blocksize if len(key) > blocksize: key = self.digest_cons(key).digest() key = key + chr(0) * (blocksize - len(key)) - self.outer.update(_strxor(key, opad)) - self.inner.update(_strxor(key, ipad)) + self.outer.update(key.translate(trans_5C)) + self.inner.update(key.translate(trans_36)) if msg is not None: self.update(msg) @@ -75,13 +71,22 @@ class HMAC: An update to this copy won't affect the original object. 
""" - other = HMAC(_secret_backdoor_key) + other = self.__class__(_secret_backdoor_key) other.digest_cons = self.digest_cons other.digest_size = self.digest_size other.inner = self.inner.copy() other.outer = self.outer.copy() return other + def _current(self): + """Return a hash object for the current state. + + To be used only internally with digest() and hexdigest(). + """ + h = self.outer.copy() + h.update(self.inner.digest()) + return h + def digest(self): """Return the hash value of this hashing object. @@ -89,15 +94,14 @@ class HMAC: not altered in any way by this function; you can continue updating the object after calling this function. """ - h = self.outer.copy() - h.update(self.inner.digest()) + h = self._current() return h.digest() def hexdigest(self): """Like digest(), but returns a string of hexadecimal digits instead. """ - return "".join([hex(ord(x))[2:].zfill(2) - for x in tuple(self.digest())]) + h = self._current() + return h.hexdigest() def new(key, msg = None, digestmod = None): """Create a new hashing object and return it. diff --git a/Lib/idlelib/AutoCompleteWindow.py b/Lib/idlelib/AutoCompleteWindow.py index 8bed0344e99..7f8adafd1c0 100644 --- a/Lib/idlelib/AutoCompleteWindow.py +++ b/Lib/idlelib/AutoCompleteWindow.py @@ -118,8 +118,11 @@ class AutoCompleteWindow: i = 0 while i < len(lts) and i < len(selstart) and lts[i] == selstart[i]: i += 1 - while cursel > 0 and selstart[:i] <= self.completions[cursel-1]: + previous_completion = self.completions[cursel - 1] + while cursel > 0 and selstart[:i] <= previous_completion: i += 1 + if selstart == previous_completion: + break # maybe we have a duplicate? newstart = selstart[:i] self._change_start(newstart) diff --git a/Lib/idlelib/NEWS.txt b/Lib/idlelib/NEWS.txt index 43e5b452aa0..5f73a698a05 100644 --- a/Lib/idlelib/NEWS.txt +++ b/Lib/idlelib/NEWS.txt @@ -3,9 +3,14 @@ What's New in IDLE 2.6a1? *Release date: XX-XXX-200X* +- Avoid hang when encountering a duplicate in a completion list. Bug 1571112. + - Patch #1362975: Rework CodeContext indentation algorithm to avoid hard-coding pixel widths. +- Bug #813342: Start the IDLE subprocess with -Qnew if the parent + is started with that option. + - Some syntax errors were being caught by tokenize during the tabnanny check, resulting in obscure error messages. Do the syntax check first. Bug 1562716, 1562719 @@ -14,6 +19,12 @@ What's New in IDLE 2.6a1? the Python release of which it's a part. +What's New in IDLE 1.2? +======================= + +*Release date: 19-SEP-2006* + + What's New in IDLE 1.2c1? ========================= @@ -44,6 +55,13 @@ What's New in IDLE 1.2b3? *Release date: 03-AUG-2006* +- Bug #1525817: Don't truncate short lines in IDLE's tool tips. + +- Bug #1517990: IDLE keybindings on MacOS X now work correctly + +- Bug #1517996: IDLE now longer shows the default Tk menu when a + path browser, class browser or debugger is the frontmost window on MacOS X + - EditorWindow.test() was failing. Bug 1417598 - EditorWindow failed when used stand-alone if sys.ps1 not set. @@ -80,6 +98,8 @@ What's New in IDLE 1.2a1? *Release date: 05-APR-2006* +- Patch #1162825: Support non-ASCII characters in IDLE window titles. + - Source file f.flush() after writing; trying to avoid lossage if user kills GUI. 
diff --git a/Lib/logging/__init__.py b/Lib/logging/__init__.py index b57a9af3689..615dcc7470a 100644 --- a/Lib/logging/__init__.py +++ b/Lib/logging/__init__.py @@ -41,8 +41,8 @@ except ImportError: __author__ = "Vinay Sajip " __status__ = "production" -__version__ = "0.4.9.9" -__date__ = "06 February 2006" +__version__ = "0.5.0.0" +__date__ = "08 January 2007" #--------------------------------------------------------------------------- # Miscellaneous module data @@ -243,7 +243,7 @@ class LogRecord: try: self.filename = os.path.basename(pathname) self.module = os.path.splitext(self.filename)[0] - except: + except (TypeError, ValueError, AttributeError): self.filename = pathname self.module = "Unknown module" self.exc_info = exc_info diff --git a/Lib/logging/handlers.py b/Lib/logging/handlers.py index 17eca8a0c43..82896ada2dc 100644 --- a/Lib/logging/handlers.py +++ b/Lib/logging/handlers.py @@ -347,7 +347,7 @@ class SocketHandler(logging.Handler): try: self.sock = self.makeSocket() self.retryTime = None # next time, no delay before trying - except: + except socket.error: #Creation failed, so set the retry time and return. if self.retryTime is None: self.retryPeriod = self.retryStart @@ -738,7 +738,7 @@ class SMTPHandler(logging.Handler): import smtplib try: from email.Utils import formatdate - except: + except ImportError: formatdate = self.date_time port = self.mailport if not port: diff --git a/Lib/mailbox.py b/Lib/mailbox.py index 084343086c4..8d1df4ba4e6 100755 --- a/Lib/mailbox.py +++ b/Lib/mailbox.py @@ -510,6 +510,7 @@ class _singlefileMailbox(Mailbox): self._next_key = 0 self._pending = False # No changes require rewriting the file. self._locked = False + self._file_length = None # Used to record mailbox size def add(self, message): """Add message and return assigned key.""" @@ -563,7 +564,21 @@ class _singlefileMailbox(Mailbox): """Write any pending changes to disk.""" if not self._pending: return - self._lookup() + + # In order to be writing anything out at all, self._toc must + # already have been generated (and presumably has been modified + # by adding or deleting an item). + assert self._toc is not None + + # Check length of self._file; if it's changed, some other process + # has modified the mailbox since we scanned it. 
+ self._file.seek(0, 2) + cur_len = self._file.tell() + if cur_len != self._file_length: + raise ExternalClashError('Size of mailbox file changed ' + '(expected %i, found %i)' % + (self._file_length, cur_len)) + new_file = _create_temporary(self._path) try: new_toc = {} @@ -639,6 +654,7 @@ class _singlefileMailbox(Mailbox): offsets = self._install_message(message) self._post_message_hook(self._file) self._file.flush() + self._file_length = self._file.tell() # Record current length of mailbox return offsets @@ -730,6 +746,7 @@ class mbox(_mboxMMDF): break self._toc = dict(enumerate(zip(starts, stops))) self._next_key = len(self._toc) + self._file_length = self._file.tell() class MMDF(_mboxMMDF): @@ -773,6 +790,8 @@ class MMDF(_mboxMMDF): break self._toc = dict(enumerate(zip(starts, stops))) self._next_key = len(self._toc) + self._file.seek(0, 2) + self._file_length = self._file.tell() class MH(Mailbox): @@ -1198,7 +1217,9 @@ class Babyl(_singlefileMailbox): self._toc = dict(enumerate(zip(starts, stops))) self._labels = dict(enumerate(label_lists)) self._next_key = len(self._toc) - + self._file.seek(0, 2) + self._file_length = self._file.tell() + def _pre_mailbox_hook(self, f): """Called before writing the mailbox to file f.""" f.write('BABYL OPTIONS:%sVersion: 5%sLabels:%s%s\037' % @@ -1884,7 +1905,8 @@ def _create_temporary(path): def _sync_flush(f): """Ensure changes to file f are physically on disk.""" f.flush() - os.fsync(f.fileno()) + if hasattr(os, 'fsync'): + os.fsync(f.fileno()) def _sync_close(f): """Close file f, ensuring all changes are physically on disk.""" diff --git a/Lib/pty.py b/Lib/pty.py index 889113c3dd5..d3eb64f0a09 100644 --- a/Lib/pty.py +++ b/Lib/pty.py @@ -121,7 +121,9 @@ def fork(): # Explicitly open the tty to make it become a controlling tty. tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR) os.close(tmp_fd) - + else: + os.close(slave_fd) + # Parent and child process. return pid, master_fd diff --git a/Lib/pydoc.py b/Lib/pydoc.py index 3459fbe267c..d1ab01c214b 100755 --- a/Lib/pydoc.py +++ b/Lib/pydoc.py @@ -1741,6 +1741,9 @@ Here is a list of available topics. Enter any topic name to get more help. Sorry, topic and keyword documentation is not available because the Python HTML documentation files could not be found. If you have installed them, please set the environment variable PYTHONDOCS to indicate their location. + +On the Microsoft Windows operating system, the files can be built by +running "hh -decompile . PythonNN.chm" in the C:\PythonNN\Doc> directory. 
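The pty.fork() fix above closes the slave end in the parent so the master sees end-of-file when the child exits; typical use of the function, as a rough sketch (not part of the patch):

    import os
    import pty

    pid, master_fd = pty.fork()
    if pid == 0:
        # child: the slave end of the pty is now stdin/stdout/stderr
        os.execvp('echo', ['echo', 'hello from the child'])
    else:
        # parent: read the child's output through the master end
        print os.read(master_fd, 1024)
        os.waitpid(pid, 0)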
''') return target = self.topics.get(topic, self.keywords.get(topic)) diff --git a/Lib/random.py b/Lib/random.py index ae2d434b311..b80f1a1c3f0 100644 --- a/Lib/random.py +++ b/Lib/random.py @@ -205,7 +205,7 @@ class Random(_random.Random): raise ValueError, "empty range for randrange()" if n >= maxwidth: - return istart + self._randbelow(n) + return istart + istep*self._randbelow(n) return istart + istep*int(self.random() * n) def randint(self, a, b): diff --git a/Lib/sre_parse.py b/Lib/sre_parse.py index 319bf43b33f..e63f2acbb40 100644 --- a/Lib/sre_parse.py +++ b/Lib/sre_parse.py @@ -134,6 +134,8 @@ class SubPattern: def __delitem__(self, index): del self.data[index] def __getitem__(self, index): + if isinstance(index, slice): + return SubPattern(self.pattern, self.data[index]) return self.data[index] def __setitem__(self, index, code): self.data[index] = code diff --git a/Lib/subprocess.py b/Lib/subprocess.py index 68ab05e4864..62b70baaa95 100644 --- a/Lib/subprocess.py +++ b/Lib/subprocess.py @@ -166,7 +166,7 @@ wait() communicate(input=None) Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached. Wait for process to - terminate. The optional stdin argument should be a string to be + terminate. The optional input argument should be a string to be sent to the child process, or None, if no data should be sent to the child. @@ -1005,8 +1005,12 @@ class Popen(object): # Close pipe fds. Make sure we don't close the same # fd more than once, or standard fds. - for fd in set((p2cread, c2pwrite, errwrite))-set((0,1,2)): - if fd: os.close(fd) + if p2cread and p2cread not in (0,): + os.close(p2cread) + if c2pwrite and c2pwrite not in (p2cread, 1): + os.close(c2pwrite) + if errwrite and errwrite not in (p2cread, c2pwrite, 2): + os.close(errwrite) # Close all other fds, if asked for if close_fds: @@ -1108,6 +1112,7 @@ class Popen(object): read_set.append(self.stderr) stderr = [] + input_offset = 0 while read_set or write_set: rlist, wlist, xlist = select.select(read_set, write_set, []) @@ -1115,9 +1120,9 @@ class Popen(object): # When select has indicated that the file is writable, # we can write up to PIPE_BUF bytes without risk # blocking. POSIX defines PIPE_BUF >= 512 - bytes_written = os.write(self.stdin.fileno(), input[:512]) - input = input[bytes_written:] - if not input: + bytes_written = os.write(self.stdin.fileno(), buffer(input, input_offset, 512)) + input_offset += bytes_written + if input_offset >= len(input): self.stdin.close() write_set.remove(self.stdin) diff --git a/Lib/tarfile.py b/Lib/tarfile.py index 14553a776d3..3ffdff360b4 100644 --- a/Lib/tarfile.py +++ b/Lib/tarfile.py @@ -147,7 +147,10 @@ def nti(s): # There are two possible encodings for a number field, see # itn() below. if s[0] != chr(0200): - n = int(s.rstrip(NUL + " ") or "0", 8) + try: + n = int(s.rstrip(NUL + " ") or "0", 8) + except ValueError: + raise HeaderError("invalid header") else: n = 0L for i in xrange(len(s) - 1): @@ -282,6 +285,9 @@ class CompressionError(TarError): class StreamError(TarError): """Exception for unsupported operations on stream-like TarFiles.""" pass +class HeaderError(TarError): + """Exception for invalid headers.""" + pass #--------------------------- # internal stream interface @@ -624,64 +630,158 @@ class _BZ2Proxy(object): #------------------------ # Extraction file object #------------------------ -class ExFileObject(object): - """File-like object for reading an archive member. - Is returned by TarFile.extractfile(). 
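The random.randrange() fix above multiplies the _randbelow() result by istep, so results from very wide stepped ranges stay on the step grid; a quick check of the documented behaviour (the exact width threshold is an internal detail):

    import random

    for _ in xrange(1000):
        assert random.randrange(0, 2 ** 62, 10) % 10 == 0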
Support for - sparse files included. +class _FileInFile(object): + """A thin wrapper around an existing file object that + provides a part of its data as an individual file + object. """ - def __init__(self, tarfile, tarinfo): - self.fileobj = tarfile.fileobj - self.name = tarinfo.name - self.mode = "r" - self.closed = False - self.offset = tarinfo.offset_data - self.size = tarinfo.size - self.pos = 0L - self.linebuffer = "" - if tarinfo.issparse(): - self.sparse = tarinfo.sparse - self.read = self._readsparse - else: - self.read = self._readnormal + def __init__(self, fileobj, offset, size, sparse=None): + self.fileobj = fileobj + self.offset = offset + self.size = size + self.sparse = sparse + self.position = 0 - def __read(self, size): - """Overloadable read method. + def tell(self): + """Return the current file position. """ + return self.position + + def seek(self, position): + """Seek to a position in the file. + """ + self.position = position + + def read(self, size=None): + """Read data from the file. + """ + if size is None: + size = self.size - self.position + else: + size = min(size, self.size - self.position) + + if self.sparse is None: + return self.readnormal(size) + else: + return self.readsparse(size) + + def readnormal(self, size): + """Read operation for regular files. + """ + self.fileobj.seek(self.offset + self.position) + self.position += size return self.fileobj.read(size) - def readline(self, size=-1): - """Read a line with approx. size. If size is negative, - read a whole line. readline() and read() must not - be mixed up (!). + def readsparse(self, size): + """Read operation for sparse files. """ - if size < 0: - size = sys.maxint + data = [] + while size > 0: + buf = self.readsparsesection(size) + if not buf: + break + size -= len(buf) + data.append(buf) + return "".join(data) - nl = self.linebuffer.find("\n") - if nl >= 0: - nl = min(nl, size) + def readsparsesection(self, size): + """Read a single section of a sparse file. + """ + section = self.sparse.find(self.position) + + if section is None: + return "" + + size = min(size, section.offset + section.size - self.position) + + if isinstance(section, _data): + realpos = section.realpos + self.position - section.offset + self.fileobj.seek(self.offset + realpos) + self.position += size + return self.fileobj.read(size) else: - size -= len(self.linebuffer) - while (nl < 0 and size > 0): - buf = self.read(min(size, 100)) - if not buf: + self.position += size + return NUL * size +#class _FileInFile + + +class ExFileObject(object): + """File-like object for reading an archive member. + Is returned by TarFile.extractfile(). + """ + blocksize = 1024 + + def __init__(self, tarfile, tarinfo): + self.fileobj = _FileInFile(tarfile.fileobj, + tarinfo.offset_data, + tarinfo.size, + getattr(tarinfo, "sparse", None)) + self.name = tarinfo.name + self.mode = "r" + self.closed = False + self.size = tarinfo.size + + self.position = 0 + self.buffer = "" + + def read(self, size=None): + """Read at most size bytes from the file. If size is not + present or None, read all data until EOF is reached. 
+ """ + if self.closed: + raise ValueError("I/O operation on closed file") + + buf = "" + if self.buffer: + if size is None: + buf = self.buffer + self.buffer = "" + else: + buf = self.buffer[:size] + self.buffer = self.buffer[size:] + + if size is None: + buf += self.fileobj.read() + else: + buf += self.fileobj.read(size - len(buf)) + + self.position += len(buf) + return buf + + def readline(self, size=-1): + """Read one entire line from the file. If size is present + and non-negative, return a string with at most that + size, which may be an incomplete line. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + if "\n" in self.buffer: + pos = self.buffer.find("\n") + 1 + else: + buffers = [self.buffer] + while True: + buf = self.fileobj.read(self.blocksize) + buffers.append(buf) + if not buf or "\n" in buf: + self.buffer = "".join(buffers) + pos = self.buffer.find("\n") + 1 + if pos == 0: + # no newline found. + pos = len(self.buffer) break - self.linebuffer += buf - size -= len(buf) - nl = self.linebuffer.find("\n") - if nl == -1: - s = self.linebuffer - self.linebuffer = "" - return s - buf = self.linebuffer[:nl] - self.linebuffer = self.linebuffer[nl + 1:] - while buf[-1:] == "\r": - buf = buf[:-1] - return buf + "\n" + + if size != -1: + pos = min(size, pos) + + buf = self.buffer[:pos] + self.buffer = self.buffer[pos:] + self.position += len(buf) + return buf def readlines(self): - """Return a list with all (following) lines. + """Return a list with all remaining lines. """ result = [] while True: @@ -690,74 +790,34 @@ class ExFileObject(object): result.append(line) return result - def _readnormal(self, size=None): - """Read operation for regular files. - """ - if self.closed: - raise ValueError("file is closed") - self.fileobj.seek(self.offset + self.pos) - bytesleft = self.size - self.pos - if size is None: - bytestoread = bytesleft - else: - bytestoread = min(size, bytesleft) - self.pos += bytestoread - return self.__read(bytestoread) - - def _readsparse(self, size=None): - """Read operation for sparse files. - """ - if self.closed: - raise ValueError("file is closed") - - if size is None: - size = self.size - self.pos - - data = [] - while size > 0: - buf = self._readsparsesection(size) - if not buf: - break - size -= len(buf) - data.append(buf) - return "".join(data) - - def _readsparsesection(self, size): - """Read a single section of a sparse file. - """ - section = self.sparse.find(self.pos) - - if section is None: - return "" - - toread = min(size, section.offset + section.size - self.pos) - if isinstance(section, _data): - realpos = section.realpos + self.pos - section.offset - self.pos += toread - self.fileobj.seek(self.offset + realpos) - return self.__read(toread) - else: - self.pos += toread - return NUL * toread - def tell(self): """Return the current file position. """ - return self.pos + if self.closed: + raise ValueError("I/O operation on closed file") - def seek(self, pos, whence=0): + return self.position + + def seek(self, pos, whence=os.SEEK_SET): """Seek to a position in the file. 
""" - self.linebuffer = "" - if whence == 0: - self.pos = min(max(pos, 0), self.size) - if whence == 1: + if self.closed: + raise ValueError("I/O operation on closed file") + + if whence == os.SEEK_SET: + self.position = min(max(pos, 0), self.size) + elif whence == os.SEEK_CUR: if pos < 0: - self.pos = max(self.pos + pos, 0) + self.position = max(self.position + pos, 0) else: - self.pos = min(self.pos + pos, self.size) - if whence == 2: - self.pos = max(min(self.size + pos, self.size), 0) + self.position = min(self.position + pos, self.size) + elif whence == os.SEEK_END: + self.position = max(min(self.size + pos, self.size), 0) + else: + raise ValueError("Invalid argument") + + self.buffer = "" + self.fileobj.seek(self.position) def close(self): """Close the file object. @@ -765,20 +825,13 @@ class ExFileObject(object): self.closed = True def __iter__(self): - """Get an iterator over the file object. + """Get an iterator over the file's lines. """ - if self.closed: - raise ValueError("I/O operation on closed file") - return self - - def next(self): - """Get the next item from the file iterator. - """ - result = self.readline() - if not result: - raise StopIteration - return result - + while True: + line = self.readline() + if not line: + break + yield line #class ExFileObject #------------------ @@ -821,9 +874,13 @@ class TarInfo(object): """Construct a TarInfo object from a 512 byte string buffer. """ if len(buf) != BLOCKSIZE: - raise ValueError("truncated header") + raise HeaderError("truncated header") if buf.count(NUL) == BLOCKSIZE: - raise ValueError("empty header") + raise HeaderError("empty header") + + chksum = nti(buf[148:156]) + if chksum not in calc_chksums(buf): + raise HeaderError("bad checksum") tarinfo = cls() tarinfo.buf = buf @@ -833,7 +890,7 @@ class TarInfo(object): tarinfo.gid = nti(buf[116:124]) tarinfo.size = nti(buf[124:136]) tarinfo.mtime = nti(buf[136:148]) - tarinfo.chksum = nti(buf[148:156]) + tarinfo.chksum = chksum tarinfo.type = buf[156:157] tarinfo.linkname = buf[157:257].rstrip(NUL) tarinfo.uname = buf[265:297].rstrip(NUL) @@ -845,8 +902,6 @@ class TarInfo(object): if prefix and not tarinfo.issparse(): tarinfo.name = prefix + "/" + tarinfo.name - if tarinfo.chksum not in calc_chksums(buf): - raise ValueError("invalid header") return tarinfo def tobuf(self, posix=False): @@ -999,7 +1054,7 @@ class TarFile(object): can be determined, `mode' is overridden by `fileobj's mode. `fileobj' is not closed, when TarFile is closed. """ - self.name = name + self.name = os.path.abspath(name) if len(mode) > 1 or mode not in "raw": raise ValueError("mode must be 'r', 'a' or 'w'") @@ -1011,7 +1066,7 @@ class TarFile(object): self._extfileobj = False else: if self.name is None and hasattr(fileobj, "name"): - self.name = fileobj.name + self.name = os.path.abspath(fileobj.name) if hasattr(fileobj, "mode"): self.mode = fileobj.mode self._extfileobj = True @@ -1088,9 +1143,13 @@ class TarFile(object): # Find out which *open() is appropriate for opening the file. 
for comptype in cls.OPEN_METH: func = getattr(cls, cls.OPEN_METH[comptype]) + if fileobj is not None: + saved_pos = fileobj.tell() try: return func(name, "r", fileobj) except (ReadError, CompressionError): + if fileobj is not None: + fileobj.seek(saved_pos) continue raise ReadError("file could not be opened successfully") @@ -1147,24 +1206,12 @@ class TarFile(object): except (ImportError, AttributeError): raise CompressionError("gzip module is not available") - pre, ext = os.path.splitext(name) - pre = os.path.basename(pre) - if ext == ".tgz": - ext = ".tar" - if ext == ".gz": - ext = "" - tarname = pre + ext - if fileobj is None: fileobj = _open(name, mode + "b") - if mode != "r": - name = tarname - try: - t = cls.taropen(tarname, mode, - gzip.GzipFile(name, mode, compresslevel, fileobj) - ) + t = cls.taropen(name, mode, + gzip.GzipFile(name, mode, compresslevel, fileobj)) except IOError: raise ReadError("not a gzip file") t._extfileobj = False @@ -1183,21 +1230,13 @@ class TarFile(object): except ImportError: raise CompressionError("bz2 module is not available") - pre, ext = os.path.splitext(name) - pre = os.path.basename(pre) - if ext == ".tbz2": - ext = ".tar" - if ext == ".bz2": - ext = "" - tarname = pre + ext - if fileobj is not None: fileobj = _BZ2Proxy(fileobj, mode) else: fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel) try: - t = cls.taropen(tarname, mode, fileobj) + t = cls.taropen(name, mode, fileobj) except IOError: raise ReadError("not a bzip2 file") t._extfileobj = False @@ -1402,8 +1441,7 @@ class TarFile(object): arcname = name # Skip if somebody tries to archive the archive... - if self.name is not None \ - and os.path.abspath(name) == os.path.abspath(self.name): + if self.name is not None and os.path.abspath(name) == self.name: self._dbg(2, "tarfile: Skipped %r" % name) return @@ -1795,16 +1833,14 @@ class TarFile(object): tarinfo = self.proc_member(tarinfo) - except ValueError, e: + except HeaderError, e: if self.ignore_zeros: - self._dbg(2, "0x%X: empty or invalid block: %s" % - (self.offset, e)) + self._dbg(2, "0x%X: %s" % (self.offset, e)) self.offset += BLOCKSIZE continue else: if self.offset == 0: - raise ReadError("empty, unreadable or compressed " - "file: %s" % e) + raise ReadError(str(e)) return None break diff --git a/Lib/test/test_compile.py b/Lib/test/test_compile.py index 73ef2d444e0..b517daa75aa 100644 --- a/Lib/test/test_compile.py +++ b/Lib/test/test_compile.py @@ -1,5 +1,4 @@ import unittest -import warnings import sys from test import test_support diff --git a/Lib/test/test_deque.py b/Lib/test/test_deque.py index 4c5d1ee5ae9..56031a7c2a6 100644 --- a/Lib/test/test_deque.py +++ b/Lib/test/test_deque.py @@ -396,6 +396,12 @@ class TestVariousIteratorArgs(unittest.TestCase): d.pop() self.assertRaises(RuntimeError, it.next) + def test_runtime_error_on_empty_deque(self): + d = deque() + it = iter(d) + d.append(10) + self.assertRaises(RuntimeError, it.next) + class Deque(deque): pass diff --git a/Lib/test/test_dumbdbm.py b/Lib/test/test_dumbdbm.py index 63b14b011b8..e5dfe1d7e78 100644 --- a/Lib/test/test_dumbdbm.py +++ b/Lib/test/test_dumbdbm.py @@ -38,6 +38,24 @@ class DumbDBMTestCase(unittest.TestCase): self.read_helper(f) f.close() + def test_dumbdbm_creation_mode(self): + # On platforms without chmod, don't do anything. 
+ if not (hasattr(os, 'chmod') and hasattr(os, 'umask')): + return + + try: + old_umask = os.umask(0002) + f = dumbdbm.open(_fname, 'c', 0637) + f.close() + finally: + os.umask(old_umask) + + import stat + st = os.stat(_fname + '.dat') + self.assertEqual(stat.S_IMODE(st.st_mode), 0635) + st = os.stat(_fname + '.dir') + self.assertEqual(stat.S_IMODE(st.st_mode), 0635) + def test_close_twice(self): f = dumbdbm.open(_fname) f['a'] = 'b' diff --git a/Lib/test/test_exceptions.py b/Lib/test/test_exceptions.py index abce41eef71..7619aae3a27 100644 --- a/Lib/test/test_exceptions.py +++ b/Lib/test/test_exceptions.py @@ -3,7 +3,6 @@ import os import sys import unittest -import warnings import pickle try: import cPickle diff --git a/Lib/test/test_heapq.py b/Lib/test/test_heapq.py index 191644986b3..b652d41fab8 100644 --- a/Lib/test/test_heapq.py +++ b/Lib/test/test_heapq.py @@ -104,20 +104,20 @@ class TestHeap(unittest.TestCase): self.assertEqual(heap_sorted, sorted(data)) def test_nsmallest(self): - data = [random.randrange(2000) for i in range(1000)] - f = lambda x: x * 547 % 2000 - for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100): - self.assertEqual(nsmallest(n, data), sorted(data)[:n]) - self.assertEqual(nsmallest(n, data, key=f), - sorted(data, key=f)[:n]) + data = [(random.randrange(2000), i) for i in range(1000)] + for f in (None, lambda x: x[0] * 547 % 2000): + for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100): + self.assertEqual(nsmallest(n, data), sorted(data)[:n]) + self.assertEqual(nsmallest(n, data, key=f), + sorted(data, key=f)[:n]) def test_nlargest(self): - data = [random.randrange(2000) for i in range(1000)] - f = lambda x: x * 547 % 2000 - for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100): - self.assertEqual(nlargest(n, data), sorted(data, reverse=True)[:n]) - self.assertEqual(nlargest(n, data, key=f), - sorted(data, key=f, reverse=True)[:n]) + data = [(random.randrange(2000), i) for i in range(1000)] + for f in (None, lambda x: x[0] * 547 % 2000): + for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100): + self.assertEqual(nlargest(n, data), sorted(data, reverse=True)[:n]) + self.assertEqual(nlargest(n, data, key=f), + sorted(data, key=f, reverse=True)[:n]) #============================================================================== diff --git a/Lib/test/test_import.py b/Lib/test/test_import.py index e37378f06cb..58de944b85a 100644 --- a/Lib/test/test_import.py +++ b/Lib/test/test_import.py @@ -1,10 +1,11 @@ -from test.test_support import TESTFN, run_unittest +from test.test_support import TESTFN, run_unittest, guard_warnings_filter import unittest import os import random import sys import py_compile +import warnings def remove_files(name): @@ -204,15 +205,11 @@ class ImportTest(unittest.TestCase): self.assert_(y is test.test_support, y.__name__) def test_import_initless_directory_warning(self): - import warnings - oldfilters = warnings.filters[:] - warnings.simplefilter('error', ImportWarning); - try: + with guard_warnings_filter(): # Just a random non-package directory we always expect to be # somewhere in sys.path... 
+ warnings.simplefilter('error', ImportWarning) self.assertRaises(ImportWarning, __import__, "site-packages") - finally: - warnings.filters = oldfilters def test_main(verbose=None): run_unittest(ImportTest) diff --git a/Lib/test/test_pty.py b/Lib/test/test_pty.py index 59e51627ddc..8a83e397860 100644 --- a/Lib/test/test_pty.py +++ b/Lib/test/test_pty.py @@ -115,6 +115,12 @@ if pid == pty.CHILD: os._exit(4) else: debug("Waiting for child (%d) to finish."%pid) + ##line = os.read(master_fd, 80) + ##lines = line.replace('\r\n', '\n').split('\n') + ##if False and lines != ['In child, calling os.setsid()', + ## 'Good: OSError was raised.', '']: + ## raise TestFailed("Unexpected output from child: %r" % line) + (pid, status) = os.waitpid(pid, 0) res = status >> 8 debug("Child (%d) exited with status %d (%d)."%(pid, res, status)) @@ -127,6 +133,15 @@ else: elif res != 4: raise TestFailed, "pty.fork() failed for unknown reasons." + ##debug("Reading from master_fd now that the child has exited") + ##try: + ## s1 = os.read(master_fd, 1024) + ##except os.error: + ## pass + ##else: + ## raise TestFailed("Read from master_fd did not raise exception") + + os.close(master_fd) # pty.fork() passed. diff --git a/Lib/test/test_random.py b/Lib/test/test_random.py index afcf113564d..7ec130d85aa 100644 --- a/Lib/test/test_random.py +++ b/Lib/test/test_random.py @@ -180,10 +180,9 @@ class WichmannHill_TestBasicOps(TestBasicOps): def test_bigrand(self): # Verify warnings are raised when randrange is too large for random() - oldfilters = warnings.filters[:] - warnings.filterwarnings("error", "Underlying random") - self.assertRaises(UserWarning, self.gen.randrange, 2**60) - warnings.filters[:] = oldfilters + with test_support.guard_warnings_filter(): + warnings.filterwarnings("error", "Underlying random") + self.assertRaises(UserWarning, self.gen.randrange, 2**60) class SystemRandom_TestBasicOps(TestBasicOps): gen = random.SystemRandom() @@ -441,6 +440,14 @@ class MersenneTwister_TestBasicOps(TestBasicOps): self.assertEqual(k, numbits) # note the stronger assertion self.assert_(2**k > n > 2**(k-1)) # note the stronger assertion + def test_randrange_bug_1590891(self): + start = 1000000000000 + stop = -100000000000000000000 + step = -200 + x = self.gen.randrange(start, stop, step) + self.assert_(stop < x <= start) + self.assertEqual((x+stop)%step, 0) + _gammacoeff = (0.9999999999995183, 676.5203681218835, -1259.139216722289, 771.3234287757674, -176.6150291498386, 12.50734324009056, -0.1385710331296526, 0.9934937113930748e-05, 0.1659470187408462e-06) diff --git a/Lib/test/test_repr.py b/Lib/test/test_repr.py index 1dfa2821104..823298bfe59 100644 --- a/Lib/test/test_repr.py +++ b/Lib/test/test_repr.py @@ -136,7 +136,6 @@ class ReprTests(unittest.TestCase): '> fo, s, + fo.close() + fo = open(test_support.TESTFN, "rb") + self.assertEqual(fo.read(), repr(s)) + finally: + fo.close() + os.remove(test_support.TESTFN) + class TestSet(TestJointOps): thetype = set diff --git a/Lib/test/test_struct.py b/Lib/test/test_struct.py index 302698ba3a1..d4744dde790 100644 --- a/Lib/test/test_struct.py +++ b/Lib/test/test_struct.py @@ -50,22 +50,17 @@ def any_err(func, *args): def with_warning_restore(func): def _with_warning_restore(*args, **kw): - # The `warnings` module doesn't have an advertised way to restore - # its filter list. Cheat. - save_warnings_filters = warnings.filters[:] - # Grrr, we need this function to warn every time. 
Without removing - # the warningregistry, running test_tarfile then test_struct would fail - # on 64-bit platforms. - globals = func.func_globals - if '__warningregistry__' in globals: - del globals['__warningregistry__'] - warnings.filterwarnings("error", r"""^struct.*""", DeprecationWarning) - warnings.filterwarnings("error", r""".*format requires.*""", - DeprecationWarning) - try: + with test.test_support.guard_warnings_filter(): + # Grrr, we need this function to warn every time. Without removing + # the warningregistry, running test_tarfile then test_struct would fail + # on 64-bit platforms. + globals = func.func_globals + if '__warningregistry__' in globals: + del globals['__warningregistry__'] + warnings.filterwarnings("error", r"""^struct.*""", DeprecationWarning) + warnings.filterwarnings("error", r""".*format requires.*""", + DeprecationWarning) return func(*args, **kw) - finally: - warnings.filters[:] = save_warnings_filters[:] return _with_warning_restore def deprecated_err(func, *args): diff --git a/Lib/test/test_support.py b/Lib/test/test_support.py index 2829c5566d3..2c196984805 100644 --- a/Lib/test/test_support.py +++ b/Lib/test/test_support.py @@ -3,7 +3,9 @@ if __name__ != 'test.test_support': raise ImportError, 'test_support must be imported from the test package' +from contextlib import contextmanager import sys +import warnings class Error(Exception): """Base class for regression test exceptions.""" @@ -267,6 +269,48 @@ def open_urlresource(url): print >> get_original_stdout(), '\tfetching %s ...' % url fn, _ = urllib.urlretrieve(url, filename) return open(fn) + +@contextmanager +def guard_warnings_filter(): + """Guard the warnings filter from being permanently changed.""" + original_filters = warnings.filters[:] + try: + yield + finally: + warnings.filters = original_filters + +class EnvironmentVarGuard(object): + + """Class to help protect the environment variable properly. Can be used as + a context manager.""" + + def __init__(self): + from os import environ + self._environ = environ + self._unset = set() + self._reset = dict() + + def set(self, envvar, value): + if envvar not in self._environ: + self._unset.add(envvar) + else: + self._reset[envvar] = self._environ[envvar] + self._environ[envvar] = value + + def unset(self, envvar): + if envvar in self._environ: + self._reset[envvar] = self._environ[envvar] + del self._environ[envvar] + + def __enter__(self): + return self + + def __exit__(self, *ignore_exc): + for envvar, value in self._reset.iteritems(): + self._environ[envvar] = value + for unset in self._unset: + del self._environ[unset] + #======================================================================= # Decorator for running a function in a different locale, correctly resetting diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py index 0cebb299096..2b39715e98b 100644 --- a/Lib/test/test_tarfile.py +++ b/Lib/test/test_tarfile.py @@ -110,7 +110,7 @@ class ReadTest(BaseTest): """Test seek() method of _FileObject, incl. random reading. 
""" if self.sep != "|": - filename = "0-REGTYPE" + filename = "0-REGTYPE-TEXT" self.tar.extract(filename, dirname()) f = open(os.path.join(dirname(), filename), "rb") data = f.read() @@ -149,6 +149,16 @@ class ReadTest(BaseTest): s2 = fobj.readlines() self.assert_(s1 == s2, "readlines() after seek failed") + fobj.seek(0) + self.assert_(len(fobj.readline()) == fobj.tell(), + "tell() after readline() failed") + fobj.seek(512) + self.assert_(len(fobj.readline()) + 512 == fobj.tell(), + "tell() after seek() and readline() failed") + fobj.seek(0) + line = fobj.readline() + self.assert_(fobj.read() == data[len(line):], + "read() after readline() failed") fobj.close() def test_old_dirtype(self): @@ -280,6 +290,20 @@ class WriteTest(BaseTest): else: self.dst.addfile(tarinfo, f) + def test_add_self(self): + dstname = os.path.abspath(self.dstname) + + self.assertEqual(self.dst.name, dstname, "archive name must be absolute") + + self.dst.add(dstname) + self.assertEqual(self.dst.getnames(), [], "added the archive to itself") + + cwd = os.getcwd() + os.chdir(dirname()) + self.dst.add(dstname) + os.chdir(cwd) + self.assertEqual(self.dst.getnames(), [], "added the archive to itself") + class Write100Test(BaseTest): # The name field in a tar header stores strings of at most 100 chars. @@ -601,6 +625,38 @@ class FileModeTest(unittest.TestCase): self.assertEqual(tarfile.filemode(0755), '-rwxr-xr-x') self.assertEqual(tarfile.filemode(07111), '---s--s--t') +class HeaderErrorTest(unittest.TestCase): + + def test_truncated_header(self): + self.assertRaises(tarfile.HeaderError, tarfile.TarInfo.frombuf, "") + self.assertRaises(tarfile.HeaderError, tarfile.TarInfo.frombuf, "filename\0") + self.assertRaises(tarfile.HeaderError, tarfile.TarInfo.frombuf, "\0" * 511) + self.assertRaises(tarfile.HeaderError, tarfile.TarInfo.frombuf, "\0" * 513) + + def test_empty_header(self): + self.assertRaises(tarfile.HeaderError, tarfile.TarInfo.frombuf, "\0" * 512) + + def test_invalid_header(self): + buf = tarfile.TarInfo("filename").tobuf() + buf = buf[:148] + "foo\0\0\0\0\0" + buf[156:] # invalid number field. + self.assertRaises(tarfile.HeaderError, tarfile.TarInfo.frombuf, buf) + + def test_bad_checksum(self): + buf = tarfile.TarInfo("filename").tobuf() + b = buf[:148] + " " + buf[156:] # clear the checksum field. + self.assertRaises(tarfile.HeaderError, tarfile.TarInfo.frombuf, b) + b = "a" + buf[1:] # manipulate the buffer, so checksum won't match. + self.assertRaises(tarfile.HeaderError, tarfile.TarInfo.frombuf, b) + +class OpenFileobjTest(BaseTest): + # Test for SF bug #1496501. 
+ + def test_opener(self): + fobj = StringIO.StringIO("foo\n") + try: + tarfile.open("", "r", fileobj=fobj) + except tarfile.ReadError: + self.assertEqual(fobj.tell(), 0, "fileobj's position has moved") if bz2: # Bzip2 TestCases @@ -646,6 +702,8 @@ def test_main(): tests = [ FileModeTest, + HeaderErrorTest, + OpenFileobjTest, ReadTest, ReadStreamTest, ReadDetectTest, diff --git a/Lib/test/test_uu.py b/Lib/test/test_uu.py index 7786316e9d4..16a55e4959a 100644 --- a/Lib/test/test_uu.py +++ b/Lib/test/test_uu.py @@ -114,11 +114,11 @@ class UUFileTest(unittest.TestCase): def test_encode(self): try: - fin = open(self.tmpin, 'wb') + fin = open(self.tmpin, 'w') fin.write(plaintext) fin.close() - fin = open(self.tmpin, 'rb') + fin = open(self.tmpin, 'r') fout = open(self.tmpout, 'w') uu.encode(fin, fout, self.tmpin, mode=0644) fin.close() @@ -130,7 +130,7 @@ class UUFileTest(unittest.TestCase): self.assertEqual(s, encodedtextwrapped % (0644, self.tmpin)) # in_file and out_file as filenames - uu.encode(self.tmpin, self.tmpout, mode=0644) + uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0644) fout = open(self.tmpout, 'r') s = fout.read() fout.close() @@ -142,11 +142,11 @@ class UUFileTest(unittest.TestCase): def test_decode(self): try: - f = open(self.tmpin, 'wb') + f = open(self.tmpin, 'w') f.write(encodedtextwrapped % (0644, self.tmpout)) f.close() - f = open(self.tmpin, 'rb') + f = open(self.tmpin, 'r') uu.decode(f) f.close() @@ -163,11 +163,11 @@ class UUFileTest(unittest.TestCase): try: f = cStringIO.StringIO(encodedtextwrapped % (0644, self.tmpout)) - f = open(self.tmpin, 'rb') + f = open(self.tmpin, 'r') uu.decode(f) f.close() - f = open(self.tmpin, 'rb') + f = open(self.tmpin, 'r') self.assertRaises(uu.Error, uu.decode, f) f.close() finally: diff --git a/Lib/test/test_weakref.py b/Lib/test/test_weakref.py index 1165980c65a..06f4537f448 100644 --- a/Lib/test/test_weakref.py +++ b/Lib/test/test_weakref.py @@ -189,7 +189,7 @@ class ReferencesTestCase(TestBase): # None as the value for the callback, where either means "no # callback". The "no callback" ref and proxy objects are supposed # to be shared so long as they exist by all callers so long as - # they are active. In Python 2.3.3 and earlier, this guaranttee + # they are active. In Python 2.3.3 and earlier, this guarantee # was not honored, and was broken in different ways for # PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.) diff --git a/Lib/threading.py b/Lib/threading.py index 5655dded32d..fecd3cc3041 100644 --- a/Lib/threading.py +++ b/Lib/threading.py @@ -636,13 +636,11 @@ class _MainThread(Thread): _active_limbo_lock.acquire() _active[_get_ident()] = self _active_limbo_lock.release() - import atexit - atexit.register(self.__exitfunc) def _set_daemon(self): return False - def __exitfunc(self): + def _exitfunc(self): self._Thread__stop() t = _pickSomeNonDaemonThread() if t: @@ -715,9 +713,11 @@ def enumerate(): from thread import stack_size -# Create the main thread object +# Create the main thread object, +# and make it available for the interpreter +# (Py_Main) as threading._shutdown. 
-_MainThread() +_shutdown = _MainThread()._exitfunc # get thread-local implementation, either from the thread # module, or from the python fallback diff --git a/Lib/urllib.py b/Lib/urllib.py index 90f7aa07e89..27ec2c9d7ff 100644 --- a/Lib/urllib.py +++ b/Lib/urllib.py @@ -405,8 +405,8 @@ class URLopener: h.putheader('Content-Length', '%d' % len(data)) else: h.putrequest('GET', selector) - if proxy_auth: h.putheader('Proxy-Authorization: Basic %s' % proxy_auth) - if auth: h.putheader('Authorization: Basic %s' % auth) + if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth) + if auth: h.putheader('Authorization', 'Basic %s' % auth) if realhost: h.putheader('Host', realhost) for args in self.addheaders: h.putheader(*args) h.endheaders() diff --git a/Misc/ACKS b/Misc/ACKS index d5d7675bc25..b1981140b0a 100644 --- a/Misc/ACKS +++ b/Misc/ACKS @@ -285,6 +285,7 @@ Chris Hoffman Albert Hofkamp Jonathan Hogg Gerrit Holl +Shane Holloway Rune Holm Philip Homburg Naofumi Honda diff --git a/Misc/developers.txt b/Misc/developers.txt index c0859081564..2bdab7c3d0a 100644 --- a/Misc/developers.txt +++ b/Misc/developers.txt @@ -17,6 +17,15 @@ the format to accommodate documentation needs as they arise. Permissions History ------------------- +- Josiah Carlson (SF name "josiahcarlson") added to the SourceForge Python + project 06 Jan 2007, by NCN, as a tracker tech. He will maintain asyncore. + +- Collin Winter was given SVN access on 05 Jan 2007 by NCN, for PEP + update access. + +- Lars Gustaebel was given SVN access on 20 Dec 2006 by NCN, for tarfile.py + related work. + - 2006 Summer of Code entries: SoC developers are expected to work primarily in nondist/sandbox or on a branch of their own, and will have their work reviewed before changes are accepted into the trunk. diff --git a/Modules/_bsddb.c b/Modules/_bsddb.c index 68489e9a48e..c3d3415055c 100644 --- a/Modules/_bsddb.c +++ b/Modules/_bsddb.c @@ -98,7 +98,7 @@ #error "eek! 
DBVER can't handle minor versions > 9" #endif -#define PY_BSDDB_VERSION "4.4.6" +#define PY_BSDDB_VERSION "4.5.0" static char *rcs_id = "$Id$"; @@ -4127,6 +4127,7 @@ DBEnv_set_lk_detect(DBEnvObject* self, PyObject* args) } +#if (DBVER < 45) static PyObject* DBEnv_set_lk_max(DBEnvObject* self, PyObject* args) { @@ -4142,6 +4143,7 @@ DBEnv_set_lk_max(DBEnvObject* self, PyObject* args) RETURN_IF_ERR(); RETURN_NONE(); } +#endif #if (DBVER >= 32) @@ -5231,7 +5233,9 @@ static PyMethodDef DBEnv_methods[] = { {"set_lg_regionmax",(PyCFunction)DBEnv_set_lg_regionmax, METH_VARARGS}, #endif {"set_lk_detect", (PyCFunction)DBEnv_set_lk_detect, METH_VARARGS}, +#if (DBVER < 45) {"set_lk_max", (PyCFunction)DBEnv_set_lk_max, METH_VARARGS}, +#endif #if (DBVER >= 32) {"set_lk_max_locks", (PyCFunction)DBEnv_set_lk_max_locks, METH_VARARGS}, {"set_lk_max_lockers", (PyCFunction)DBEnv_set_lk_max_lockers, METH_VARARGS}, @@ -5833,7 +5837,9 @@ PyMODINIT_FUNC init_bsddb(void) ADD_INT(d, DB_AFTER); ADD_INT(d, DB_APPEND); ADD_INT(d, DB_BEFORE); +#if (DBVER < 45) ADD_INT(d, DB_CACHED_COUNTS); +#endif #if (DBVER >= 41) _addIntToDict(d, "DB_CHECKPOINT", 0); #else @@ -5868,7 +5874,9 @@ PyMODINIT_FUNC init_bsddb(void) ADD_INT(d, DB_POSITION); ADD_INT(d, DB_PREV); ADD_INT(d, DB_PREV_NODUP); +#if (DBVER < 45) ADD_INT(d, DB_RECORDCOUNT); +#endif ADD_INT(d, DB_SET); ADD_INT(d, DB_SET_RANGE); ADD_INT(d, DB_SET_RECNO); diff --git a/Modules/bz2module.c b/Modules/bz2module.c index 5a4e5d99581..3c6daa9eff5 100644 --- a/Modules/bz2module.c +++ b/Modules/bz2module.c @@ -989,7 +989,7 @@ BZ2File_seek(BZ2FileObject *self, PyObject *args) char small_buffer[SMALLCHUNK]; char *buffer = small_buffer; size_t buffersize = SMALLCHUNK; - int bytesread = 0; + Py_off_t bytesread = 0; size_t readsize; int chunksize; int bzerror; diff --git a/Modules/collectionsmodule.c b/Modules/collectionsmodule.c index cb12b3a6a70..ca4b3270594 100644 --- a/Modules/collectionsmodule.c +++ b/Modules/collectionsmodule.c @@ -911,15 +911,14 @@ dequeiter_next(dequeiterobject *it) { PyObject *item; - if (it->counter == 0) - return NULL; - if (it->deque->state != it->state) { it->counter = 0; PyErr_SetString(PyExc_RuntimeError, "deque mutated during iteration"); return NULL; } + if (it->counter == 0) + return NULL; assert (!(it->b == it->deque->rightblock && it->index > it->deque->rightindex)); diff --git a/Modules/main.c b/Modules/main.c index ac6b38dccd0..d604879a28c 100644 --- a/Modules/main.c +++ b/Modules/main.c @@ -175,6 +175,33 @@ static int RunModule(char *module) return 0; } +/* Wait until threading._shutdown completes, provided + the threading module was imported in the first place. + The shutdown routine will wait until all non-daemon + "threading" threads have completed. 
*/ +#include "abstract.h" +static void +WaitForThreadShutdown(void) +{ +#ifdef WITH_THREAD + PyObject *result; + PyThreadState *tstate = PyThreadState_GET(); + PyObject *threading = PyMapping_GetItemString(tstate->interp->modules, + "threading"); + if (threading == NULL) { + /* threading not imported */ + PyErr_Clear(); + return; + } + result = PyObject_CallMethod(threading, "_shutdown", ""); + if (result == NULL) + PyErr_WriteUnraisable(threading); + else + Py_DECREF(result); + Py_DECREF(threading); +#endif +} + /* Main program */ int @@ -483,6 +510,8 @@ Py_Main(int argc, char **argv) /* XXX */ sts = PyRun_AnyFileFlags(stdin, "", &cf) != 0; + WaitForThreadShutdown(); + Py_Finalize(); #ifdef RISCOS if (Py_RISCOSWimpFlag) diff --git a/Modules/socketmodule.c b/Modules/socketmodule.c index c9f0388a302..8ec0ed73ca3 100644 --- a/Modules/socketmodule.c +++ b/Modules/socketmodule.c @@ -2210,7 +2210,7 @@ The mode and buffersize arguments are as for the built-in open() function."); /* * This is the guts of the recv() and recv_into() methods, which reads into a - * char buffer. If you have any inc/def ref to do to the objects that contain + * char buffer. If you have any inc/dec ref to do to the objects that contain * the buffer, do it in the caller. This function returns the number of bytes * succesfully read. If there was an error, it returns -1. Note that it is * also possible that we return a number of bytes smaller than the request diff --git a/Modules/timemodule.c b/Modules/timemodule.c index 444b739fe72..9ab27247389 100644 --- a/Modules/timemodule.c +++ b/Modules/timemodule.c @@ -659,7 +659,7 @@ void inittimezone(PyObject *m) { time_tzset. In the future, some parts of it can be moved back (for platforms that don't HAVE_WORKING_TZSET, when we know what they are), and the extranious calls to tzset(3) should be removed. - I havn't done this yet, as I don't want to change this code as + I haven't done this yet, as I don't want to change this code as little as possible when introducing the time.tzset and time.tzsetwall methods. This should simply be a method of doing the following once, at the top of this function and removing the call to tzset() from diff --git a/Objects/dictnotes.txt b/Objects/dictnotes.txt index b0e59a7f10f..3b63197e794 100644 --- a/Objects/dictnotes.txt +++ b/Objects/dictnotes.txt @@ -44,7 +44,7 @@ Uniquification d.setdefault(word, []).append(pagenumber) Note, the second example is a use case characterized by a get and set - to the same key. There are similar used cases with a __contains__ + to the same key. There are similar use cases with a __contains__ followed by a get, set, or del to the same key. Part of the justification for d.setdefault is combining the two lookups into one. 
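The dictnotes.txt passage above contrasts a membership test followed by a store against a single d.setdefault() call, which folds both lookups into one. A minimal sketch of that use case (the sample data and variable names here are illustrative, not taken from the patch):

    # Building a word -> page-number index, the use case dictnotes.txt describes.
    occurrences = [("spam", 3), ("eggs", 7), ("spam", 12)]

    # Two lookups per key: one for the membership test, one for the store.
    index = {}
    for word, pagenumber in occurrences:
        if word not in index:
            index[word] = []
        index[word].append(pagenumber)

    # One lookup per key: setdefault() inserts the default and returns it,
    # or returns the value already stored under that key.
    index2 = {}
    for word, pagenumber in occurrences:
        index2.setdefault(word, []).append(pagenumber)

    assert index == index2 == {"spam": [3, 12], "eggs": [7]}
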
diff --git a/Objects/setobject.c b/Objects/setobject.c index 0af1f154fef..be829a8611e 100644 --- a/Objects/setobject.c +++ b/Objects/setobject.c @@ -573,8 +573,17 @@ set_tp_print(PySetObject *so, FILE *fp, int flags) char *emit = ""; /* No separator emitted on first pass */ char *separator = ", "; int literalform = 0; + int status = Py_ReprEnter((PyObject*)so); + + if (status != 0) { + if (status < 0) + return status; + fprintf(fp, "%s(...)", so->ob_type->tp_name); + return 0; + } if (!so->used) { + Py_ReprLeave((PyObject*)so); fprintf(fp, "%s()", so->ob_type->tp_name); return 0; } @@ -587,32 +596,44 @@ set_tp_print(PySetObject *so, FILE *fp, int flags) while (set_next(so, &pos, &entry)) { fputs(emit, fp); emit = separator; - if (PyObject_Print(entry->key, fp, 0) != 0) + if (PyObject_Print(entry->key, fp, 0) != 0) { + Py_ReprLeave((PyObject*)so); return -1; + } } if (literalform) fputs("}", fp); else fputs("])", fp); + Py_ReprLeave((PyObject*)so); return 0; } static PyObject * set_repr(PySetObject *so) { - PyObject *keys, *result, *listrepr; + PyObject *keys, *result=NULL, *listrepr; + int status = Py_ReprEnter((PyObject*)so); + + if (status != 0) { + if (status < 0) + return NULL; + return PyString_FromFormat("%s(...)", so->ob_type->tp_name); + } /* shortcut for the empty set */ - if (!so->used) + if (!so->used) { + Py_ReprLeave((PyObject*)so); return PyString_FromFormat("%s()", so->ob_type->tp_name); + } keys = PySequence_List((PyObject *)so); if (keys == NULL) - return NULL; + goto done; listrepr = PyObject_Repr(keys); Py_DECREF(keys); if (listrepr == NULL) - return NULL; + goto done; if (so->ob_type == &PySet_Type) { char *s = PyString_AS_STRING(listrepr); @@ -624,6 +645,8 @@ set_repr(PySetObject *so) PyString_AS_STRING(listrepr)); } Py_DECREF(listrepr); +done: + Py_ReprLeave((PyObject*)so); return result; } diff --git a/PC/python_nt.rc b/PC/python_nt.rc index a4e97fd3b65..6fb73b6412f 100644 --- a/PC/python_nt.rc +++ b/PC/python_nt.rc @@ -61,7 +61,7 @@ BEGIN VALUE "FileDescription", "Python Core\0" VALUE "FileVersion", PYTHON_VERSION VALUE "InternalName", "Python DLL\0" - VALUE "LegalCopyright", "Copyright © 2001-2006 Python Software Foundation. Copyright © 2000 BeOpen.com. Copyright © 1995-2001 CNRI. Copyright © 1991-1995 SMC.\0" + VALUE "LegalCopyright", "Copyright © 2001-2007 Python Software Foundation. Copyright © 2000 BeOpen.com. Copyright © 1995-2001 CNRI. 
Copyright © 1991-1995 SMC.\0" VALUE "OriginalFilename", PYTHON_DLL_NAME "\0" VALUE "ProductName", "Python\0" VALUE "ProductVersion", PYTHON_VERSION diff --git a/PCbuild/python20.wse b/PCbuild/python20.wse index a62265ad341..fa11af4b3ed 100644 --- a/PCbuild/python20.wse +++ b/PCbuild/python20.wse @@ -24,8 +24,8 @@ item: Global Dialogs Version=8 Version File=2.4a1 Version Description=Python Programming Language - Version Copyright=©2001-2006 Python Software Foundation - Version Company=PythonLabs at Zope Corporation + Version Copyright=©2001-2007 Python Software Foundation + Version Company=Python Software Foundation Crystal Format=10111100101100000010001001001001 Step View=&All Variable Name1=_WISE_ diff --git a/PCbuild8/python20.wse b/PCbuild8/python20.wse index 33a34914a4e..a8d97852724 100644 --- a/PCbuild8/python20.wse +++ b/PCbuild8/python20.wse @@ -24,8 +24,8 @@ item: Global Dialogs Version=8 Version File=2.4a1 Version Description=Python Programming Language - Version Copyright=©2001-2006 Python Software Foundation - Version Company=PythonLabs at Zope Corporation + Version Copyright=©2001-2007 Python Software Foundation + Version Company=Python Software Foundation Crystal Format=10111100101100000010001001001001 Step View=&All Variable Name1=_WISE_ diff --git a/Python/ceval.c b/Python/ceval.c index f5ebb8eded1..978ff618136 100644 --- a/Python/ceval.c +++ b/Python/ceval.c @@ -738,7 +738,16 @@ PyEval_EvalFrameEx(PyFrameObject *f, int throwflag) this wasn't always true before 2.3! PyFrame_New now sets f->f_lasti to -1 (i.e. the index *before* the first instruction) and YIELD_VALUE doesn't fiddle with f_lasti any more. So this - does work. Promise. */ + does work. Promise. + + When the PREDICT() macros are enabled, some opcode pairs follow in + direct succession without updating f->f_lasti. A successful + prediction effectively links the two codes together as if they + were a single new opcode; accordingly, f->f_lasti will point to + the first code in the pair (for instance, GET_ITER followed by + FOR_ITER is effectively a single opcode and f->f_lasti will point + at the beginning of the combined pair.) + */ next_instr = first_instr + f->f_lasti + 1; stack_pointer = f->f_stacktop; assert(stack_pointer != NULL); diff --git a/Python/errors.c b/Python/errors.c index 28c1ea55a3a..0e565c1cc05 100644 --- a/Python/errors.c +++ b/Python/errors.c @@ -649,7 +649,8 @@ PyErr_WarnEx(PyObject *category, const char *message, Py_ssize_t stack_level) if (warnings_module != NULL) { dict = PyModule_GetDict(warnings_module); - func = PyDict_GetItemString(dict, "warn"); + if (dict != NULL) + func = PyDict_GetItemString(dict, "warn"); } if (func == NULL) { PySys_WriteStderr("warning: %s\n", message); diff --git a/Python/getcopyright.c b/Python/getcopyright.c index 325aee51f99..c10aea452c3 100644 --- a/Python/getcopyright.c +++ b/Python/getcopyright.c @@ -4,7 +4,7 @@ static char cprt[] = "\ -Copyright (c) 2001-2006 Python Software Foundation.\n\ +Copyright (c) 2001-2007 Python Software Foundation.\n\ All Rights Reserved.\n\ \n\ Copyright (c) 2000 BeOpen.com.\n\ diff --git a/README b/README index ed6b1396abb..6eeb05101c4 100644 --- a/README +++ b/README @@ -1,7 +1,8 @@ This is Python 3000 -- unversioned (branched off 2.5 in various beta stages) ================================================================= -Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation. +Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 +Python Software Foundation. All rights reserved.
Copyright (c) 2000 BeOpen.com. diff --git a/setup.py b/setup.py index 39001071c77..1f529e3af01 100644 --- a/setup.py +++ b/setup.py @@ -606,7 +606,7 @@ class PyBuildExt(build_ext): # a release. Most open source OSes come with one or more # versions of BerkeleyDB already installed. - max_db_ver = (4, 4) + max_db_ver = (4, 5) min_db_ver = (3, 3) db_setup_debug = False # verbose debug prints from this script? @@ -623,7 +623,7 @@ class PyBuildExt(build_ext): '/sw/include/db3', ] # 4.x minor number specific paths - for x in (0,1,2,3,4): + for x in (0,1,2,3,4,5): db_inc_paths.append('/usr/include/db4%d' % x) db_inc_paths.append('/usr/include/db4.%d' % x) db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x) @@ -631,7 +631,7 @@ class PyBuildExt(build_ext): db_inc_paths.append('/pkg/db-4.%d/include' % x) db_inc_paths.append('/opt/db-4.%d/include' % x) # 3.x minor number specific paths - for x in (2,3): + for x in (3,): db_inc_paths.append('/usr/include/db3%d' % x) db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x) db_inc_paths.append('/usr/local/include/db3%d' % x)
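
The setup.py hunk above widens the accepted Berkeley DB range to 4.5 and drops the 3.2-specific include paths. A minimal standalone sketch of the probing this implies (db_version_ok is an illustrative helper written for this sketch, not a function in setup.py):

    # Version gate after this change: a detected libdb is acceptable only if
    # min_db_ver <= (major, minor) <= max_db_ver.
    min_db_ver = (3, 3)
    max_db_ver = (4, 5)

    def db_version_ok(major, minor):
        return min_db_ver <= (major, minor) <= max_db_ver

    assert db_version_ok(4, 5) and db_version_ok(3, 3)
    assert not db_version_ok(4, 6) and not db_version_ok(3, 2)

    # Candidate include directories probed for the 4.x and 3.x lines,
    # mirroring the loops shown in the hunk.
    db_inc_paths = []
    for x in (0, 1, 2, 3, 4, 5):
        db_inc_paths.append('/usr/include/db4%d' % x)
        db_inc_paths.append('/usr/include/db4.%d' % x)
        db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x)
        db_inc_paths.append('/pkg/db-4.%d/include' % x)
        db_inc_paths.append('/opt/db-4.%d/include' % x)
    for x in (3,):
        db_inc_paths.append('/usr/include/db3%d' % x)
        db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x)
        db_inc_paths.append('/usr/local/include/db3%d' % x)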