2007-04-10 16:01:47 -03:00
|
|
|
"""New I/O library conforming to PEP 3116.
|
2007-02-27 01:47:44 -04:00
|
|
|
|
2007-04-11 13:07:50 -03:00
|
|
|
This is a prototype; hopefully eventually some of this will be
|
|
|
|
reimplemented in C.
|
2007-02-27 11:45:13 -04:00
|
|
|
|
2007-04-10 16:01:47 -03:00
|
|
|
Conformance of alternative implementations: all arguments are intended
|
|
|
|
to be positional-only except the arguments of the open() function.
|
|
|
|
Argument names except those of the open() function are not part of the
|
|
|
|
specification. Instance variables and methods whose name starts with
|
|
|
|
a leading underscore are not part of the specification (except "magic"
|
|
|
|
names like __iter__). Only the top-level names listed in the __all__
|
|
|
|
variable are part of the specification.
|
2007-03-15 15:59:31 -03:00
|
|
|
|
2007-04-11 13:07:50 -03:00
|
|
|
XXX edge cases when switching between reading/writing
|
2007-03-15 15:59:31 -03:00
|
|
|
XXX need to support 1 meaning line-buffered
|
2007-04-10 22:09:03 -03:00
|
|
|
XXX whenever an argument is None, use the default value
|
|
|
|
XXX read/write ops should check readable/writable
|
2007-04-12 02:44:49 -03:00
|
|
|
XXX buffered readinto should work with arbitrary buffer objects
|
2007-04-16 23:38:04 -03:00
|
|
|
XXX use incremental encoder for text output, at least for UTF-16 and UTF-8-SIG
|
2007-08-27 14:39:33 -03:00
|
|
|
XXX check writable, readable and seekable in appropriate places
|
2007-02-27 01:47:44 -04:00
|
|
|
"""
|
|
|
|
|
2007-02-27 13:19:33 -04:00
|
|
|
# Original authors of this prototype implementation.
__author__ = ("Guido van Rossum <guido@python.org>, "
              "Mike Verdone <mike.verdone@gmail.com>, "
              "Mark Russell <mark.russell@zen.co.uk>")
|
2007-02-27 01:47:44 -04:00
|
|
|
|
2007-04-09 21:22:16 -03:00
|
|
|
# Public API: per the module docstring, only these top-level names are
# part of the specification.
__all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO",
           "BytesIO", "StringIO", "BufferedIOBase",
           "BufferedReader", "BufferedWriter", "BufferedRWPair",
           "BufferedRandom", "TextIOBase", "TextIOWrapper"]
|
2007-02-27 01:47:44 -04:00
|
|
|
|
|
|
|
import os
|
2007-08-22 15:14:10 -03:00
|
|
|
import abc
|
2007-04-06 14:31:18 -03:00
|
|
|
import sys
|
|
|
|
import codecs
|
2007-04-09 21:22:16 -03:00
|
|
|
import _fileio
|
2007-04-06 14:31:18 -03:00
|
|
|
import warnings
|
2007-02-27 01:47:44 -04:00
|
|
|
|
2007-08-27 14:39:33 -03:00
|
|
|
# open() uses st_blksize whenever we can
# Fallback buffer size (in bytes) when the device's st_blksize is
# unavailable or too small.
DEFAULT_BUFFER_SIZE = 8 * 1024  # bytes
|
2007-03-06 21:00:12 -04:00
|
|
|
|
|
|
|
|
2007-04-09 21:22:16 -03:00
|
|
|
class BlockingIOError(IOError):

    """Exception raised when I/O would block on a non-blocking I/O stream."""

    def __init__(self, errno, strerror, characters_written=0):
        super().__init__(errno, strerror)
        # How many characters/bytes were transferred before blocking.
        self.characters_written = characters_written
|
|
|
|
|
2007-02-27 13:19:33 -04:00
|
|
|
|
2007-12-03 18:54:21 -04:00
|
|
|
def open(file, mode="r", buffering=None, encoding=None, errors=None,
         newline=None, closefd=True):
    r"""Replacement for the built-in open function.

    Args:
      file: string giving the name of the file to be opened;
            or integer file descriptor of the file to be wrapped (*).
      mode: optional mode string; see below.
      buffering: optional int >= 0 giving the buffer size; values
                 can be: 0 = unbuffered, 1 = line buffered,
                 larger = fully buffered.
      encoding: optional string giving the text encoding.
      errors: optional string giving the encoding error handling.
      newline: optional newlines specifier; must be None, '', '\n', '\r'
               or '\r\n'; all other values are illegal.  It controls the
               handling of line endings.  It works as follows:

        * On input, if `newline` is `None`, universal newlines
          mode is enabled.  Lines in the input can end in `'\n'`,
          `'\r'`, or `'\r\n'`, and these are translated into
          `'\n'` before being returned to the caller.  If it is
          `''`, universal newline mode is enabled, but line endings
          are returned to the caller untranslated.  If it has any of
          the other legal values, input lines are only terminated by
          the given string, and the line ending is returned to the
          caller untranslated.

        * On output, if `newline` is `None`, any `'\n'`
          characters written are translated to the system default
          line separator, `os.linesep`.  If `newline` is `''`,
          no translation takes place.  If `newline` is any of the
          other legal values, any `'\n'` characters written are
          translated to the given string.

      closefd: optional argument to keep the underlying file descriptor
               open when the file is closed.  It must not be false when
               a filename is given.

    (*) If a file descriptor is given, it is closed when the returned
    I/O object is closed, unless closefd=False is given.

    Mode strings characters:
      'r': open for reading (default)
      'w': open for writing, truncating the file first
      'a': open for writing, appending to the end if the file exists
      'b': binary mode
      't': text mode (default)
      '+': open a disk file for updating (implies reading and writing)
      'U': universal newline mode (for backwards compatibility)

    Constraints:
      - encoding or errors must not be given when a binary mode is given
      - buffering must not be zero when a text mode is given

    Returns:
      Depending on the mode and buffering arguments, either a raw
      binary stream, a buffered binary stream, or a buffered text
      stream, open for reading and/or writing.
    """
    # --- Argument type validation ---
    if not isinstance(file, (str, int)):
        raise TypeError("invalid file: %r" % file)
    if not isinstance(mode, str):
        raise TypeError("invalid mode: %r" % mode)
    if buffering is not None and not isinstance(buffering, int):
        raise TypeError("invalid buffering: %r" % buffering)
    if encoding is not None and not isinstance(encoding, str):
        raise TypeError("invalid encoding: %r" % encoding)
    if errors is not None and not isinstance(errors, str):
        raise TypeError("invalid errors: %r" % errors)
    # --- Mode parsing: reject unknown or duplicated mode characters ---
    modes = set(mode)
    if modes - set("arwb+tU") or len(mode) > len(modes):
        raise ValueError("invalid mode: %r" % mode)
    reading = "r" in modes
    writing = "w" in modes
    appending = "a" in modes
    updating = "+" in modes
    text = "t" in modes
    binary = "b" in modes
    if "U" in modes:
        # 'U' (universal newlines) implies reading and is incompatible
        # with any writing mode.
        if writing or appending:
            raise ValueError("can't use U and writing mode at once")
        reading = True
    if text and binary:
        raise ValueError("can't have text and binary mode at once")
    if reading + writing + appending > 1:
        raise ValueError("can't have read/write/append mode at once")
    if not (reading or writing or appending):
        raise ValueError("must have exactly one of read/write/append mode")
    if binary and encoding is not None:
        raise ValueError("binary mode doesn't take an encoding argument")
    if binary and errors is not None:
        raise ValueError("binary mode doesn't take an errors argument")
    if binary and newline is not None:
        raise ValueError("binary mode doesn't take a newline argument")
    # --- Build the raw (unbuffered) layer ---
    raw = FileIO(file,
                 (reading and "r" or "") +
                 (writing and "w" or "") +
                 (appending and "a" or "") +
                 (updating and "+" or ""),
                 closefd)
    if buffering is None:
        buffering = -1
    # buffering == 1 requests line buffering; a tty defaults to it too.
    line_buffering = False
    if buffering == 1 or buffering < 0 and raw.isatty():
        buffering = -1
        line_buffering = True
    if buffering < 0:
        # Pick a buffer size: the device block size when available,
        # DEFAULT_BUFFER_SIZE otherwise.
        buffering = DEFAULT_BUFFER_SIZE
        try:
            bs = os.fstat(raw.fileno()).st_blksize
        except (os.error, AttributeError):
            pass
        else:
            if bs > 1:
                buffering = bs
    if buffering < 0:
        raise ValueError("invalid buffering size")
    if buffering == 0:
        if binary:
            # Unbuffered binary: return the raw layer directly, tagging
            # it with the name/mode the caller asked for.
            raw._name = file
            raw._mode = mode
            return raw
        raise ValueError("can't have unbuffered text I/O")
    # --- Build the buffered layer ---
    if updating:
        buffer = BufferedRandom(raw, buffering)
    elif writing or appending:
        buffer = BufferedWriter(raw, buffering)
    elif reading:
        buffer = BufferedReader(raw, buffering)
    else:
        raise ValueError("unknown mode: %r" % mode)
    if binary:
        buffer.name = file
        buffer.mode = mode
        return buffer
    # --- Build the text layer on top of the buffered layer ---
    text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
    text.name = file
    text.mode = mode
    return text
|
2007-02-27 01:47:44 -04:00
|
|
|
|
2007-12-08 13:47:40 -04:00
|
|
|
class _DocDescriptor:
|
|
|
|
"""Helper for builtins.open.__doc__
|
|
|
|
"""
|
|
|
|
def __get__(self, obj, typ):
|
|
|
|
return (
|
|
|
|
"open(file, mode='r', buffering=None, encoding=None, "
|
|
|
|
"errors=None, newline=None, closefd=True)\n\n" +
|
|
|
|
open.__doc__)
|
2007-02-27 01:47:44 -04:00
|
|
|
|
2007-10-19 20:16:50 -03:00
|
|
|
class OpenWrapper:

    """Wrapper for builtins.open

    Trick so that open won't become a bound method when stored
    as a class variable (as dumbdbm does).

    See initstdio() in Python/pythonrun.c.
    """
    # Served by a descriptor so the doc always mirrors open.__doc__.
    __doc__ = _DocDescriptor()

    def __new__(cls, *args, **kwargs):
        # Never actually creates an OpenWrapper; delegates to open().
        return open(*args, **kwargs)
|
|
|
|
|
|
|
|
|
2007-07-10 06:12:49 -03:00
|
|
|
class UnsupportedOperation(ValueError, IOError):

    """Raised when a stream does not support the requested operation.

    Subclasses both ValueError and IOError so it can be caught as
    either exception type.
    """
|
|
|
|
|
|
|
|
|
2007-08-22 15:14:10 -03:00
|
|
|
class IOBase(metaclass=abc.ABCMeta):

    """Base class for all I/O classes.

    This class provides dummy implementations for many methods that
    derived classes can override selectively; the default
    implementations represent a file that cannot be read, written or
    seeked.

    This does not define read(), readinto() and write(), nor
    readline() and friends, since their signatures vary per layer.

    Note that calling any method (even inquiries) on a closed file is
    undefined.  Implementations may raise IOError in this case.
    """

    ### Internal ###

    def _unsupported(self, name: str) -> IOError:
        """Internal: raise an exception for unsupported operations."""
        raise UnsupportedOperation("%s.%s() not supported" %
                                   (self.__class__.__name__, name))

    ### Positioning ###

    def seek(self, pos: int, whence: int = 0) -> int:
        """seek(pos: int, whence: int = 0) -> int.  Change stream position.

        Seek to byte offset pos relative to position indicated by whence:
             0  Start of stream (the default).  pos should be >= 0;
             1  Current position - pos may be negative;
             2  End of stream - pos usually negative.
        Returns the new absolute position.
        """
        self._unsupported("seek")

    def tell(self) -> int:
        """tell() -> int.  Return current stream position."""
        # A relative seek of zero bytes reports the current offset.
        return self.seek(0, 1)

    def truncate(self, pos: int = None) -> int:
        """truncate(pos: int = None) -> int. Truncate file to pos bytes.

        Pos defaults to the current IO position as reported by tell().
        Returns the new size.
        """
        self._unsupported("truncate")

    ### Flush and close ###

    def flush(self) -> None:
        """flush() -> None.  Flushes write buffers, if applicable.

        This is a no-op for read-only and non-blocking streams.
        """
        # XXX Should this return the number of bytes written???

    # Name-mangled (per-class) closed flag; subclasses cannot clobber it.
    __closed = False

    def close(self) -> None:
        """close() -> None.  Flushes and closes the IO object.

        This must be idempotent.  It should also set a flag for the
        'closed' property (see below) to test.
        """
        if not self.__closed:
            try:
                self.flush()
            except IOError:
                pass  # If flush() fails, just give up
            self.__closed = True

    def __del__(self) -> None:
        """Destructor.  Calls close()."""
        # The try/except block is in case this is called at program
        # exit time, when it's possible that globals have already been
        # deleted, and then the close() call might fail.  Since
        # there's nothing we can do about such failures and they annoy
        # the end users, we suppress the traceback.
        try:
            self.close()
        except:
            pass

    ### Inquiries ###

    def seekable(self) -> bool:
        """seekable() -> bool.  Return whether object supports random access.

        If False, seek(), tell() and truncate() will raise IOError.
        This method may need to do a test seek().
        """
        return False

    def _checkSeekable(self, msg=None):
        """Internal: raise an IOError if file is not seekable."""
        if not self.seekable():
            raise IOError("File or stream is not seekable."
                          if msg is None else msg)

    def readable(self) -> bool:
        """readable() -> bool.  Return whether object was opened for reading.

        If False, read() will raise IOError.
        """
        return False

    def _checkReadable(self, msg=None):
        """Internal: raise an IOError if file is not readable."""
        if not self.readable():
            raise IOError("File or stream is not readable."
                          if msg is None else msg)

    def writable(self) -> bool:
        """writable() -> bool.  Return whether object was opened for writing.

        If False, write() and truncate() will raise IOError.
        """
        return False

    def _checkWritable(self, msg=None):
        """Internal: raise an IOError if file is not writable."""
        if not self.writable():
            raise IOError("File or stream is not writable."
                          if msg is None else msg)

    @property
    def closed(self):
        """closed: bool.  True iff the file has been closed.

        For backwards compatibility, this is a property, not a predicate.
        """
        return self.__closed

    def _checkClosed(self, msg=None):
        """Internal: raise a ValueError if file is closed."""
        if self.closed:
            raise ValueError("I/O operation on closed file."
                             if msg is None else msg)

    ### Context manager ###

    def __enter__(self) -> "IOBase":  # That's a forward reference
        """Context management protocol.  Returns self."""
        self._checkClosed()
        return self

    def __exit__(self, *args) -> None:
        """Context management protocol.  Calls close()"""
        self.close()

    ### Lower-level APIs ###

    # XXX Should these be present even if unimplemented?

    def fileno(self) -> int:
        """fileno() -> int.  Returns underlying file descriptor if one exists.

        Raises IOError if the IO object does not use a file descriptor.
        """
        self._unsupported("fileno")

    def isatty(self) -> bool:
        """isatty() -> int.  Returns whether this is an 'interactive' stream.

        Returns False if we don't know.
        """
        self._checkClosed()
        return False

    ### Readline[s] and writelines ###

    def readline(self, limit: int = -1) -> bytes:
        """For backwards compatibility, a (slowish) readline()."""
        if hasattr(self, "peek"):
            # Buffered streams expose peek(); use it to size each read()
            # so we grab everything up to (and including) the newline.
            def nreadahead():
                readahead = self.peek(1)
                if not readahead:
                    return 1
                n = (readahead.find(b"\n") + 1) or len(readahead)
                if limit >= 0:
                    n = min(n, limit)
                return n
        else:
            # No peek(): fall back to reading one byte at a time.
            def nreadahead():
                return 1
        if limit is None:
            limit = -1
        res = bytearray()
        while limit < 0 or len(res) < limit:
            b = self.read(nreadahead())
            if not b:
                break
            res += b
            if res.endswith(b"\n"):
                break
        return bytes(res)

    def __iter__(self):
        self._checkClosed()
        return self

    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line

    def readlines(self, hint=None):
        """Return a list of lines; with a hint, stop after reading
        roughly that many bytes (the line containing byte hint is
        still included)."""
        if hint is None:
            return list(self)
        n = 0
        lines = []
        for line in self:
            lines.append(line)
            n += len(line)
            if n >= hint:
                break
        return lines

    def writelines(self, lines):
        self._checkClosed()
        for line in lines:
            self.write(line)
|
|
|
|
|
2007-02-27 11:45:13 -04:00
|
|
|
|
2007-04-09 21:22:16 -03:00
|
|
|
class RawIOBase(IOBase):

    """Base class for raw binary I/O.

    The read() method is implemented by calling readinto(); derived
    classes that want to support read() only need to implement
    readinto() as a primitive operation.  In general, readinto()
    can be more efficient than read().

    (It would be tempting to also provide an implementation of
    readinto() in terms of read(), in case the latter is a more
    suitable primitive operation, but that would lead to nasty
    recursion in case a subclass doesn't implement either.)
    """

    def read(self, n: int = -1) -> bytes:
        """read(n: int) -> bytes.  Read and return up to n bytes.

        Returns an empty bytes object on EOF, or None if the object is
        set not to block and has no data to read.
        """
        if n is None:
            n = -1
        if n < 0:
            return self.readall()
        b = bytearray(n.__index__())
        n = self.readinto(b)
        # Bug fix: readinto() returns None when the stream is in
        # non-blocking mode and no data is available (see its docstring).
        # Propagate that here instead of crashing on `del b[None:]`.
        if n is None:
            return None
        del b[n:]
        return bytes(b)

    def readall(self):
        """readall() -> bytes.  Read until EOF, using multiple read() calls."""
        res = bytearray()
        while True:
            data = self.read(DEFAULT_BUFFER_SIZE)
            if not data:
                break
            res += data
        return bytes(res)

    def readinto(self, b: bytearray) -> int:
        """readinto(b: bytearray) -> int.  Read up to len(b) bytes into b.

        Returns number of bytes read (0 for EOF), or None if the object
        is set not to block and has no data to read.
        """
        self._unsupported("readinto")

    def write(self, b: bytes) -> int:
        """write(b: bytes) -> int.  Write the given buffer to the IO stream.

        Returns the number of bytes written, which may be less than len(b).
        """
        self._unsupported("write")
|
2007-02-27 01:47:44 -04:00
|
|
|
|
|
|
|
|
2007-04-09 21:22:16 -03:00
|
|
|
class FileIO(_fileio._FileIO, RawIOBase):

    """Raw I/O implementation for OS files.

    This multiply inherits from _FileIO and RawIOBase to make
    isinstance(io.FileIO(), io.RawIOBase) return True without
    requiring that _fileio._FileIO inherits from io.RawIOBase (which
    would be hard to do since _fileio.c is written in C).
    """

    def close(self):
        # Close the OS-level file first, then run RawIOBase.close() so
        # the Python-level closed flag gets set as well.
        _fileio._FileIO.close(self)
        RawIOBase.close(self)

    @property
    def name(self):
        # _name is assigned externally by open() (raw._name = file).
        return self._name

    # XXX(gb): _FileIO already has a mode property
    @property
    def mode(self):
        # _mode is assigned externally by open() (raw._mode = mode).
        return self._mode
|
|
|
|
|
2007-03-07 20:43:48 -04:00
|
|
|
|
2007-04-10 11:41:39 -03:00
|
|
|
class BufferedIOBase(IOBase):

    """Base class for buffered IO objects.

    The main difference with RawIOBase is that the read() method
    supports omitting the size argument, and does not have a default
    implementation that defers to readinto().

    In addition, read(), readinto() and write() may raise
    BlockingIOError if the underlying raw stream is in non-blocking
    mode and not ready; unlike their raw counterparts, they will never
    return None.

    A typical implementation should not inherit from a RawIOBase
    implementation, but wrap one.
    """

    def read(self, n: int = None) -> bytes:
        """read(n: int = None) -> bytes.  Read and return up to n bytes.

        If the argument is omitted, None, or negative, reads and
        returns all data until EOF.

        If the argument is positive, and the underlying raw stream is
        not 'interactive', multiple raw reads may be issued to satisfy
        the byte count (unless EOF is reached first).  But for
        interactive raw streams (XXX and for pipes?), at most one raw
        read will be issued, and a short result does not imply that
        EOF is imminent.

        Returns an empty bytes array on EOF.

        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """
        self._unsupported("read")

    def readinto(self, b: bytearray) -> int:
        """readinto(b: bytearray) -> int.  Read up to len(b) bytes into b.

        Like read(), this may issue multiple reads to the underlying
        raw stream, unless the latter is 'interactive' (XXX or a
        pipe?).

        Returns the number of bytes read (0 for EOF).

        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """
        # XXX This ought to work with anything that supports the buffer API
        data = self.read(len(b))
        n = len(data)
        try:
            b[:n] = data
        except TypeError as err:
            import array
            if not isinstance(b, array.array):
                raise err
            # array.array slices only accept another array, not bytes.
            b[:n] = array.array('b', data)
        return n

    def write(self, b: bytes) -> int:
        """write(b: bytes) -> int.  Write the given buffer to the IO stream.

        Returns the number of bytes written, which is never less than
        len(b).

        Raises BlockingIOError if the buffer is full and the
        underlying raw stream cannot accept more data at the moment.
        """
        self._unsupported("write")
|
|
|
|
|
|
|
|
|
|
|
|
class _BufferedIOMixin(BufferedIOBase):

    """A mixin implementation of BufferedIOBase with an underlying raw stream.

    This passes most requests on to the underlying raw stream.  It
    does *not* provide implementations of read(), readinto() or
    write().
    """

    def __init__(self, raw):
        self.raw = raw

    ### Positioning ###

    def seek(self, pos, whence=0):
        """Delegate seeking to the underlying raw stream."""
        return self.raw.seek(pos, whence)

    def tell(self):
        """Delegate position reporting to the underlying raw stream."""
        return self.raw.tell()

    def truncate(self, pos=None):
        """Truncate the raw stream to pos (default: current position)."""
        # Flush the stream.  We're mixing buffered I/O with lower-level I/O,
        # and a flush may be necessary to synch both views of the current
        # file state.
        self.flush()
        pos = self.tell() if pos is None else pos
        return self.raw.truncate(pos)

    ### Flush and close ###

    def flush(self):
        """Delegate flushing to the underlying raw stream."""
        self.raw.flush()

    def close(self):
        """Best-effort flush, then close the underlying raw stream."""
        if self.closed:
            return
        try:
            self.flush()
        except IOError:
            pass  # If flush() fails, just give up
        self.raw.close()

    ### Inquiries ###

    def seekable(self):
        return self.raw.seekable()

    def readable(self):
        return self.raw.readable()

    def writable(self):
        return self.raw.writable()

    @property
    def closed(self):
        # This stream is closed exactly when its raw stream is.
        return self.raw.closed

    ### Lower-level APIs ###

    def fileno(self):
        return self.raw.fileno()

    def isatty(self):
        return self.raw.isatty()
|
|
|
|
|
|
|
|
|
2007-05-17 20:59:11 -03:00
|
|
|
class BytesIO(BufferedIOBase):

    """Buffered I/O implementation using an in-memory bytes buffer."""

    # XXX More docs

    def __init__(self, initial_bytes=None):
        # Keep the contents in a mutable bytearray so writes can splice
        # bytes in place.
        storage = bytearray()
        if initial_bytes is not None:
            storage += initial_bytes
        self._buffer = storage
        self._pos = 0

    def getvalue(self):
        """Return an immutable snapshot of the entire buffer."""
        return bytes(self._buffer)

    def read(self, n=None):
        if n is None:
            n = -1
        if n < 0:
            n = len(self._buffer)
        end = min(len(self._buffer), self._pos + n)
        chunk = self._buffer[self._pos:end]
        self._pos = end
        return bytes(chunk)

    def read1(self, n):
        """In-memory reads never block, so read1 is simply read."""
        return self.read(n)

    def write(self, b):
        if self.closed:
            raise ValueError("write to closed file")
        if isinstance(b, str):
            raise TypeError("can't write str to binary stream")
        count = len(b)
        endpos = self._pos + count
        if endpos > len(self._buffer):
            # Zero-fill the gap between the current end of the buffer
            # and the write position (sparse-file style semantics).
            gap = b'\x00' * (endpos - len(self._buffer) - count)
            self._buffer[self._pos:endpos - count] = gap
        self._buffer[self._pos:endpos] = b
        self._pos = endpos
        return count

    def seek(self, pos, whence=0):
        try:
            pos = pos.__index__()
        except AttributeError as err:
            raise TypeError("an integer is required") from err
        if whence == 0:
            target = pos
        elif whence == 1:
            target = self._pos + pos
        elif whence == 2:
            target = len(self._buffer) + pos
        else:
            raise IOError("invalid whence value")
        # Positions are clamped at zero rather than raising.
        self._pos = max(0, target)
        return self._pos

    def tell(self):
        return self._pos

    def truncate(self, pos=None):
        if pos is None:
            pos = self._pos
        del self._buffer[pos:]
        return pos

    def readable(self):
        return True

    def writable(self):
        return True

    def seekable(self):
        return True
|
2007-02-27 13:19:33 -04:00
|
|
|
|
|
|
|
|
2007-04-09 21:22:16 -03:00
|
|
|
class BufferedReader(_BufferedIOMixin):

    """Buffer for a readable sequential RawIO object."""

    def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
        """Create a new buffered reader using the given readable raw IO object.
        """
        raw._checkReadable()
        _BufferedIOMixin.__init__(self, raw)
        self._read_buf = b""
        self.buffer_size = buffer_size

    def read(self, n=None):
        """Read n bytes.

        Returns exactly n bytes of data unless the underlying raw IO
        stream reaches EOF or if the call would block in non-blocking
        mode.  If n is negative, read until EOF or until read() would
        block.
        """
        if n is None:
            n = -1
        # b"" signals EOF; None (from a non-blocking raw read) signals
        # that no data is currently available.
        empty_result = b""
        while n < 0 or len(self._read_buf) < n:
            chunk = self.raw.read(max(self.buffer_size, n))
            if chunk in (b"", None):
                empty_result = chunk
                break
            self._read_buf += chunk
        if not self._read_buf:
            return empty_result
        if n < 0:
            n = len(self._read_buf)
        result, self._read_buf = self._read_buf[:n], self._read_buf[n:]
        return result

    def peek(self, n=0):
        """Returns buffered bytes without advancing the position.

        The argument indicates a desired minimal number of bytes; we
        do at most one raw read to satisfy it.  We never return more
        than self.buffer_size.
        """
        want = min(n, self.buffer_size)
        if len(self._read_buf) < want:
            more = self.raw.read(self.buffer_size - len(self._read_buf))
            if more:
                self._read_buf += more
        return self._read_buf

    def read1(self, n):
        """Reads up to n bytes, with at most one read() system call.

        If at least one byte is already buffered, only buffered bytes
        are returned; otherwise one raw read is attempted.
        """
        if n <= 0:
            return b""
        self.peek(1)
        return self.read(min(n, len(self._read_buf)))

    def tell(self):
        # The raw stream is ahead of us by the amount still buffered.
        return self.raw.tell() - len(self._read_buf)

    def seek(self, pos, whence=0):
        if whence == 1:
            # Relative seeks must account for the unread buffered bytes.
            pos -= len(self._read_buf)
        new_pos = self.raw.seek(pos, whence)
        self._read_buf = b""
        return new_pos
|
2007-04-06 16:10:29 -03:00
|
|
|
|
2007-02-27 13:19:33 -04:00
|
|
|
|
2007-04-09 21:22:16 -03:00
|
|
|
class BufferedWriter(_BufferedIOMixin):

    """Buffer for a writable raw IO object.

    Bytes are accumulated in an internal bytearray and pushed to the
    raw stream once more than buffer_size bytes are pending.
    """

    def __init__(self, raw,
                 buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
        # Fail fast if the raw stream is not writable.
        raw._checkWritable()
        _BufferedIOMixin.__init__(self, raw)
        self.buffer_size = buffer_size
        # max_buffer_size bounds how much may pile up when the raw stream
        # blocks; it defaults to twice the regular buffer size.
        self.max_buffer_size = (2*buffer_size
                                if max_buffer_size is None
                                else max_buffer_size)
        self._write_buf = bytearray()

    def write(self, b):
        """Buffer the bytes b, flushing to the raw stream when full.

        Returns the number of bytes taken from b.  Raises
        BlockingIOError when the raw stream blocks and the pending data
        would exceed max_buffer_size.
        """
        if self.closed:
            raise ValueError("write to closed file")
        if isinstance(b, str):
            raise TypeError("can't write str to binary stream")
        # XXX we can implement some more tricks to try and avoid partial writes
        if len(self._write_buf) > self.buffer_size:
            # We're full, so let's pre-flush the buffer
            try:
                self.flush()
            except BlockingIOError as e:
                # We can't accept anything else.
                # XXX Why not just let the exception pass through?
                # Re-raise reporting that 0 bytes of *this* call were taken.
                raise BlockingIOError(e.errno, e.strerror, 0)
        before = len(self._write_buf)
        self._write_buf.extend(b)
        # Measure via buffer growth rather than len(b), so arbitrary
        # buffer-like objects are handled uniformly.
        written = len(self._write_buf) - before
        if len(self._write_buf) > self.buffer_size:
            try:
                self.flush()
            except BlockingIOError as e:
                if (len(self._write_buf) > self.max_buffer_size):
                    # We've hit max_buffer_size. We have to accept a partial
                    # write and cut back our buffer.
                    overage = len(self._write_buf) - self.max_buffer_size
                    self._write_buf = self._write_buf[:self.max_buffer_size]
                    raise BlockingIOError(e.errno, e.strerror, overage)
        return written

    def flush(self):
        """Write all pending bytes to the raw stream.

        On BlockingIOError from the raw stream, the partial progress is
        recorded and the exception re-raised with the total written count.
        """
        if self.closed:
            raise ValueError("flush of closed file")
        written = 0
        try:
            while self._write_buf:
                # The raw stream may accept only part of the data.
                n = self.raw.write(self._write_buf)
                del self._write_buf[:n]
                written += n
        except BlockingIOError as e:
            n = e.characters_written
            del self._write_buf[:n]
            written += n
            raise BlockingIOError(e.errno, e.strerror, written)

    def tell(self):
        # The raw position lags behind by the amount still buffered.
        return self.raw.tell() + len(self._write_buf)

    def seek(self, pos, whence=0):
        # Flush pending writes so the raw seek sees a consistent state.
        self.flush()
        return self.raw.seek(pos, whence)
|
2007-03-06 21:00:12 -04:00
|
|
|
|
2007-02-27 13:19:33 -04:00
|
|
|
|
2007-04-09 21:22:16 -03:00
|
|
|
class BufferedRWPair(BufferedIOBase):

    """A buffered reader and writer object together.

    A buffered reader object and buffered writer object put together
    to form a sequential IO object that can read and write.

    This is typically used with a socket or two-way pipe.

    XXX The usefulness of this (compared to having two separate IO
    objects) is questionable.
    """

    def __init__(self, reader, writer,
                 buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
        """Constructor.

        The arguments are two RawIO instances.
        """
        reader._checkReadable()
        writer._checkWritable()
        self.reader = BufferedReader(reader, buffer_size)
        self.writer = BufferedWriter(writer, buffer_size, max_buffer_size)

    def read(self, n=None):
        if n is None:
            n = -1
        return self.reader.read(n)

    def readinto(self, b):
        return self.reader.readinto(b)

    def write(self, b):
        return self.writer.write(b)

    def peek(self, n=0):
        return self.reader.peek(n)

    def read1(self, n):
        return self.reader.read1(n)

    def readable(self):
        return self.reader.readable()

    def writable(self):
        return self.writer.writable()

    def flush(self):
        return self.writer.flush()

    def close(self):
        self.writer.close()
        self.reader.close()

    def isatty(self):
        return self.reader.isatty() or self.writer.isatty()

    @property
    def closed(self):
        # Bug fix: BufferedWriter.closed is a property (inherited from
        # _BufferedIOMixin), not a method -- calling it raised TypeError.
        return self.writer.closed
|
2007-04-08 20:59:06 -03:00
|
|
|
|
2007-03-06 21:00:12 -04:00
|
|
|
|
2007-04-09 21:22:16 -03:00
|
|
|
class BufferedRandom(BufferedWriter, BufferedReader):

    """Buffered interface to a seekable raw stream, both readable and
    writable.

    Combines BufferedReader and BufferedWriter behaviour over a single
    raw stream; pending writes are flushed before any read.
    """

    def __init__(self, raw,
                 buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
        raw._checkSeekable()
        BufferedReader.__init__(self, raw, buffer_size)
        BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size)

    def seek(self, pos, whence=0):
        self.flush()
        # Perform the raw seek before dropping the read buffer, so a
        # failing seek doesn't lose the buffered data forever.
        new_pos = self.raw.seek(pos, whence)
        self._read_buf = b""
        return new_pos

    def tell(self):
        if self._write_buf:
            return self.raw.tell() + len(self._write_buf)
        return self.raw.tell() - len(self._read_buf)

    def read(self, n=None):
        if n is None:
            n = -1
        self.flush()
        return BufferedReader.read(self, n)

    def readinto(self, b):
        self.flush()
        return BufferedReader.readinto(self, b)

    def peek(self, n=0):
        self.flush()
        return BufferedReader.peek(self, n)

    def read1(self, n):
        self.flush()
        return BufferedReader.read1(self, n)

    def write(self, b):
        if self._read_buf:
            # Rewind the raw stream past our readahead before writing.
            self.raw.seek(-len(self._read_buf), 1)
            self._read_buf = b""
        return BufferedWriter.write(self, b)
|
|
|
|
|
2007-04-06 14:31:18 -03:00
|
|
|
|
2007-04-10 11:41:39 -03:00
|
|
|
class TextIOBase(IOBase):

    """Base class for text I/O.

    Provides a character- and line-oriented interface over stream I/O.
    There is no readinto() method, as character strings are immutable.
    """

    def read(self, n: int = -1) -> str:
        """read(n: int = -1) -> str.  Read at most n characters from stream.

        Read from underlying buffer until we have n characters or we hit EOF.
        If n is negative or omitted, read until EOF.
        """
        self._unsupported("read")

    def write(self, s: str) -> int:
        """write(s: str) -> int.  Write string s to stream."""
        self._unsupported("write")

    def truncate(self, pos: int = None) -> int:
        """truncate(pos: int = None) -> int.  Truncate size to pos."""
        self.flush()
        if pos is None:
            pos = self.tell()
        self.seek(pos)
        return self.buffer.truncate()

    def readline(self) -> str:
        """readline() -> str.  Read until newline or EOF.

        Returns an empty string if EOF is hit immediately.
        """
        self._unsupported("readline")

    @property
    def encoding(self):
        """Name of the stream's text encoding; subclasses should override."""
        return None

    @property
    def newlines(self):
        """newlines -> None | str | tuple of str.  Line endings translated
        so far.

        Only line endings translated during reading are considered.

        Subclasses should override.
        """
        return None
|
|
|
|
|
2007-04-06 14:31:18 -03:00
|
|
|
|
2007-11-19 16:34:10 -04:00
|
|
|
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
    """Codec used when reading a file in universal newlines mode.

    It wraps another incremental decoder, translating \\r\\n and \\r into \\n.
    It also records the types of newlines encountered.
    When used with translate=False, it ensures that the newline sequence is
    returned in one piece.
    """

    # Bit flags combined in self.seennl for each newline style seen.
    _LF = 1
    _CR = 2
    _CRLF = 4

    def __init__(self, decoder, translate, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors=errors)
        self.decoder = decoder      # the wrapped incremental decoder
        self.translate = translate  # translate \r and \r\n into \n?
        self.buffer = b''           # a held-back b'\r' from the last chunk
        self.seennl = 0             # bitmask of newline styles seen so far

    def decode(self, input, final=False):
        # Re-feed any \r held back from the previous call.
        if self.buffer:
            input = self.buffer + input

        decoded = self.decoder.decode(input, final=final)

        # Hold back a trailing \r (unless this is the final chunk), so a
        # \r\n pair split across chunks is always returned in one piece.
        if decoded.endswith("\r") and not final:
            decoded = decoded[:-1]
            self.buffer = b'\r'
        else:
            self.buffer = b''

        # Record which newline styles appeared in this chunk.
        crlf_count = decoded.count('\r\n')
        cr_count = decoded.count('\r') - crlf_count
        lf_count = decoded.count('\n') - crlf_count
        self.seennl |= ((lf_count and self._LF) |
                        (cr_count and self._CR) |
                        (crlf_count and self._CRLF))

        if self.translate:
            if crlf_count:
                decoded = decoded.replace("\r\n", "\n")
            if cr_count:
                decoded = decoded.replace("\r", "\n")

        return decoded

    def getstate(self):
        inner_buf, flag = self.decoder.getstate()
        # Report the held-back \r as still-pending undecoded input.
        return inner_buf + self.buffer, flag

    def setstate(self, state):
        buf, flag = state
        if buf.endswith(b'\r'):
            self.buffer = b'\r'
            buf = buf[:-1]
        else:
            self.buffer = b''
        self.decoder.setstate((buf, flag))

    def reset(self):
        self.seennl = 0
        self.buffer = b''
        self.decoder.reset()

    @property
    def newlines(self):
        # Map the seennl bitmask onto None, a string, or a tuple of strings.
        return (None,
                "\n",
                "\r",
                ("\r", "\n"),
                "\r\n",
                ("\n", "\r\n"),
                ("\r", "\r\n"),
                ("\r", "\n", "\r\n")
               )[self.seennl]
|
|
|
|
|
|
|
|
|
2007-04-06 14:31:18 -03:00
|
|
|
class TextIOWrapper(TextIOBase):
|
|
|
|
|
|
|
|
"""Buffered text stream.
|
|
|
|
|
|
|
|
Character and line based layer over a BufferedIOBase object.
|
|
|
|
"""
|
|
|
|
|
2007-04-11 13:07:50 -03:00
|
|
|
_CHUNK_SIZE = 128
|
2007-04-06 14:31:18 -03:00
|
|
|
|
2007-12-05 21:04:26 -04:00
|
|
|
    def __init__(self, buffer, encoding=None, errors=None, newline=None,
                 line_buffering=False):
        """Wrap a buffered binary stream with a text (str) interface.

        buffer: the underlying binary stream supplying/accepting bytes.
        encoding: codec name; if None, falls back to the device encoding
            of the underlying file descriptor, then the locale's
            preferred encoding, then "ascii".
        errors: codec error handling scheme; defaults to "strict".
        newline: one of None, "", "\\n", "\\r", "\\r\\n"; controls
            universal-newline reading and newline translation on write.
        line_buffering: if true, writes containing a newline flush.
        """
        if newline not in (None, "", "\n", "\r", "\r\n"):
            raise ValueError("illegal newline value: %r" % (newline,))
        if encoding is None:
            try:
                # Prefer the encoding of the underlying device, if any.
                encoding = os.device_encoding(buffer.fileno())
            except (AttributeError, UnsupportedOperation):
                pass
            if encoding is None:
                try:
                    import locale
                except ImportError:
                    # Importing locale may fail if Python is being built
                    encoding = "ascii"
                else:
                    encoding = locale.getpreferredencoding()

        if not isinstance(encoding, str):
            raise ValueError("invalid encoding: %r" % encoding)

        if errors is None:
            errors = "strict"
        else:
            if not isinstance(errors, str):
                raise ValueError("invalid errors: %r" % errors)

        self.buffer = buffer
        self._line_buffering = line_buffering
        self._encoding = encoding
        self._errors = errors
        # Universal-newline behaviour derived from the newline argument.
        self._readuniversal = not newline
        self._readtranslate = newline is None
        self._readnl = newline
        self._writetranslate = newline != ''
        self._writenl = newline or os.linesep
        # Encoder/decoder are created lazily (_get_encoder/_get_decoder).
        self._encoder = None
        self._decoder = None
        self._decoded_chars = ''  # buffer for text returned from decoder
        self._decoded_chars_used = 0  # offset into _decoded_chars for read()
        self._snapshot = None  # info for reconstructing decoder state
        self._seekable = self._telling = self.buffer.seekable()

        # self._snapshot is either None, or a tuple (dec_flags, next_input)
        # where dec_flags is the second (integer) item of the decoder state
        # and next_input is the chunk of input bytes that comes next after the
        # snapshot point.  We use this to reconstruct decoder states in tell().

        # Naming convention:
        #   - "bytes_..." for integer variables that count input bytes
        #   - "chars_..." for integer variables that count decoded characters
2007-05-24 14:58:06 -03:00
|
|
|
    @property
    def encoding(self):
        """The name of the encoding used to decode/encode the stream."""
        return self._encoding
|
|
|
|
|
2007-12-03 18:54:21 -04:00
|
|
|
    @property
    def errors(self):
        """The codec error handling scheme in use (defaults to "strict")."""
        return self._errors
|
|
|
|
|
2007-12-05 21:04:26 -04:00
|
|
|
    @property
    def line_buffering(self):
        """Whether writes containing a newline trigger an implicit flush."""
        return self._line_buffering
|
|
|
|
|
2008-03-17 17:35:15 -03:00
|
|
|
    def seekable(self):
        """Return whether the stream is seekable (cached from the buffer)."""
        return self._seekable
|
2007-04-06 14:31:18 -03:00
|
|
|
|
2007-04-08 20:59:06 -03:00
|
|
|
    def flush(self):
        """Flush the underlying buffer and re-enable tell() support."""
        self.buffer.flush()
        # After a flush, telling the position is possible again (tell()
        # support can be disabled elsewhere) -- but only if the stream is
        # seekable at all.
        self._telling = self._seekable
|
2007-04-08 20:59:06 -03:00
|
|
|
|
|
|
|
def close(self):
|
2007-07-22 17:38:07 -03:00
|
|
|
try:
|
|
|
|
self.flush()
|
|
|
|
except:
|
|
|
|
pass # If flush() fails, just give up
|
2007-04-08 20:59:06 -03:00
|
|
|
self.buffer.close()
|
|
|
|
|
|
|
|
    @property
    def closed(self):
        """True if the underlying buffer has been closed."""
        return self.buffer.closed
|
|
|
|
|
2007-04-06 23:59:27 -03:00
|
|
|
    def fileno(self):
        """Return the file descriptor of the underlying buffer."""
        return self.buffer.fileno()
|
|
|
|
|
2007-05-27 06:14:51 -03:00
|
|
|
    def isatty(self):
        """Return whether the underlying buffer is attached to a terminal."""
        return self.buffer.isatty()
|
|
|
|
|
2007-04-06 14:31:18 -03:00
|
|
|
    def write(self, s: str):
        """Encode and write the string s; return the character count."""
        if self.closed:
            raise ValueError("write to closed file")
        if not isinstance(s, str):
            raise TypeError("can't write %s to text stream" %
                            s.__class__.__name__)
        length = len(s)
        # Does s contain a newline that matters for translation/flushing?
        haslf = (self._writetranslate or self._line_buffering) and "\n" in s
        if haslf and self._writetranslate and self._writenl != "\n":
            # Translate \n to the configured newline before encoding.
            s = s.replace("\n", self._writenl)
        encoder = self._encoder or self._get_encoder()
        # XXX What if we were just reading?
        b = encoder.encode(s)
        self.buffer.write(b)
        if self._line_buffering and (haslf or "\r" in s):
            self.flush()
        # Writing invalidates the decoder snapshot used by tell();
        # reset the decoder itself as well.
        self._snapshot = None
        if self._decoder:
            self._decoder.reset()
        return length
|
2007-04-06 14:31:18 -03:00
|
|
|
|
2008-01-07 14:30:48 -04:00
|
|
|
def _get_encoder(self):
|
|
|
|
make_encoder = codecs.getincrementalencoder(self._encoding)
|
|
|
|
self._encoder = make_encoder(self._errors)
|
|
|
|
return self._encoder
|
|
|
|
|
2007-04-06 14:31:18 -03:00
|
|
|
def _get_decoder(self):
|
|
|
|
make_decoder = codecs.getincrementaldecoder(self._encoding)
|
2007-12-03 18:54:21 -04:00
|
|
|
decoder = make_decoder(self._errors)
|
2007-11-19 16:34:10 -04:00
|
|
|
if self._readuniversal:
|
|
|
|
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
|
|
|
|
self._decoder = decoder
|
2007-04-06 14:31:18 -03:00
|
|
|
return decoder
|
|
|
|
|
2008-03-20 07:37:32 -03:00
|
|
|
# The following three methods implement an ADT for _decoded_chars.
|
|
|
|
# Text returned from the decoder is buffered here until the client
|
|
|
|
# requests it by calling our read() or readline() method.
|
|
|
|
    def _set_decoded_chars(self, chars):
        """Set the _decoded_chars buffer, resetting the consumed offset."""
        self._decoded_chars = chars
        self._decoded_chars_used = 0
|
|
|
|
|
|
|
|
def _get_decoded_chars(self, n=None):
|
|
|
|
"""Advance into the _decoded_chars buffer."""
|
|
|
|
offset = self._decoded_chars_used
|
|
|
|
if n is None:
|
|
|
|
chars = self._decoded_chars[offset:]
|
|
|
|
else:
|
|
|
|
chars = self._decoded_chars[offset:offset + n]
|
|
|
|
self._decoded_chars_used += len(chars)
|
|
|
|
return chars
|
|
|
|
|
|
|
|
def _rewind_decoded_chars(self, n):
|
|
|
|
"""Rewind the _decoded_chars buffer."""
|
|
|
|
if self._decoded_chars_used < n:
|
|
|
|
raise AssertionError("rewind decoded_chars out of bounds")
|
|
|
|
self._decoded_chars_used -= n
|
|
|
|
|
2007-04-10 22:09:03 -03:00
|
|
|
def _read_chunk(self):
|
2008-03-18 01:51:32 -03:00
|
|
|
"""
|
|
|
|
Read and decode the next chunk of data from the BufferedReader.
|
|
|
|
|
This is r61508 plus additional fixes to the handling of 'limit'
in TextIOWrapper.readline().
All tests now pass for me (except for expected skips on darwin:
bsddb, bsddb3, cProfile, codecmaps_*, curses, gdbm, largefile,
locale, normalization, ossaudiodev, pep277, socketserver,
startfile, timeout, urllib2net, urllibnet, winreg, winsound,
xmlrpc_net, zipfile64, and the -u largefile part of test_io).
2008-03-20 07:34:07 -03:00
|
|
|
The return value is True unless EOF was reached. The decoded string
|
2008-03-20 07:37:32 -03:00
|
|
|
is placed in self._decoded_chars (replacing its previous value).
|
|
|
|
The entire input chunk is sent to the decoder, though some of it
|
|
|
|
may remain buffered in the decoder, yet to be converted.
|
2008-03-18 01:51:32 -03:00
|
|
|
"""
|
|
|
|
|
2007-08-27 14:39:33 -03:00
|
|
|
if self._decoder is None:
|
|
|
|
raise ValueError("no decoder")
|
2008-03-18 01:51:32 -03:00
|
|
|
|
2008-03-20 07:37:32 -03:00
|
|
|
if self._telling:
|
|
|
|
# To prepare for tell(), we need to snapshot a point in the
|
|
|
|
# file where the decoder's input buffer is empty.
|
|
|
|
|
|
|
|
dec_buffer, dec_flags = self._decoder.getstate()
|
|
|
|
# Given this, we know there was a valid snapshot point
|
|
|
|
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
|
|
|
|
|
|
|
|
# Read a chunk, decode it, and put the result in self._decoded_chars.
|
2008-03-18 01:51:32 -03:00
|
|
|
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
|
|
|
|
eof = not input_chunk
|
2008-03-20 07:37:32 -03:00
|
|
|
self._set_decoded_chars(self._decoder.decode(input_chunk, eof))
|
|
|
|
|
|
|
|
if self._telling:
|
|
|
|
# At the snapshot point, len(dec_buffer) bytes before the read,
|
|
|
|
# the next input to be decoded is dec_buffer + input_chunk.
|
|
|
|
self._snapshot = (dec_flags, dec_buffer + input_chunk)
|
2008-03-18 01:51:32 -03:00
|
|
|
|
This is r61508 plus additional fixes to the handling of 'limit'
in TextIOWrapper.readline().
All tests now pass for me (except for expected skips on darwin:
bsddb, bsddb3, cProfile, codecmaps_*, curses, gdbm, largefile,
locale, normalization, ossaudiodev, pep277, socketserver,
startfile, timeout, urllib2net, urllibnet, winreg, winsound,
xmlrpc_net, zipfile64, and the -u largefile part of test_io).
2008-03-20 07:34:07 -03:00
|
|
|
return not eof
|
2008-03-18 01:51:32 -03:00
|
|
|
|
2008-03-20 07:34:07 -03:00
|
|
|
def _pack_cookie(self, position, dec_flags=0,
|
2008-03-20 07:37:32 -03:00
|
|
|
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
|
2008-03-18 01:51:32 -03:00
|
|
|
# The meaning of a tell() cookie is: seek to position, set the
|
2008-03-20 07:37:32 -03:00
|
|
|
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
|
2008-03-18 01:51:32 -03:00
|
|
|
# into the decoder with need_eof as the EOF flag, then skip
|
2008-03-20 07:37:32 -03:00
|
|
|
# chars_to_skip characters of the decoded result. For most simple
|
|
|
|
# decoders, tell() will often just give a byte offset in the file.
|
|
|
|
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
|
|
|
|
(chars_to_skip<<192) | bool(need_eof)<<256)
|
2008-03-18 01:51:32 -03:00
|
|
|
|
2008-03-20 07:34:07 -03:00
|
|
|
def _unpack_cookie(self, bigint):
|
2008-03-18 01:51:32 -03:00
|
|
|
rest, position = divmod(bigint, 1<<64)
|
|
|
|
rest, dec_flags = divmod(rest, 1<<64)
|
2008-03-20 07:37:32 -03:00
|
|
|
rest, bytes_to_feed = divmod(rest, 1<<64)
|
|
|
|
need_eof, chars_to_skip = divmod(rest, 1<<64)
|
|
|
|
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
|
2007-04-10 22:09:03 -03:00
|
|
|
|
|
|
|
    def tell(self):
        """Return an opaque cookie describing the current stream position.

        Because a text position cannot in general be expressed as a byte
        offset (the decoder may hold buffered state), the cookie packs
        together a safe byte position plus replay instructions; see
        _pack_cookie for the layout.  Raises IOError if the underlying
        stream is not seekable or if tell() was disabled by iteration
        (see __next__).
        """
        if not self._seekable:
            raise IOError("underlying stream is not seekable")
        if not self._telling:
            raise IOError("telling position disabled by next() call")
        # Flush pending writes so the buffer's byte position is current.
        self.flush()
        position = self.buffer.tell()
        decoder = self._decoder
        if decoder is None or self._snapshot is None:
            if self._decoded_chars:
                # This should never happen.
                raise AssertionError("pending decoded text")
            # No decoder state to account for: byte position is exact.
            return position

        # Skip backward to the snapshot point (see _read_chunk).
        dec_flags, next_input = self._snapshot
        position -= len(next_input)

        # How many decoded characters have been used up since the snapshot?
        chars_to_skip = self._decoded_chars_used
        if chars_to_skip == 0:
            # We haven't moved from the snapshot point.
            return self._pack_cookie(position, dec_flags)

        # Starting from the snapshot position, we will walk the decoder
        # forward until it gives us enough decoded characters.
        saved_state = decoder.getstate()
        try:
            # Note our initial start point.
            decoder.setstate((b'', dec_flags))
            start_pos = position
            start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
            need_eof = 0

            # Feed the decoder one byte at a time.  As we go, note the
            # nearest "safe start point" before the current location
            # (a point where the decoder has nothing buffered, so seek()
            # can safely start from there and advance to this location).
            next_byte = bytearray(1)
            for next_byte[0] in next_input:
                bytes_fed += 1
                chars_decoded += len(decoder.decode(next_byte))
                dec_buffer, dec_flags = decoder.getstate()
                if not dec_buffer and chars_decoded <= chars_to_skip:
                    # Decoder buffer is empty, so this is a safe start point.
                    start_pos += bytes_fed
                    chars_to_skip -= chars_decoded
                    start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
                if chars_decoded >= chars_to_skip:
                    break
            else:
                # We didn't get enough decoded data; signal EOF to get more.
                chars_decoded += len(decoder.decode(b'', final=True))
                need_eof = 1
                if chars_decoded < chars_to_skip:
                    raise IOError("can't reconstruct logical file position")

            # The returned cookie corresponds to the last safe start point.
            return self._pack_cookie(
                start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
        finally:
            # Always restore the decoder; the walk above must not have
            # observable side effects on the stream.
            decoder.setstate(saved_state)
|
2007-04-10 22:09:03 -03:00
|
|
|
|
2008-03-18 01:51:32 -03:00
|
|
|
    def seek(self, cookie, whence=0):
        """Set the stream position from a cookie produced by tell().

        Only cookies from this object's tell() are meaningful for
        whence == 0; relative (whence == 1) and end-relative (whence == 2)
        seeks are restricted to offset 0.  Returns the new position
        (the cookie itself, or the end-of-file byte position for
        whence == 2).  Raises IOError for unsupported seeks and
        ValueError for an invalid whence or negative cookie.
        """
        if not self._seekable:
            raise IOError("underlying stream is not seekable")
        if whence == 1: # seek relative to current position
            if cookie != 0:
                raise IOError("can't do nonzero cur-relative seeks")
            # Seeking to the current position should attempt to
            # sync the underlying buffer with the current position.
            whence = 0
            cookie = self.tell()
        if whence == 2: # seek relative to end of file
            if cookie != 0:
                raise IOError("can't do nonzero end-relative seeks")
            self.flush()
            position = self.buffer.seek(0, 2)
            # Discard any pending decoded text and decoder state; we are
            # at a fresh position with nothing buffered.
            self._set_decoded_chars('')
            self._snapshot = None
            if self._decoder:
                self._decoder.reset()
            return position
        if whence != 0:
            raise ValueError("invalid whence (%r, should be 0, 1 or 2)" %
                             (whence,))
        if cookie < 0:
            raise ValueError("negative seek position %r" % (cookie,))
        self.flush()

        # The strategy of seek() is to go back to the safe start point
        # and replay the effect of read(chars_to_skip) from there.
        start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
            self._unpack_cookie(cookie)

        # Seek back to the safe start point.
        self.buffer.seek(start_pos)
        self._set_decoded_chars('')
        self._snapshot = None

        # Restore the decoder to its state from the safe start point.
        if self._decoder or dec_flags or chars_to_skip:
            self._decoder = self._decoder or self._get_decoder()
            self._decoder.setstate((b'', dec_flags))
            self._snapshot = (dec_flags, b'')

        if chars_to_skip:
            # Just like _read_chunk, feed the decoder and save a snapshot.
            input_chunk = self.buffer.read(bytes_to_feed)
            self._set_decoded_chars(
                self._decoder.decode(input_chunk, need_eof))
            self._snapshot = (dec_flags, input_chunk)

            # Skip chars_to_skip of the decoded characters.
            if len(self._decoded_chars) < chars_to_skip:
                raise IOError("can't restore logical file position")
            self._decoded_chars_used = chars_to_skip

        return cookie
|
This is r61508 plus additional fixes to the handling of 'limit'
in TextIOWrapper.readline().
All tests now pass for me (except for expected skips on darwin:
bsddb, bsddb3, cProfile, codecmaps_*, curses, gdbm, largefile,
locale, normalization, ossaudiodev, pep277, socketserver,
startfile, timeout, urllib2net, urllibnet, winreg, winsound,
xmlrpc_net, zipfile64, and the -u largefile part of test_io).
2008-03-20 07:34:07 -03:00
|
|
|
|
2007-05-17 20:59:11 -03:00
|
|
|
def read(self, n=None):
|
|
|
|
if n is None:
|
|
|
|
n = -1
|
2007-04-06 14:31:18 -03:00
|
|
|
decoder = self._decoder or self._get_decoder()
|
|
|
|
if n < 0:
|
This is r61508 plus additional fixes to the handling of 'limit'
in TextIOWrapper.readline().
All tests now pass for me (except for expected skips on darwin:
bsddb, bsddb3, cProfile, codecmaps_*, curses, gdbm, largefile,
locale, normalization, ossaudiodev, pep277, socketserver,
startfile, timeout, urllib2net, urllibnet, winreg, winsound,
xmlrpc_net, zipfile64, and the -u largefile part of test_io).
2008-03-20 07:34:07 -03:00
|
|
|
# Read everything.
|
2008-03-20 07:37:32 -03:00
|
|
|
result = (self._get_decoded_chars() +
|
This is r61508 plus additional fixes to the handling of 'limit'
in TextIOWrapper.readline().
All tests now pass for me (except for expected skips on darwin:
bsddb, bsddb3, cProfile, codecmaps_*, curses, gdbm, largefile,
locale, normalization, ossaudiodev, pep277, socketserver,
startfile, timeout, urllib2net, urllibnet, winreg, winsound,
xmlrpc_net, zipfile64, and the -u largefile part of test_io).
2008-03-20 07:34:07 -03:00
|
|
|
decoder.decode(self.buffer.read(), final=True))
|
2008-03-20 07:37:32 -03:00
|
|
|
self._set_decoded_chars('')
|
|
|
|
self._snapshot = None
|
2008-03-18 01:51:32 -03:00
|
|
|
return result
|
2007-04-06 14:31:18 -03:00
|
|
|
else:
|
This is r61508 plus additional fixes to the handling of 'limit'
in TextIOWrapper.readline().
All tests now pass for me (except for expected skips on darwin:
bsddb, bsddb3, cProfile, codecmaps_*, curses, gdbm, largefile,
locale, normalization, ossaudiodev, pep277, socketserver,
startfile, timeout, urllib2net, urllibnet, winreg, winsound,
xmlrpc_net, zipfile64, and the -u largefile part of test_io).
2008-03-20 07:34:07 -03:00
|
|
|
# Keep reading chunks until we have n characters to return.
|
|
|
|
eof = False
|
2008-03-20 07:37:32 -03:00
|
|
|
result = self._get_decoded_chars(n)
|
This is r61508 plus additional fixes to the handling of 'limit'
in TextIOWrapper.readline().
All tests now pass for me (except for expected skips on darwin:
bsddb, bsddb3, cProfile, codecmaps_*, curses, gdbm, largefile,
locale, normalization, ossaudiodev, pep277, socketserver,
startfile, timeout, urllib2net, urllibnet, winreg, winsound,
xmlrpc_net, zipfile64, and the -u largefile part of test_io).
2008-03-20 07:34:07 -03:00
|
|
|
while len(result) < n and not eof:
|
|
|
|
eof = not self._read_chunk()
|
2008-03-20 07:37:32 -03:00
|
|
|
result += self._get_decoded_chars(n - len(result))
|
This is r61508 plus additional fixes to the handling of 'limit'
in TextIOWrapper.readline().
All tests now pass for me (except for expected skips on darwin:
bsddb, bsddb3, cProfile, codecmaps_*, curses, gdbm, largefile,
locale, normalization, ossaudiodev, pep277, socketserver,
startfile, timeout, urllib2net, urllibnet, winreg, winsound,
xmlrpc_net, zipfile64, and the -u largefile part of test_io).
2008-03-20 07:34:07 -03:00
|
|
|
return result
|
2007-04-06 14:31:18 -03:00
|
|
|
|
2007-05-17 20:59:11 -03:00
|
|
|
def __next__(self):
|
2007-04-11 13:07:50 -03:00
|
|
|
self._telling = False
|
|
|
|
line = self.readline()
|
|
|
|
if not line:
|
|
|
|
self._snapshot = None
|
|
|
|
self._telling = self._seekable
|
|
|
|
raise StopIteration
|
|
|
|
return line
|
|
|
|
|
2007-04-08 20:59:06 -03:00
|
|
|
def readline(self, limit=None):
|
2007-11-06 17:34:58 -04:00
|
|
|
if limit is None:
|
|
|
|
limit = -1
|
2007-04-08 20:59:06 -03:00
|
|
|
|
This is r61508 plus additional fixes to the handling of 'limit'
in TextIOWrapper.readline().
All tests now pass for me (except for expected skips on darwin:
bsddb, bsddb3, cProfile, codecmaps_*, curses, gdbm, largefile,
locale, normalization, ossaudiodev, pep277, socketserver,
startfile, timeout, urllib2net, urllibnet, winreg, winsound,
xmlrpc_net, zipfile64, and the -u largefile part of test_io).
2008-03-20 07:34:07 -03:00
|
|
|
# Grab all the decoded text (we will rewind any extra bits later).
|
2008-03-20 07:37:32 -03:00
|
|
|
line = self._get_decoded_chars()
|
This is r61508 plus additional fixes to the handling of 'limit'
in TextIOWrapper.readline().
All tests now pass for me (except for expected skips on darwin:
bsddb, bsddb3, cProfile, codecmaps_*, curses, gdbm, largefile,
locale, normalization, ossaudiodev, pep277, socketserver,
startfile, timeout, urllib2net, urllibnet, winreg, winsound,
xmlrpc_net, zipfile64, and the -u largefile part of test_io).
2008-03-20 07:34:07 -03:00
|
|
|
|
2007-04-06 14:31:18 -03:00
|
|
|
start = 0
|
|
|
|
decoder = self._decoder or self._get_decoder()
|
|
|
|
|
2007-08-18 18:39:55 -03:00
|
|
|
pos = endpos = None
|
2007-04-06 14:31:18 -03:00
|
|
|
while True:
|
2007-11-19 16:34:10 -04:00
|
|
|
if self._readtranslate:
|
|
|
|
# Newlines are already translated, only search for \n
|
|
|
|
pos = line.find('\n', start)
|
|
|
|
if pos >= 0:
|
|
|
|
endpos = pos + 1
|
|
|
|
break
|
|
|
|
else:
|
|
|
|
start = len(line)
|
|
|
|
|
|
|
|
elif self._readuniversal:
|
2007-08-18 18:39:55 -03:00
|
|
|
# Universal newline search. Find any of \r, \r\n, \n
|
2007-11-19 16:34:10 -04:00
|
|
|
# The decoder ensures that \r\n are not split in two pieces
|
2007-08-18 18:39:55 -03:00
|
|
|
|
|
|
|
# In C we'd look for these in parallel of course.
|
|
|
|
nlpos = line.find("\n", start)
|
|
|
|
crpos = line.find("\r", start)
|
|
|
|
if crpos == -1:
|
|
|
|
if nlpos == -1:
|
2007-11-19 16:34:10 -04:00
|
|
|
# Nothing found
|
2007-08-18 18:39:55 -03:00
|
|
|
start = len(line)
|
2007-04-06 14:31:18 -03:00
|
|
|
else:
|
2007-08-18 18:39:55 -03:00
|
|
|
# Found \n
|
2007-11-19 16:34:10 -04:00
|
|
|
endpos = nlpos + 1
|
2007-08-18 18:39:55 -03:00
|
|
|
break
|
|
|
|
elif nlpos == -1:
|
2007-11-19 16:34:10 -04:00
|
|
|
# Found lone \r
|
|
|
|
endpos = crpos + 1
|
|
|
|
break
|
2007-08-18 18:39:55 -03:00
|
|
|
elif nlpos < crpos:
|
|
|
|
# Found \n
|
2007-11-19 16:34:10 -04:00
|
|
|
endpos = nlpos + 1
|
2007-08-18 18:39:55 -03:00
|
|
|
break
|
|
|
|
elif nlpos == crpos + 1:
|
|
|
|
# Found \r\n
|
2007-11-19 16:34:10 -04:00
|
|
|
endpos = crpos + 2
|
2007-08-18 18:39:55 -03:00
|
|
|
break
|
|
|
|
else:
|
|
|
|
# Found \r
|
2007-11-19 16:34:10 -04:00
|
|
|
endpos = crpos + 1
|
2007-04-06 14:31:18 -03:00
|
|
|
break
|
|
|
|
else:
|
2007-08-18 18:39:55 -03:00
|
|
|
# non-universal
|
|
|
|
pos = line.find(self._readnl)
|
|
|
|
if pos >= 0:
|
2007-11-19 16:34:10 -04:00
|
|
|
endpos = pos + len(self._readnl)
|
2007-08-18 18:39:55 -03:00
|
|
|
break
|
2007-04-06 14:31:18 -03:00
|
|
|
|
This is r61508 plus additional fixes to the handling of 'limit'
in TextIOWrapper.readline().
All tests now pass for me (except for expected skips on darwin:
bsddb, bsddb3, cProfile, codecmaps_*, curses, gdbm, largefile,
locale, normalization, ossaudiodev, pep277, socketserver,
startfile, timeout, urllib2net, urllibnet, winreg, winsound,
xmlrpc_net, zipfile64, and the -u largefile part of test_io).
2008-03-20 07:34:07 -03:00
|
|
|
if limit >= 0 and len(line) >= limit:
|
|
|
|
endpos = limit # reached length limit
|
|
|
|
break
|
|
|
|
|
2007-04-06 14:31:18 -03:00
|
|
|
# No line ending seen yet - get more data
|
2007-08-18 18:39:55 -03:00
|
|
|
more_line = ''
|
This is r61508 plus additional fixes to the handling of 'limit'
in TextIOWrapper.readline().
All tests now pass for me (except for expected skips on darwin:
bsddb, bsddb3, cProfile, codecmaps_*, curses, gdbm, largefile,
locale, normalization, ossaudiodev, pep277, socketserver,
startfile, timeout, urllib2net, urllibnet, winreg, winsound,
xmlrpc_net, zipfile64, and the -u largefile part of test_io).
2008-03-20 07:34:07 -03:00
|
|
|
while self._read_chunk():
|
2008-03-20 07:37:32 -03:00
|
|
|
if self._decoded_chars:
|
2007-04-06 14:31:18 -03:00
|
|
|
break
|
2008-03-20 07:37:32 -03:00
|
|
|
if self._decoded_chars:
|
|
|
|
line += self._get_decoded_chars()
|
2007-08-18 18:39:55 -03:00
|
|
|
else:
|
|
|
|
# end of file
|
2008-03-20 07:37:32 -03:00
|
|
|
self._set_decoded_chars('')
|
|
|
|
self._snapshot = None
|
2007-11-19 16:34:10 -04:00
|
|
|
return line
|
2007-04-06 14:31:18 -03:00
|
|
|
|
This is r61508 plus additional fixes to the handling of 'limit'
in TextIOWrapper.readline().
All tests now pass for me (except for expected skips on darwin:
bsddb, bsddb3, cProfile, codecmaps_*, curses, gdbm, largefile,
locale, normalization, ossaudiodev, pep277, socketserver,
startfile, timeout, urllib2net, urllibnet, winreg, winsound,
xmlrpc_net, zipfile64, and the -u largefile part of test_io).
2008-03-20 07:34:07 -03:00
|
|
|
if limit >= 0 and endpos > limit:
|
|
|
|
endpos = limit # don't exceed limit
|
|
|
|
|
2008-03-20 07:37:32 -03:00
|
|
|
# Rewind _decoded_chars to just after the line ending we found.
|
|
|
|
self._rewind_decoded_chars(len(line) - endpos)
|
2007-11-19 16:34:10 -04:00
|
|
|
return line[:endpos]
|
2007-08-18 18:39:55 -03:00
|
|
|
|
|
|
|
@property
|
|
|
|
def newlines(self):
|
2007-11-19 16:34:10 -04:00
|
|
|
return self._decoder.newlines if self._decoder else None
|
2007-05-17 20:59:11 -03:00
|
|
|
|
|
|
|
class StringIO(TextIOWrapper):
    """Text I/O over an in-memory BytesIO buffer."""

    # XXX This is really slow, but fully functional

    def __init__(self, initial_value="", encoding="utf-8",
                 errors="strict", newline="\n"):
        """Create the stream, optionally pre-filled with initial_value
        (coerced to str if necessary) and rewound to the start."""
        backing = BytesIO()
        super(StringIO, self).__init__(backing,
                                       encoding=encoding,
                                       errors=errors,
                                       newline=newline)
        if initial_value:
            if not isinstance(initial_value, str):
                initial_value = str(initial_value)
            self.write(initial_value)
            self.seek(0)

    def getvalue(self):
        """Return the whole stream content as a str, flushing pending
        writes first."""
        self.flush()
        raw = self.buffer.getvalue()
        return raw.decode(self._encoding, self._errors)
|