2011-04-03 12:05:46 -03:00
|
|
|
"""Interface to the libbzip2 compression library.
|
|
|
|
|
|
|
|
This module provides a file interface, classes for incremental
|
|
|
|
(de)compression, and functions for one-shot (de)compression.
|
|
|
|
"""
|
|
|
|
|
2012-06-04 18:32:38 -03:00
|
|
|
__all__ = ["BZ2File", "BZ2Compressor", "BZ2Decompressor",
|
|
|
|
"open", "compress", "decompress"]
|
2011-04-03 12:05:46 -03:00
|
|
|
|
|
|
|
__author__ = "Nadeem Vawda <nadeem.vawda@gmail.com>"
|
|
|
|
|
|
|
|
import io
|
|
|
|
import warnings
|
|
|
|
|
2012-01-17 19:57:14 -04:00
|
|
|
try:
|
|
|
|
from threading import RLock
|
2013-07-04 18:43:24 -03:00
|
|
|
except ImportError:
|
2012-01-17 19:57:14 -04:00
|
|
|
from dummy_threading import RLock
|
|
|
|
|
2011-04-03 12:05:46 -03:00
|
|
|
from _bz2 import BZ2Compressor, BZ2Decompressor
|
|
|
|
|
|
|
|
|
|
|
|
# Internal stream-state codes for BZ2File._mode.
_MODE_CLOSED = 0    # file has been closed; all operations raise ValueError
_MODE_READ = 1      # open for reading; more compressed data may remain
_MODE_READ_EOF = 2  # open for reading, end-of-stream already reached
_MODE_WRITE = 3     # open for writing or appending

# Number of bytes of compressed data read from the underlying file per call
# in BZ2File._fill_buffer().
_BUFFER_SIZE = 8192

# Keep a reference to the builtin open(), since this module defines its own
# open() function below which shadows it.
_builtin_open = open
|
|
|
|
|
2011-04-03 12:05:46 -03:00
|
|
|
|
|
|
|
class BZ2File(io.BufferedIOBase):

    """A file object providing transparent bzip2 (de)compression.

    A BZ2File can act as a wrapper for an existing file object, or refer
    directly to a named file on disk.

    Note that BZ2File provides a *binary* file interface - data read is
    returned as bytes, and data to be written should be given as bytes.
    """

    def __init__(self, filename, mode="r", buffering=None, compresslevel=9):
        """Open a bzip2-compressed file.

        If filename is a str or bytes object, it gives the name
        of the file to be opened. Otherwise, it should be a file object,
        which will be used to read or write the compressed data.

        mode can be 'r' for reading (default), 'w' for (over)writing,
        or 'a' for appending. These can equivalently be given as 'rb',
        'wb', and 'ab'.

        buffering is ignored. Its use is deprecated.

        If mode is 'w' or 'a', compresslevel can be a number between 1
        and 9 specifying the level of compression: 1 produces the least
        compression, and 9 (default) produces the most compression.

        If mode is 'r', the input file may be the concatenation of
        multiple compressed streams.
        """
        # This lock must be recursive, so that BufferedIOBase's
        # readline(), readlines() and writelines() don't deadlock.
        self._lock = RLock()
        self._fp = None
        self._closefp = False
        self._mode = _MODE_CLOSED
        self._pos = 0           # current position in the *uncompressed* stream
        self._size = -1         # uncompressed size; -1 until EOF is reached

        if buffering is not None:
            warnings.warn("Use of 'buffering' argument is deprecated",
                          DeprecationWarning)

        if not (1 <= compresslevel <= 9):
            raise ValueError("compresslevel must be between 1 and 9")

        # Note: the empty string is accepted as an alias for "rb".
        if mode in ("", "r", "rb"):
            mode = "rb"
            mode_code = _MODE_READ
            self._decompressor = BZ2Decompressor()
            # _buffer holds decompressed-but-unread data; _buffer_offset is
            # the index of the first unread byte within it.
            self._buffer = b""
            self._buffer_offset = 0
        elif mode in ("w", "wb"):
            mode = "wb"
            mode_code = _MODE_WRITE
            self._compressor = BZ2Compressor(compresslevel)
        elif mode in ("a", "ab"):
            mode = "ab"
            mode_code = _MODE_WRITE
            self._compressor = BZ2Compressor(compresslevel)
        else:
            raise ValueError("Invalid mode: %r" % (mode,))

        if isinstance(filename, (str, bytes)):
            self._fp = _builtin_open(filename, mode)
            # We opened the file, so we are responsible for closing it.
            self._closefp = True
            self._mode = mode_code
        elif hasattr(filename, "read") or hasattr(filename, "write"):
            # Caller-supplied file object: use it, but never close it.
            self._fp = filename
            self._mode = mode_code
        else:
            raise TypeError("filename must be a str or bytes object, or a file")

    def close(self):
        """Flush and close the file.

        May be called more than once without error. Once the file is
        closed, any other operation on it will raise a ValueError.
        """
        with self._lock:
            if self._mode == _MODE_CLOSED:
                return
            try:
                if self._mode in (_MODE_READ, _MODE_READ_EOF):
                    self._decompressor = None
                elif self._mode == _MODE_WRITE:
                    # Flush any data still held by the compressor before
                    # releasing it.
                    self._fp.write(self._compressor.flush())
                    self._compressor = None
            finally:
                # Even if flushing failed, still try to close the underlying
                # file (if we own it), and always reset our state.
                try:
                    if self._closefp:
                        self._fp.close()
                finally:
                    self._fp = None
                    self._closefp = False
                    self._mode = _MODE_CLOSED
                    self._buffer = b""
                    self._buffer_offset = 0

    @property
    def closed(self):
        """True if this file is closed."""
        return self._mode == _MODE_CLOSED

    def fileno(self):
        """Return the file descriptor for the underlying file."""
        self._check_not_closed()
        return self._fp.fileno()

    def seekable(self):
        """Return whether the file supports seeking."""
        # Seeking is only emulated for read mode, and requires the
        # underlying file object to be seekable too.
        return self.readable() and self._fp.seekable()

    def readable(self):
        """Return whether the file was opened for reading."""
        self._check_not_closed()
        return self._mode in (_MODE_READ, _MODE_READ_EOF)

    def writable(self):
        """Return whether the file was opened for writing."""
        self._check_not_closed()
        return self._mode == _MODE_WRITE

    # Mode-checking helper functions.

    def _check_not_closed(self):
        # Raise ValueError if the file has been closed.
        if self.closed:
            raise ValueError("I/O operation on closed file")

    def _check_can_read(self):
        # Raise an appropriate error if the file is not open for reading.
        if self._mode not in (_MODE_READ, _MODE_READ_EOF):
            self._check_not_closed()
            raise io.UnsupportedOperation("File not open for reading")

    def _check_can_write(self):
        # Raise an appropriate error if the file is not open for writing.
        if self._mode != _MODE_WRITE:
            self._check_not_closed()
            raise io.UnsupportedOperation("File not open for writing")

    def _check_can_seek(self):
        # Raise an appropriate error if seeking is not possible: the file
        # must be open for reading, and the underlying file must be seekable.
        if self._mode not in (_MODE_READ, _MODE_READ_EOF):
            self._check_not_closed()
            raise io.UnsupportedOperation("Seeking is only supported "
                                          "on files open for reading")
        if not self._fp.seekable():
            raise io.UnsupportedOperation("The underlying file object "
                                          "does not support seeking")

    # Fill the readahead buffer if it is empty. Returns False on EOF.
    def _fill_buffer(self):
        if self._mode == _MODE_READ_EOF:
            return False
        # Depending on the input data, our call to the decompressor may not
        # return any data. In this case, try again after reading another block.
        while self._buffer_offset == len(self._buffer):
            # Prefer data left over from the previous stream before reading
            # more from the underlying file.
            rawblock = (self._decompressor.unused_data or
                        self._fp.read(_BUFFER_SIZE))

            if not rawblock:
                if self._decompressor.eof:
                    # End-of-stream marker and end of file. We're good.
                    self._mode = _MODE_READ_EOF
                    self._size = self._pos
                    return False
                else:
                    # Problem - we were expecting more compressed data.
                    raise EOFError("Compressed file ended before the "
                                   "end-of-stream marker was reached")

            if self._decompressor.eof:
                # Continue to next stream.
                self._decompressor = BZ2Decompressor()

            self._buffer = self._decompressor.decompress(rawblock)
            self._buffer_offset = 0
        return True

    # Read data until EOF.
    # If return_data is false, consume the data without returning it.
    def _read_all(self, return_data=True):
        # The loop assumes that _buffer_offset is 0. Ensure that this is true.
        self._buffer = self._buffer[self._buffer_offset:]
        self._buffer_offset = 0

        blocks = []
        while self._fill_buffer():
            if return_data:
                blocks.append(self._buffer)
            self._pos += len(self._buffer)
            self._buffer = b""
        if return_data:
            return b"".join(blocks)

    # Read a block of up to n bytes.
    # If return_data is false, consume the data without returning it.
    def _read_block(self, n, return_data=True):
        # If we have enough data buffered, return immediately.
        end = self._buffer_offset + n
        if end <= len(self._buffer):
            data = self._buffer[self._buffer_offset : end]
            self._buffer_offset = end
            self._pos += len(data)
            return data if return_data else None

        # The loop assumes that _buffer_offset is 0. Ensure that this is true.
        self._buffer = self._buffer[self._buffer_offset:]
        self._buffer_offset = 0

        blocks = []
        while n > 0 and self._fill_buffer():
            if n < len(self._buffer):
                data = self._buffer[:n]
                self._buffer_offset = n
            else:
                data = self._buffer
                self._buffer = b""
            if return_data:
                blocks.append(data)
            self._pos += len(data)
            n -= len(data)
        if return_data:
            return b"".join(blocks)

    def peek(self, n=0):
        """Return buffered data without advancing the file position.

        Always returns at least one byte of data, unless at EOF.
        The exact number of bytes returned is unspecified.
        """
        with self._lock:
            self._check_can_read()
            if not self._fill_buffer():
                return b""
            # Note: *n* is ignored; everything currently buffered is returned.
            return self._buffer[self._buffer_offset:]

    def read(self, size=-1):
        """Read up to size uncompressed bytes from the file.

        If size is negative or omitted, read until EOF is reached.
        Returns b'' if the file is already at EOF.
        """
        with self._lock:
            self._check_can_read()
            if size == 0:
                return b""
            elif size < 0:
                return self._read_all()
            else:
                return self._read_block(size)

    def read1(self, size=-1):
        """Read up to size uncompressed bytes, while trying to avoid
        making multiple reads from the underlying stream.

        Returns b'' if the file is at EOF.
        """
        # Usually, read1() calls _fp.read() at most once. However, sometimes
        # this does not give enough data for the decompressor to make progress.
        # In this case we make multiple reads, to avoid returning b"".
        with self._lock:
            self._check_can_read()
            if (size == 0 or
                # Only call _fill_buffer() if the buffer is actually empty.
                # This gives a significant speedup if *size* is small.
                (self._buffer_offset == len(self._buffer) and not self._fill_buffer())):
                return b""
            if size > 0:
                data = self._buffer[self._buffer_offset :
                                    self._buffer_offset + size]
                self._buffer_offset += len(data)
            else:
                data = self._buffer[self._buffer_offset:]
                self._buffer = b""
                self._buffer_offset = 0
            self._pos += len(data)
            return data

    def readinto(self, b):
        """Read up to len(b) bytes into b.

        Returns the number of bytes read (0 for EOF).
        """
        with self._lock:
            return io.BufferedIOBase.readinto(self, b)

    def readline(self, size=-1):
        """Read a line of uncompressed bytes from the file.

        The terminating newline (if present) is retained. If size is
        non-negative, no more than size bytes will be read (in which
        case the line may be incomplete). Returns b'' if already at EOF.
        """
        if not isinstance(size, int):
            if not hasattr(size, "__index__"):
                raise TypeError("Integer argument expected")
            size = size.__index__()
        with self._lock:
            self._check_can_read()
            # Shortcut for the common case - the whole line is in the buffer.
            if size < 0:
                end = self._buffer.find(b"\n", self._buffer_offset) + 1
                if end > 0:
                    line = self._buffer[self._buffer_offset : end]
                    self._buffer_offset = end
                    self._pos += len(line)
                    return line
            return io.BufferedIOBase.readline(self, size)

    def readlines(self, size=-1):
        """Read a list of lines of uncompressed bytes from the file.

        size can be specified to control the number of lines read: no
        further lines will be read once the total size of the lines read
        so far equals or exceeds size.
        """
        if not isinstance(size, int):
            if not hasattr(size, "__index__"):
                raise TypeError("Integer argument expected")
            size = size.__index__()
        with self._lock:
            return io.BufferedIOBase.readlines(self, size)

    def write(self, data):
        """Write a byte string to the file.

        Returns the number of uncompressed bytes written, which is
        always len(data). Note that due to buffering, the file on disk
        may not reflect the data written until close() is called.
        """
        with self._lock:
            self._check_can_write()
            compressed = self._compressor.compress(data)
            self._fp.write(compressed)
            # _pos tracks the uncompressed stream position.
            self._pos += len(data)
            return len(data)

    def writelines(self, seq):
        """Write a sequence of byte strings to the file.

        Returns the number of uncompressed bytes written.
        seq can be any iterable yielding byte strings.

        Line separators are not added between the written byte strings.
        """
        with self._lock:
            return io.BufferedIOBase.writelines(self, seq)

    # Rewind the file to the beginning of the data stream.
    def _rewind(self):
        self._fp.seek(0, 0)
        self._mode = _MODE_READ
        self._pos = 0
        self._decompressor = BZ2Decompressor()
        self._buffer = b""
        self._buffer_offset = 0

    def seek(self, offset, whence=0):
        """Change the file position.

        The new position is specified by offset, relative to the
        position indicated by whence. Values for whence are:

            0: start of stream (default); offset must not be negative
            1: current stream position
            2: end of stream; offset must not be positive

        Returns the new file position.

        Note that seeking is emulated, so depending on the parameters,
        this operation may be extremely slow.
        """
        with self._lock:
            self._check_can_seek()

            # Recalculate offset as an absolute file position.
            if whence == 0:
                pass
            elif whence == 1:
                offset = self._pos + offset
            elif whence == 2:
                # Seeking relative to EOF - we need to know the file's size.
                if self._size < 0:
                    self._read_all(return_data=False)
                offset = self._size + offset
            else:
                raise ValueError("Invalid value for whence: %s" % (whence,))

            # Make it so that offset is the number of bytes to skip forward.
            if offset < self._pos:
                # Backward seeks require restarting decompression from the
                # beginning of the file.
                self._rewind()
            else:
                offset -= self._pos

            # Read and discard data until we reach the desired position.
            self._read_block(offset, return_data=False)

            return self._pos

    def tell(self):
        """Return the current file position."""
        with self._lock:
            self._check_not_closed()
            return self._pos
|
|
|
|
|
|
|
|
2012-06-04 18:32:38 -03:00
|
|
|
def open(filename, mode="rb", compresslevel=9,
         encoding=None, errors=None, newline=None):
    """Open a bzip2-compressed file in binary or text mode.

    filename may be an actual filename (a str or bytes object), or an
    existing file object to read from or write to.

    Valid modes are "r", "rb", "w", "wb", "a" and "ab" for binary mode,
    and "rt", "wt" and "at" for text mode. The default is "rb", and the
    default compresslevel is 9.

    In binary mode this is equivalent to the BZ2File constructor:
    BZ2File(filename, mode, compresslevel). The encoding, errors and
    newline arguments must not be supplied in that case.

    In text mode, the BZ2File object is wrapped in an io.TextIOWrapper
    with the given encoding, error handling behavior, and line ending(s).
    """
    text_mode = "t" in mode
    if text_mode:
        if "b" in mode:
            raise ValueError("Invalid mode: %r" % (mode,))
    else:
        # The text-only arguments are rejected in binary mode.
        if encoding is not None:
            raise ValueError("Argument 'encoding' not supported in binary mode")
        if errors is not None:
            raise ValueError("Argument 'errors' not supported in binary mode")
        if newline is not None:
            raise ValueError("Argument 'newline' not supported in binary mode")

    raw = BZ2File(filename, mode.replace("t", ""), compresslevel=compresslevel)
    if text_mode:
        return io.TextIOWrapper(raw, encoding, errors, newline)
    return raw
|
|
|
|
|
|
|
|
|
2011-04-03 12:05:46 -03:00
|
|
|
def compress(data, compresslevel=9):
    """Compress *data* in one shot and return the compressed bytes.

    compresslevel, if given, must be an integer between 1 and 9.

    For incremental compression, use a BZ2Compressor object instead.
    """
    compressor = BZ2Compressor(compresslevel)
    body = compressor.compress(data)
    return body + compressor.flush()
|
|
|
|
|
|
|
|
|
|
|
|
def decompress(data):
    """Decompress *data* in one shot and return the uncompressed bytes.

    Handles input that is the concatenation of multiple compressed
    streams. For incremental decompression, use a BZ2Decompressor
    object instead.
    """
    if len(data) == 0:
        return b""

    chunks = []
    while True:
        # Each iteration consumes one complete compressed stream.
        engine = BZ2Decompressor()
        chunks.append(engine.decompress(data))
        if not engine.eof:
            raise ValueError("Compressed data ended before the "
                             "end-of-stream marker was reached")
        # Any bytes past the end-of-stream marker belong to the next stream.
        data = engine.unused_data
        if not data:
            return b"".join(chunks)