mirror of https://github.com/python/cpython
Replace KB unit with KiB (#4293)
The kB (kilobyte) unit means 1000 bytes, whereas KiB ("kibibyte") means 1024 bytes. KB was misused: replace kB or KB with KiB where the quantity is really a multiple of 1024 bytes. The same change applies to MB and GB, which become MiB and GiB. Also change the output of Tools/iobench/iobench.py, and round the size of the documentation downloads from 5.5 MB to 5 MiB.
This commit is contained in:
parent
0e163d2ced
commit
8c663fd60e
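For reference, the two unit systems differ as follows; a quick illustrative check (not part of the patch):

```python
# SI (decimal) vs IEC (binary) units: 1 kB = 1000 bytes, 1 KiB = 1024 bytes.
KB, KiB = 1000, 1024
MB, MiB = KB ** 2, KiB ** 2

print(256 * KiB)       # 262144 -- what "256 KB" in the docs actually meant
print(5.5 * MB / MiB)  # ~5.25  -- one way to read the rounding of "5.5 MB" to "5 MiB"
```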
@@ -450,7 +450,7 @@ The pymalloc allocator

 Python has a *pymalloc* allocator optimized for small objects (smaller or equal
 to 512 bytes) with a short lifetime. It uses memory mappings called "arenas"
-with a fixed size of 256 KB. It falls back to :c:func:`PyMem_RawMalloc` and
+with a fixed size of 256 KiB. It falls back to :c:func:`PyMem_RawMalloc` and
 :c:func:`PyMem_RawRealloc` for allocations larger than 512 bytes.

 *pymalloc* is the default allocator of the :c:data:`PYMEM_DOMAIN_MEM` (ex:
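A rough sketch of the numbers behind this hunk (the constants mirror the obmalloc source quoted further down; the script itself is illustrative only, and `sys._debugmallocstats()` is a CPython-specific helper):

```python
import sys

SMALL_REQUEST_THRESHOLD = 512   # pymalloc serves requests of at most 512 bytes
ARENA_SIZE = 256 << 10          # one arena maps 262144 bytes, i.e. 256 KiB

print(ARENA_SIZE, ARENA_SIZE // 1024)  # 262144 256

# CPython can dump the allocator's internal arena/pool state for inspection:
sys._debugmallocstats()         # writes the statistics to stderr
```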
@@ -267,7 +267,7 @@ include a `salt <https://en.wikipedia.org/wiki/Salt_%28cryptography%29>`_.
 should be about 16 or more bytes from a proper source, e.g. :func:`os.urandom`.

 *n* is the CPU/Memory cost factor, *r* the block size, *p* parallelization
-factor and *maxmem* limits memory (OpenSSL 1.1.0 defaults to 32 MB).
+factor and *maxmem* limits memory (OpenSSL 1.1.0 defaults to 32 MiB).
 *dklen* is the length of the derived key.

 Availability: OpenSSL 1.1+
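A minimal usage sketch of the parameters this doc line describes (the password and cost settings are made up for illustration; requires OpenSSL 1.1+ as noted above):

```python
import hashlib, os

salt = os.urandom(16)                     # 16+ bytes from a proper source
key = hashlib.scrypt(b'not a real password',
                     salt=salt,
                     n=2**14, r=8, p=1,   # cost factor, block size, parallelization
                     maxmem=64 * 1024**2, # raise OpenSSL's 32 MiB default to 64 MiB
                     dklen=64)
print(len(key))                           # 64
```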
@@ -373,7 +373,7 @@ The :mod:`locale` module defines the following exception and functions:

 Please note that this function works like :meth:`format_string` but will
 only work for exactly one ``%char`` specifier. For example, ``'%f'`` and
-``'%.0f'`` are both valid specifiers, but ``'%f kB'`` is not.
+``'%.0f'`` are both valid specifiers, but ``'%f KiB'`` is not.

 For whole format strings, use :func:`format_string`.

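A short illustrative sketch of the rule this doc line states (``locale.format()`` was later deprecated in favour of ``format_string()``, so the sketch sticks to ``format_string()``; output depends on the active locale):

```python
import locale
locale.setlocale(locale.LC_ALL, '')   # pick up the user's default locale

# a single %-specifier, which is all format() ever supported:
print(locale.format_string('%.1f', 1536.0))      # e.g. '1536.0' or '1536,0'
# a whole format string -- only format_string() handles this:
print(locale.format_string('%.1f KiB', 1536.0))  # e.g. '1536.0 KiB'
```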
@@ -1034,7 +1034,7 @@ Connection objects are usually created using :func:`Pipe` -- see also
 Send an object to the other end of the connection which should be read
 using :meth:`recv`.

-The object must be picklable. Very large pickles (approximately 32 MB+,
+The object must be picklable. Very large pickles (approximately 32 MiB+,
 though it depends on the OS) may raise a :exc:`ValueError` exception.

 .. method:: recv()
@@ -1071,7 +1071,7 @@ Connection objects are usually created using :func:`Pipe` -- see also

 If *offset* is given then data is read from that position in *buffer*. If
 *size* is given then that many bytes will be read from buffer. Very large
-buffers (approximately 32 MB+, though it depends on the OS) may raise a
+buffers (approximately 32 MiB+, though it depends on the OS) may raise a
 :exc:`ValueError` exception

 .. method:: recv_bytes([maxlength])
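A small end-to-end sketch of the two methods documented in these hunks (the payloads here are tiny; the 32 MiB caveat only bites for very large ones):

```python
from multiprocessing import Pipe

a, b = Pipe()

a.send({'answer': 42})                 # the object must be picklable
print(b.recv())                        # {'answer': 42}

buf = bytearray(b'..HELLO..')
a.send_bytes(buf, offset=2, size=5)    # take 5 bytes starting at position 2
print(b.recv_bytes())                  # b'HELLO'
```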
@@ -18,23 +18,23 @@ in the table are the size of the download files in megabytes.</p>
 <table class="docutils">
   <tr><th>Format</th><th>Packed as .zip</th><th>Packed as .tar.bz2</th></tr>
   <tr><td>PDF (US-Letter paper size)</td>
-    <td><a href="{{ dlbase }}/python-{{ release }}-docs-pdf-letter.zip">Download</a> (ca. 13 MB)</td>
-    <td><a href="{{ dlbase }}/python-{{ release }}-docs-pdf-letter.tar.bz2">Download</a> (ca. 13 MB)</td>
+    <td><a href="{{ dlbase }}/python-{{ release }}-docs-pdf-letter.zip">Download</a> (ca. 13 MiB)</td>
+    <td><a href="{{ dlbase }}/python-{{ release }}-docs-pdf-letter.tar.bz2">Download</a> (ca. 13 MiB)</td>
   </tr>
   <tr><td>PDF (A4 paper size)</td>
-    <td><a href="{{ dlbase }}/python-{{ release }}-docs-pdf-a4.zip">Download</a> (ca. 13 MB)</td>
-    <td><a href="{{ dlbase }}/python-{{ release }}-docs-pdf-a4.tar.bz2">Download</a> (ca. 13 MB)</td>
+    <td><a href="{{ dlbase }}/python-{{ release }}-docs-pdf-a4.zip">Download</a> (ca. 13 MiB)</td>
+    <td><a href="{{ dlbase }}/python-{{ release }}-docs-pdf-a4.tar.bz2">Download</a> (ca. 13 MiB)</td>
   </tr>
   <tr><td>HTML</td>
-    <td><a href="{{ dlbase }}/python-{{ release }}-docs-html.zip">Download</a> (ca. 9 MB)</td>
-    <td><a href="{{ dlbase }}/python-{{ release }}-docs-html.tar.bz2">Download</a> (ca. 6 MB)</td>
+    <td><a href="{{ dlbase }}/python-{{ release }}-docs-html.zip">Download</a> (ca. 9 MiB)</td>
+    <td><a href="{{ dlbase }}/python-{{ release }}-docs-html.tar.bz2">Download</a> (ca. 6 MiB)</td>
   </tr>
   <tr><td>Plain Text</td>
-    <td><a href="{{ dlbase }}/python-{{ release }}-docs-text.zip">Download</a> (ca. 3 MB)</td>
-    <td><a href="{{ dlbase }}/python-{{ release }}-docs-text.tar.bz2">Download</a> (ca. 2 MB)</td>
+    <td><a href="{{ dlbase }}/python-{{ release }}-docs-text.zip">Download</a> (ca. 3 MiB)</td>
+    <td><a href="{{ dlbase }}/python-{{ release }}-docs-text.tar.bz2">Download</a> (ca. 2 MiB)</td>
   </tr>
   <tr><td>EPUB</td>
-    <td><a href="{{ dlbase }}/python-{{ release }}-docs.epub">Download</a> (ca. 5.5 MB)</td>
+    <td><a href="{{ dlbase }}/python-{{ release }}-docs.epub">Download</a> (ca. 5 MiB)</td>
     <td></td>
   </tr>
 </table>
@@ -160,7 +160,7 @@
 */
 #ifdef WITH_MEMORY_LIMITS
 #ifndef SMALL_MEMORY_LIMIT
-#define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MB -- more? */
+#define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MiB -- more? */
 #endif
 #endif

@@ -177,7 +177,7 @@
 * Arenas are allocated with mmap() on systems supporting anonymous memory
 * mappings to reduce heap fragmentation.
 */
-#define ARENA_SIZE (256 << 10) /* 256KB */
+#define ARENA_SIZE (256 << 10) /* 256 KiB */

 #ifdef WITH_MEMORY_LIMITS
 #define MAX_ARENAS (SMALL_MEMORY_LIMIT / ARENA_SIZE)
@@ -435,7 +435,7 @@ currently in use isn't on either list.
 */

 /* How many arena_objects do we initially allocate?
- * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
+ * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4 MiB before growing the
 * `arenas` vector.
 */
 #define INITIAL_ARENA_OBJECTS 16
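The macro arithmetic across these three obmalloc hunks checks out; a quick illustrative recomputation:

```python
ARENA_SIZE = 256 << 10                 # 262144 bytes = 256 KiB
SMALL_MEMORY_LIMIT = 64 * 1024 * 1024  # 64 MiB

print(SMALL_MEMORY_LIMIT // ARENA_SIZE)  # 256 -- MAX_ARENAS under WITH_MEMORY_LIMITS
print(16 * ARENA_SIZE // 2**20)          # 4   -- 16 initial arena_objects cover 4 MiB
```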
@@ -234,8 +234,8 @@ class CygwinCCompiler(UnixCCompiler):
         # who wants symbols and a many times larger output file
         # should explicitly switch the debug mode on
         # otherwise we let dllwrap/ld strip the output file
-        # (On my machine: 10KB < stripped_file < ??100KB
-        #   unstripped_file = stripped_file + XXX KB
+        # (On my machine: 10KiB < stripped_file < ??100KiB
+        #   unstripped_file = stripped_file + XXX KiB
         #  ( XXX=254 for a typical python extension))
         if not debug:
             extra_preargs.append("-s")
@@ -308,7 +308,7 @@ class GzipFile(_compression.BaseStream):
         if self.mode == WRITE:
             fileobj.write(self.compress.flush())
             write32u(fileobj, self.crc)
-            # self.size may exceed 2GB, or even 4GB
+            # self.size may exceed 2 GiB, or even 4 GiB
             write32u(fileobj, self.size & 0xffffffff)
         elif self.mode == READ:
             self._buffer.close()
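The masking on the line below the changed comment is why the comment matters: the gzip trailer stores the uncompressed size modulo 2**32 (per RFC 1952), so sizes past 4 GiB wrap. An illustrative check:

```python
size = 5 * 1024**3            # pretend we wrote 5 GiB of uncompressed data
isize = size & 0xffffffff     # what write32u() actually stores in the trailer
print(size, isize)            # 5368709120 1073741824 (5 GiB wraps to 1 GiB)
```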
@@ -4221,7 +4221,7 @@ class TestIgnoreEINTR(unittest.TestCase):
         conn.send('ready')
         x = conn.recv()
         conn.send(x)
-        conn.send_bytes(b'x'*(1024*1024))   # sending 1 MB should block
+        conn.send_bytes(b'x' * (1024 * 1024))  # sending 1 MiB should block

     @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
     def test_ignore(self):
@@ -96,7 +96,7 @@ resources to test. Currently only the following are defined:

     largefile -     It is okay to run some test that may create huge
                     files. These tests can take a long time and may
-                    consume >2GB of disk space temporarily.
+                    consume >2 GiB of disk space temporarily.

     network -       It is okay to run tests that use external network
                     resource, e.g. testing SSL support for sockets.
@@ -2276,7 +2276,7 @@ class AbstractPickleTests(unittest.TestCase):

 class BigmemPickleTests(unittest.TestCase):

-    # Binary protocols can serialize longs of up to 2GB-1
+    # Binary protocols can serialize longs of up to 2 GiB-1

     @bigmemtest(size=_2G, memuse=3.6, dry_run=False)
     def test_huge_long_32b(self, size):
@@ -2291,7 +2291,7 @@ class BigmemPickleTests(unittest.TestCase):
         finally:
             data = None

-    # Protocol 3 can serialize up to 4GB-1 as a bytes object
+    # Protocol 3 can serialize up to 4 GiB-1 as a bytes object
     # (older protocols don't have a dedicated opcode for bytes and are
     # too inefficient)

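The limits quoted in these comments come from 32-bit length fields in the pickle wire format; the exact byte counts, as an illustrative check:

```python
print(2**31 - 1)   # 2147483647 -- "2 GiB - 1", the most a signed 32-bit length holds
print(2**32 - 1)   # 4294967295 -- "4 GiB - 1", the most an unsigned 32-bit length holds
```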
@@ -3,7 +3,7 @@ These tests are meant to exercise that requests to create objects bigger
 than what the address space allows are properly met with an OverflowError
 (rather than crash weirdly).

-Primarily, this means 32-bit builds with at least 2 GB of available memory.
+Primarily, this means 32-bit builds with at least 2 GiB of available memory.
 You need to pass the -M option to regrtest (e.g. "-M 2.1G") for tests to
 be enabled.
 """
@@ -62,7 +62,7 @@ class BaseTest(unittest.TestCase):
     BAD_DATA = b'this is not a valid bzip2 file'

     # Some tests need more than one block of uncompressed data. Since one block
-    # is at least 100 kB, we gather some data dynamically and compress it.
+    # is at least 100,000 bytes, we gather some data dynamically and compress it.
     # Note that this assumes that compression works correctly, so we cannot
     # simply use the bigger test data for all tests.
     test_size = 0
@@ -564,7 +564,7 @@ class IOTest(unittest.TestCase):

     def test_large_file_ops(self):
         # On Windows and Mac OSX this test comsumes large resources; It takes
-        # a long time to build the >2GB file and takes >2GB of disk space
+        # a long time to build the >2 GiB file and takes >2 GiB of disk space
         # therefore the resource must be enabled to run this test.
         if sys.platform[:3] == 'win' or sys.platform == 'darwin':
             support.requires(
@@ -736,7 +736,7 @@ class IOTest(unittest.TestCase):
         if sys.maxsize > 0x7FFFFFFF:
             self.skipTest("test can only run in a 32-bit address space")
         if support.real_max_memuse < support._2G:
-            self.skipTest("test requires at least 2GB of memory")
+            self.skipTest("test requires at least 2 GiB of memory")
         with self.open(zero, "rb", buffering=0) as f:
             self.assertRaises(OverflowError, f.read)
         with self.open(zero, "rb") as f:
@@ -1421,7 +1421,7 @@ class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
     def test_constructor(self):
         BufferedReaderTest.test_constructor(self)
         # The allocation can succeed on 32-bit builds, e.g. with more
-        # than 2GB RAM and a 64-bit kernel.
+        # than 2 GiB RAM and a 64-bit kernel.
         if sys.maxsize > 0x7FFFFFFF:
             rawio = self.MockRawIO()
             bufio = self.tp(rawio)
@@ -1733,7 +1733,7 @@ class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
     def test_constructor(self):
         BufferedWriterTest.test_constructor(self)
         # The allocation can succeed on 32-bit builds, e.g. with more
-        # than 2GB RAM and a 64-bit kernel.
+        # than 2 GiB RAM and a 64-bit kernel.
         if sys.maxsize > 0x7FFFFFFF:
             rawio = self.MockRawIO()
             bufio = self.tp(rawio)
@@ -2206,7 +2206,7 @@ class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
     def test_constructor(self):
         BufferedRandomTest.test_constructor(self)
         # The allocation can succeed on 32-bit builds, e.g. with more
-        # than 2GB RAM and a 64-bit kernel.
+        # than 2 GiB RAM and a 64-bit kernel.
         if sys.maxsize > 0x7FFFFFFF:
             rawio = self.MockRawIO()
             bufio = self.tp(rawio)
@@ -9,12 +9,12 @@ from test.support import TESTFN, requires, unlink
 import io  # C implementation of io
 import _pyio as pyio # Python implementation of io

-# size of file to create (>2GB; 2GB == 2147483648 bytes)
+# size of file to create (>2 GiB; 2 GiB == 2,147,483,648 bytes)
 size = 2500000000

 class LargeFileTest:
     """Test that each file function works as expected for large
-    (i.e. > 2GB) files.
+    (i.e. > 2 GiB) files.
     """

     def setUp(self):
@@ -142,7 +142,7 @@ def setUpModule():
         pass

     # On Windows and Mac OSX this test comsumes large resources; It
-    # takes a long time to build the >2GB file and takes >2GB of disk
+    # takes a long time to build the >2 GiB file and takes >2 GiB of disk
     # space therefore the resource must be enabled to run this test.
     # If not, nothing after this line stanza will be executed.
     if sys.platform[:3] == 'win' or sys.platform == 'darwin':
@@ -777,7 +777,7 @@ class LargeMmapTests(unittest.TestCase):
             with mmap.mmap(f.fileno(), 0x10000, access=mmap.ACCESS_READ) as m:
                 self.assertEqual(m.size(), 0x180000000)

-    # Issue 11277: mmap() with large (~4GB) sparse files crashes on OS X.
+    # Issue 11277: mmap() with large (~4 GiB) sparse files crashes on OS X.

     def _test_around_boundary(self, boundary):
         tail = b' DEARdear '
@@ -171,7 +171,7 @@ class FileTests(unittest.TestCase):
         with open(support.TESTFN, "rb") as fp:
             data = os.read(fp.fileno(), size)

-        # The test does not try to read more than 2 GB at once because the
+        # The test does not try to read more than 2 GiB at once because the
         # operating system is free to return less bytes than requested.
         self.assertEqual(data, b'test')

@@ -2573,7 +2573,7 @@ class SendfileTestServer(asyncore.dispatcher, threading.Thread):
 @unittest.skipUnless(hasattr(os, 'sendfile'), "test needs os.sendfile()")
 class TestSendfile(unittest.TestCase):

-    DATA = b"12345abcde" * 16 * 1024 # 160 KB
+    DATA = b"12345abcde" * 16 * 1024  # 160 KiB
     SUPPORT_HEADERS_TRAILERS = not sys.platform.startswith("linux") and \
                                not sys.platform.startswith("solaris") and \
                                not sys.platform.startswith("sunos")
@@ -5299,7 +5299,7 @@ class SendfileUsingSendTest(ThreadedTCPSocketTest):
     Test the send() implementation of socket.sendfile().
     """

-    FILESIZE = (10 * 1024 * 1024)  # 10MB
+    FILESIZE = (10 * 1024 * 1024)  # 10 MiB
     BUFSIZE = 8192
     FILEDATA = b""
     TIMEOUT = 2
@@ -779,12 +779,12 @@ class Bz2DetectReadTest(Bz2Test, DetectReadTest):
     def test_detect_stream_bz2(self):
         # Originally, tarfile's stream detection looked for the string
         # "BZh91" at the start of the file. This is incorrect because
-        # the '9' represents the blocksize (900kB). If the file was
+        # the '9' represents the blocksize (900,000 bytes). If the file was
         # compressed using another blocksize autodetection fails.
         with open(tarname, "rb") as fobj:
             data = fobj.read()

-        # Compress with blocksize 100kB, the file starts with "BZh11".
+        # Compress with blocksize 100,000 bytes, the file starts with "BZh11".
         with bz2.BZ2File(tmpname, "wb", compresslevel=1) as fobj:
             fobj.write(data)

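The header bytes this test talks about are easy to see directly (illustrative):

```python
import bz2

for level in (1, 9):
    header = bz2.compress(b'data', compresslevel=level)[:4]
    print(header)   # b'BZh1' then b'BZh9' -- the digit is the level, i.e. blocksize / 100,000
```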
@@ -132,10 +132,10 @@ class ThreadTests(BaseTestCase):
         # Kill the "immortal" _DummyThread
         del threading._active[ident[0]]

-    # run with a small(ish) thread stack size (256kB)
+    # run with a small(ish) thread stack size (256 KiB)
     def test_various_ops_small_stack(self):
         if verbose:
-            print('with 256kB thread stack size...')
+            print('with 256 KiB thread stack size...')
         try:
             threading.stack_size(262144)
         except _thread.error:
@@ -144,10 +144,10 @@ class ThreadTests(BaseTestCase):
         self.test_various_ops()
         threading.stack_size(0)

-    # run with a large thread stack size (1MB)
+    # run with a large thread stack size (1 MiB)
     def test_various_ops_large_stack(self):
         if verbose:
-            print('with 1MB thread stack size...')
+            print('with 1 MiB thread stack size...')
         try:
             threading.stack_size(0x100000)
         except _thread.error:
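The stack sizes used in these two tests, spelled out (illustrative; platforms may reject custom sizes, which is what the except clauses above guard against):

```python
import threading

print(262144 == 256 * 1024)    # True -- the "small(ish)" 256 KiB stack
print(0x100000 == 1024 ** 2)   # True -- the "large" 1 MiB stack

try:
    threading.stack_size(0x100000)   # applies to threads started afterwards
    threading.stack_size(0)          # 0 restores the platform default
except RuntimeError:
    pass                             # changing the stack size is not supported here
```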
@@ -39,7 +39,7 @@ class TestsWithSourceFile(unittest.TestCase):
         # Create the ZIP archive.
         zipfp = zipfile.ZipFile(f, "w", compression)

-        # It will contain enough copies of self.data to reach about 6GB of
+        # It will contain enough copies of self.data to reach about 6 GiB of
         # raw data to store.
         filecount = 6*1024**3 // len(self.data)

@@ -72,7 +72,7 @@ class ChecksumTestCase(unittest.TestCase):
         self.assertEqual(binascii.crc32(b'spam'), zlib.crc32(b'spam'))


-# Issue #10276 - check that inputs >=4GB are handled correctly.
+# Issue #10276 - check that inputs >=4 GiB are handled correctly.
 class ChecksumBigBufferTestCase(unittest.TestCase):

     @bigmemtest(size=_4G + 4, memuse=1, dry_run=False)
@@ -130,7 +130,7 @@ class ExceptionTestCase(unittest.TestCase):
 class BaseCompressTestCase(object):
     def check_big_compress_buffer(self, size, compress_func):
         _1M = 1024 * 1024
-        # Generate 10MB worth of random, and expand it by repeating it.
+        # Generate 10 MiB worth of random, and expand it by repeating it.
         # The assumption is that zlib's memory is not big enough to exploit
         # such spread out redundancy.
         data = b''.join([random.getrandbits(8 * _1M).to_bytes(_1M, 'little')
@@ -1046,7 +1046,7 @@ def gzip_encode(data):
 # in the HTTP header, as described in RFC 1952
 #
 # @param data The encoded data
-# @keyparam max_decode Maximum bytes to decode (20MB default), use negative
+# @keyparam max_decode Maximum bytes to decode (20 MiB default), use negative
 #     values for unlimited decoding
 # @return the unencoded data
 # @raises ValueError if data is not correctly coded.
@@ -3035,7 +3035,7 @@ by Phil Elson.
 .. section: Library

 os.read() now uses a :c:func:`Py_ssize_t` type instead of :c:type:`int` for
-the size to support reading more than 2 GB at once. On Windows, the size is
+the size to support reading more than 2 GiB at once. On Windows, the size is
 truncted to INT_MAX. As any call to os.read(), the OS may read less bytes
 than the number of requested bytes.

@@ -3144,7 +3144,7 @@ by Pablo Torres Navarrete and SilentGhost.
 .. nonce: u_oiv9
 .. section: Library

-ssl.RAND_add() now supports strings longer than 2 GB.
+ssl.RAND_add() now supports strings longer than 2 GiB.

 ..

@@ -51,24 +51,24 @@ Note: this section may not apply when compiling Python as a 64 bit
 application.

 By default on AIX each program gets one segment register for its data
-segment. As each segment register covers 256 MB, a Python program that
-would use more than 256MB will raise a MemoryError. The standard
+segment. As each segment register covers 256 MiB, a Python program that
+would use more than 256 MiB will raise a MemoryError. The standard
 Python test suite is one such application.

 To allocate more segment registers to Python, you must use the linker
 option -bmaxdata or the ldedit tool to specify the number of bytes you
 need in the data segment.

-For example, if you want to allow 512MB of memory for Python (this is
+For example, if you want to allow 512 MiB of memory for Python (this is
 enough for the test suite to run without MemoryErrors), you should run
 the following command at the end of compilation:

     ldedit -b maxdata:0x20000000 ./python

-You can allow up to 2GB of memory for Python by using the value
+You can allow up to 2 GiB of memory for Python by using the value
 0x80000000 for maxdata.

-It is also possible to go beyond 2GB of memory by activating Large
+It is also possible to go beyond 2 GiB of memory by activating Large
 Page Use. You should consult the IBM documentation if you need to use
 this option. You can also follow the discussion of this problem
 in issue 11212 at bugs.python.org.
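The hex values in this README translate to the stated sizes; a quick illustrative conversion:

```python
MiB, GiB = 2**20, 2**30
print(0x10000000 // MiB)  # 256 -- one AIX segment register covers 256 MiB
print(0x20000000 // MiB)  # 512 -- the maxdata value suggested for the test suite
print(0x80000000 // GiB)  # 2   -- the maxdata ceiling without Large Page Use
```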
@@ -801,7 +801,7 @@ _hashlib_scrypt_impl(PyObject *module, Py_buffer *password, Py_buffer *salt,
     }

     if (maxmem < 0 || maxmem > INT_MAX) {
-        /* OpenSSL 1.1.0 restricts maxmem to 32MB. It may change in the
+        /* OpenSSL 1.1.0 restricts maxmem to 32 MiB. It may change in the
            future. The maxmem constant is private to OpenSSL. */
         PyErr_Format(PyExc_ValueError,
                      "maxmem must be positive and smaller than %d",
@@ -41,7 +41,7 @@
 #if BUFSIZ < (8*1024)
 #define SMALLCHUNK (8*1024)
 #elif (BUFSIZ >= (2 << 25))
-#error "unreasonable BUFSIZ > 64MB defined"
+#error "unreasonable BUFSIZ > 64 MiB defined"
 #else
 #define SMALLCHUNK BUFSIZ
 #endif
@@ -31,7 +31,7 @@
 #if BUFSIZ < (16*1024)
 #define SMALLCHUNK (2*1024)
 #elif (BUFSIZ >= (2 << 25))
-#error "unreasonable BUFSIZ > 64MB defined"
+#error "unreasonable BUFSIZ > 64 MiB defined"
 #else
 #define SMALLCHUNK BUFSIZ
 #endif
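Why `2 << 25` is called 64 MiB in both of these hunks; illustrative arithmetic:

```python
print(2 << 25)             # 67108864
print((2 << 25) // 2**20)  # 64 -- so a BUFSIZ beyond this is "unreasonable"
```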
@@ -1276,10 +1276,10 @@ exception is raised, and the stack size is unmodified. 32k bytes\n\
 sufficient stack space for the interpreter itself.\n\
 \n\
 Note that some platforms may have particular restrictions on values for\n\
-the stack size, such as requiring a minimum stack size larger than 32kB or\n\
+the stack size, such as requiring a minimum stack size larger than 32 KiB or\n\
 requiring allocation in multiples of the system memory page size\n\
 - platform documentation should be referred to for more information\n\
-(4kB pages are common; using multiples of 4096 for the stack size is\n\
+(4 KiB pages are common; using multiples of 4096 for the stack size is\n\
 the suggested approach in the absence of more specific information).");

 static PyMethodDef thread_methods[] = {
@@ -14,8 +14,8 @@
 # include <sys/resource.h>
 #endif

-/* Allocate at maximum 100 MB of the stack to raise the stack overflow */
-#define STACK_OVERFLOW_MAX_SIZE (100*1024*1024)
+/* Allocate at maximum 100 MiB of the stack to raise the stack overflow */
+#define STACK_OVERFLOW_MAX_SIZE (100 * 1024 * 1024)

 #define FAULTHANDLER_LATER

@@ -243,7 +243,7 @@ _Py_hashtable_print_stats(_Py_hashtable_t *ht)
            ht, ht->entries, ht->num_buckets, load * 100.0);
     if (nchains)
         printf("avg_chain_len=%.1f, ", (double)total_chain_len / nchains);
-    printf("max_chain_len=%" PY_FORMAT_SIZE_T "u, %" PY_FORMAT_SIZE_T "u kB\n",
+    printf("max_chain_len=%" PY_FORMAT_SIZE_T "u, %" PY_FORMAT_SIZE_T "u KiB\n",
            max_chain_len, size / 1024);
 }
 #endif
@@ -64,7 +64,7 @@ _PyAccu_Accumulate(_PyAccu *acc, PyObject *unicode)
  * builds) of:
  * - 8 bytes for the list slot
  * - 56 bytes for the header of the unicode object
- * that is, 64 bytes. 100000 such objects waste more than 6MB
+ * that is, 64 bytes. 100000 such objects waste more than 6 MiB
  * compared to a single concatenated string.
  */
    if (nsmall < 100000)
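The waste estimate in that comment is straightforward arithmetic (illustrative):

```python
per_object = 8 + 56                   # list slot + unicode object header, in bytes
print(100000 * per_object)            # 6400000 bytes of pure overhead
print(100000 * per_object / 2**20)    # ~6.1 -- "more than 6 MiB"
```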
@@ -206,7 +206,7 @@ PyType_ClearCache(void)
             method_cache_misses, (int) (100.0 * method_cache_misses / total));
     fprintf(stderr, "-- Method cache collisions = %zd (%d%%)\n",
             method_cache_collisions, (int) (100.0 * method_cache_collisions / total));
-    fprintf(stderr, "-- Method cache size = %zd KB\n",
+    fprintf(stderr, "-- Method cache size = %zd KiB\n",
             sizeof(method_cache) / 1024);
 #endif

@@ -599,8 +599,8 @@ _Py_attribute_data_to_stat(BY_HANDLE_FILE_INFORMATION *info, ULONG reparse_tag,
    On POSIX, use fstat().

    On Windows, use GetFileType() and GetFileInformationByHandle() which support
-   files larger than 2 GB. fstat() may fail with EOVERFLOW on files larger
-   than 2 GB because the file size type is a signed 32-bit integer: see issue
+   files larger than 2 GiB. fstat() may fail with EOVERFLOW on files larger
+   than 2 GiB because the file size type is a signed 32-bit integer: see issue
    #23152.

    On Windows, set the last Windows error and return nonzero on error. On
@@ -665,8 +665,8 @@ _Py_fstat_noraise(int fd, struct _Py_stat_struct *status)
    On POSIX, use fstat().

    On Windows, use GetFileType() and GetFileInformationByHandle() which support
-   files larger than 2 GB. fstat() may fail with EOVERFLOW on files larger
-   than 2 GB because the file size type is a signed 32-bit integer: see issue
+   files larger than 2 GiB. fstat() may fail with EOVERFLOW on files larger
+   than 2 GiB because the file size type is a signed 32-bit integer: see issue
    #23152.

    Raise an exception and return -1 on error. On Windows, set the last Windows
@@ -323,8 +323,8 @@ PyThread_release_lock(PyThread_type_lock aLock)
 }

 /* minimum/maximum thread stack sizes supported */
-#define THREAD_MIN_STACKSIZE 0x8000 /* 32kB */
-#define THREAD_MAX_STACKSIZE 0x10000000 /* 256MB */
+#define THREAD_MIN_STACKSIZE 0x8000 /* 32 KiB */
+#define THREAD_MAX_STACKSIZE 0x10000000 /* 256 MiB */

 /* set the thread stack size.
  * Return 0 if size is valid, -1 otherwise.
@@ -35,7 +35,7 @@
 #define THREAD_STACK_SIZE 0x400000
 #endif
 /* for safety, ensure a viable minimum stacksize */
-#define THREAD_STACK_MIN 0x8000 /* 32kB */
+#define THREAD_STACK_MIN 0x8000 /* 32 KiB */
 #else /* !_POSIX_THREAD_ATTR_STACKSIZE */
 #ifdef THREAD_STACK_SIZE
 #error "THREAD_STACK_SIZE defined but _POSIX_THREAD_ATTR_STACKSIZE undefined"
@@ -29,9 +29,9 @@ def text_open(fn, mode, encoding=None):
         return open(fn, mode)

 def get_file_sizes():
-    for s in ['20 KB', '400 KB', '10 MB']:
+    for s in ['20 KiB', '400 KiB', '10 MiB']:
         size, unit = s.split()
-        size = int(size) * {'KB': 1024, 'MB': 1024 ** 2}[unit]
+        size = int(size) * {'KiB': 1024, 'MiB': 1024 ** 2}[unit]
         yield s.replace(' ', ''), size

 def get_binary_files():
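What the patched generator yields after this change; a standalone re-run of the new code for illustration:

```python
def get_file_sizes():
    for s in ['20 KiB', '400 KiB', '10 MiB']:
        size, unit = s.split()
        size = int(size) * {'KiB': 1024, 'MiB': 1024 ** 2}[unit]
        yield s.replace(' ', ''), size

print(list(get_file_sizes()))
# [('20KiB', 20480), ('400KiB', 409600), ('10MiB', 10485760)]
```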
@@ -273,7 +273,7 @@ def run_all_tests(options):

 def print_results(size, n, real, cpu):
     bw = n * float(size) / 1024 ** 2 / real
-    bw = ("%4d MB/s" if bw > 100 else "%.3g MB/s") % bw
+    bw = ("%4d MiB/s" if bw > 100 else "%.3g MiB/s") % bw
     out.write(bw.rjust(12) + "\n")
     if cpu < 0.90 * real:
         out.write(" warning: test above used only %d%% CPU, "
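And the bandwidth line it feeds, run on made-up numbers (illustrative only):

```python
size, n, real = 10 * 2**20, 12, 0.5   # hypothetical: 12 passes over 10 MiB in 0.5 s
bw = n * float(size) / 1024 ** 2 / real
print(("%4d MiB/s" if bw > 100 else "%.3g MiB/s") % bw)   # ' 240 MiB/s'
```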
setup.py
@@ -907,7 +907,7 @@ class PyBuildExt(build_ext):
             missing.append('_hashlib')

         # We always compile these even when OpenSSL is available (issue #14693).
-        # It's harmless and the object code is tiny (40-50 KB per module,
+        # It's harmless and the object code is tiny (40-50 KiB per module,
         # only loaded when actually used).
         exts.append( Extension('_sha256', ['sha256module.c'],
                                depends=['hashlib.h']) )