/* Lowest-level memory allocation interface */
#ifndef Py_PYMEM_H
#define Py_PYMEM_H
#include "pyport.h"
#ifdef __cplusplus
extern "C" {
#endif
/* BEWARE:

   Each interface exports both functions and macros.  Extension modules
   should normally use the functions, to ensure binary compatibility of
   the user's code across Python versions: if the Python runtime switches
   to its own malloc (different from the standard malloc), no
   recompilation of the extensions is required.

   The macro versions trade compatibility for speed.  They can be used
   whenever there is a performance problem, but their use implies
   recompiling the code for each new Python release.  The Python core
   uses the macros because it *is* compiled on every upgrade.  This might
   not be the case with 3rd party extensions in a custom setup (for
   example, a customer does not always have access to the source of 3rd
   party deliverables).  You have been warned! */
/*
* Raw memory interface
* ====================
*/
/* To make sure the interpreter is user-malloc friendly, all memory
   APIs are implemented on top of this one. */
/* Functions */
/* Function wrappers around PyMem_MALLOC and friends; useful if you
   need to be sure that you are using the same memory allocator as
   Python.  Note that the wrappers make sure that allocating 0 bytes
   returns a non-NULL pointer, even if the underlying malloc doesn't.
   Returned pointers must be checked for NULL explicitly; on failure
   the wrappers simply return NULL and do not set an exception. */
extern DL_IMPORT(void *) PyMem_Malloc(size_t);
extern DL_IMPORT(void *) PyMem_Realloc(void *, size_t);
extern DL_IMPORT(void) PyMem_Free(void *);
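
/* Usage sketch (illustrative only; the helper name dup_bytes is made up,
   and Python.h plus <string.h> are assumed to be included): because the
   wrappers take no action on failure, the caller checks for NULL and
   reports the error itself, typically via PyErr_NoMemory().

       static char *
       dup_bytes(const char *src, size_t len)
       {
           char *dst = (char *)PyMem_Malloc(len);
           if (dst == NULL) {
               PyErr_NoMemory();
               return NULL;
           }
           memcpy(dst, src, len);
           return dst;
       }

   The buffer is released with PyMem_Free(dst) when it is no longer
   needed. */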
/* Starting from Python 1.6, the wrappers Py_{Malloc,Realloc,Free} are
no longer supported. They used to call PyErr_NoMemory() on failure. */
/* Macros (override these if you want to use a different malloc) */
#ifndef PyMem_MALLOC
#define PyMem_MALLOC(n) malloc(n)
#define PyMem_REALLOC(p, n) realloc((void *)(p), (n))
#define PyMem_FREE(p) free((void *)(p))
#endif
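
/* For instance (an illustrative sketch only; my_malloc, my_realloc and
   my_free are hypothetical replacements with the standard signatures of
   malloc, realloc and free), a custom build could route the raw macros
   to another allocator by defining them before this header is seen:

       #define PyMem_MALLOC(n)         my_malloc(n)
       #define PyMem_REALLOC(p, n)     my_realloc((void *)(p), (n))
       #define PyMem_FREE(p)           my_free((void *)(p))

   Code compiled against the function wrappers above is unaffected by such
   an override; code using the macros must be recompiled with it. */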
/*
* Type-oriented memory interface
* ==============================
*/
/* Functions -- these macros expand to calls of the function wrappers above */
#define PyMem_New(type, n) \
        ( (type *) PyMem_Malloc((n) * sizeof(type)) )
#define PyMem_Resize(p, type, n) \
        ( (p) = (type *) PyMem_Realloc((p), (n) * sizeof(type)) )
#define PyMem_Del(p) PyMem_Free(p)
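
/* Usage sketch (illustrative only; imagined to sit inside a function
   returning PyObject *, with arbitrary sizes): grow an array of ints via
   the typed interface.  PyMem_Resize assigns its result back into the
   first argument, so on failure the variable is set to NULL and the old
   block can no longer be reached through it.

       int *arr = PyMem_New(int, 10);
       if (arr == NULL)
           return PyErr_NoMemory();
       PyMem_Resize(arr, int, 20);
       if (arr == NULL)
           return PyErr_NoMemory();
       PyMem_Del(arr);
*/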
/* Macros -- these expand to the raw PyMem_MALLOC family */
#define PyMem_NEW(type, n) \
        ( (type *) PyMem_MALLOC(_PyMem_EXTRA + (n) * sizeof(type)) )
/* See comment near MALLOC_ZERO_RETURNS_NULL in pyport.h. */
#define PyMem_RESIZE(p, type, n) \
        do { \
                size_t _sum = (n) * sizeof(type); \
                if (!_sum) \
                        _sum = 1; \
                (p) = (type *)((p) ? \
                              PyMem_REALLOC(p, _sum) : \
                              PyMem_MALLOC(_sum)); \
        } while (0)
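/* Note that the expansion above never requests 0 bytes (so a successful
   zero-length resize still yields a non-NULL pointer), and a NULL pointer
   is passed to PyMem_MALLOC rather than relying on the platform realloc()
   to accept NULL. */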
#define PyMem_DEL(p) PyMem_FREE(p)
/* PyMem_XDEL is deprecated.  To avoid the call when p is NULL,
   it is recommended to write the test explicitly in the code.
   Note that according to ANSI C, free(NULL) has no effect. */
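
/* A sketch of the explicit test recommended above, for some pointer p
   previously obtained from the PyMem_MALLOC family:

       if (p != NULL)
           PyMem_DEL(p);
*/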
/* pymalloc (private to the interpreter) */
#ifdef WITH_PYMALLOC
DL_IMPORT(void *) _PyMalloc_Malloc(size_t nbytes);
DL_IMPORT(void *) _PyMalloc_Realloc(void *p, size_t nbytes);
DL_IMPORT(void) _PyMalloc_Free(void *p);
#ifdef PYMALLOC_DEBUG
DL_IMPORT(void *) _PyMalloc_DebugMalloc(size_t nbytes);
DL_IMPORT(void *) _PyMalloc_DebugRealloc(void *p, size_t nbytes);
DL_IMPORT(void) _PyMalloc_DebugFree(void *p);
DL_IMPORT(void) _PyMalloc_DebugDumpAddress(const void *p);
DL_IMPORT(void) _PyMalloc_DebugCheckAddress(const void *p);
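/* _PyMalloc_DebugCheckAddress() sanity-checks a memory block obtained from
   pymalloc (it can be called, e.g., from gdb); it dumps information about
   the block to stderr and dies via Py_FatalError() if the block is
   detectably damaged.  _PyMalloc_DebugDumpAddress() just dumps information
   about a debug memory block to stderr. */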
#define _PyMalloc_MALLOC _PyMalloc_DebugMalloc
#define _PyMalloc_REALLOC _PyMalloc_DebugRealloc
#define _PyMalloc_FREE _PyMalloc_DebugFree
#else /* WITH_PYMALLOC && ! PYMALLOC_DEBUG */
#define _PyMalloc_MALLOC _PyMalloc_Malloc
#define _PyMalloc_REALLOC _PyMalloc_Realloc
#define _PyMalloc_FREE _PyMalloc_Free
#endif /* PYMALLOC_DEBUG */
#else /* ! WITH_PYMALLOC */
#define _PyMalloc_MALLOC PyMem_MALLOC
#define _PyMalloc_REALLOC PyMem_REALLOC
#define _PyMalloc_FREE PyMem_FREE
#endif /* WITH_PYMALLOC */
#ifdef __cplusplus
}
#endif
#endif /* !Py_PYMEM_H */