_PyMalloc_Free(): As was already done for _PyMalloc_Malloc, rearranged
the code so that the most frequent cases come first.  Added comments.
Found a hidden assumption that a pool contains room for at least two
blocks, and added an assert to catch a violation if it ever happens in
a place where that matters.  Gave the normal "I allocated this block"
case a longer basic block to work with before it has to do its first
branch (via breaking apart an embedded assignment in an "if", and
hoisting common code out of both branches).
This commit is contained in:
Tim Peters 2002-03-31 02:18:01 +00:00
parent 1e16db6d3b
commit 2c95c99a64
1 changed file with 50 additions and 40 deletions

View File

@@ -667,6 +667,7 @@ void
_PyMalloc_Free(void *p) _PyMalloc_Free(void *p)
{ {
poolp pool; poolp pool;
block *lastfree;
poolp next, prev; poolp next, prev;
uint size; uint size;
@@ -679,57 +680,66 @@ _PyMalloc_Free(void *p)
LOCK(); LOCK();
INCMINE; INCMINE;
/* /*
* At this point, the pool is not empty * Link p to the start of the pool's freeblock list. Since
*/ * the pool had at least the p block outstanding, the pool
if ((*(block **)p = pool->freeblock) == NULL) { * wasn't empty (so it's already in a usedpools[] list, or
/* * was full and is in no list -- it's not in the freeblocks
* Pool was full * list in any case).
*/
pool->freeblock = (block *)p;
--pool->ref.count;
/*
* Frontlink to used pools
* This mimics LRU pool usage for new allocations and
* targets optimal filling when several pools contain
* blocks of the same size class.
*/
size = pool->szidx;
next = usedpools[size + size];
prev = next->prevpool;
pool->nextpool = next;
pool->prevpool = prev;
next->prevpool = pool;
prev->nextpool = pool;
UNLOCK();
return;
}
/*
* Pool was not full
*/ */
*(block **)p = lastfree = pool->freeblock;
pool->freeblock = (block *)p; pool->freeblock = (block *)p;
if (--pool->ref.count != 0) { if (lastfree) {
/*
* freeblock wasn't NULL, so the pool wasn't full,
* and the pool is in a usedpools[] list.
*/
assert(pool->ref.count < pool->capacity);
if (--pool->ref.count != 0) {
/* pool isn't empty: leave it in usedpools */
UNLOCK();
return;
}
/*
* Pool is now empty: unlink from usedpools, and
* link to the front of usedpools. This ensures that
* previously freed pools will be allocated later
* (being not referenced, they are perhaps paged out).
*/
next = pool->nextpool;
prev = pool->prevpool;
next->prevpool = prev;
prev->nextpool = next;
/* Link to freepools. This is a singly-linked list,
* and pool->prevpool isn't used there.
*/
pool->nextpool = freepools;
freepools = pool;
UNLOCK(); UNLOCK();
return; return;
} }
/* /*
* Pool is now empty, unlink from used pools * Pool was full, so doesn't currently live in any list:
* link it to the front of the appropriate usedpools[] list.
* This mimics LRU pool usage for new allocations and
* targets optimal filling when several pools contain
* blocks of the same size class.
*/ */
next = pool->nextpool; assert(pool->ref.count == pool->capacity); /* else not full */
prev = pool->prevpool; --pool->ref.count;
next->prevpool = prev; assert(pool->ref.count > 0); /* else the pool is empty */
prev->nextpool = next; size = pool->szidx;
/* next = usedpools[size + size];
* Frontlink to free pools prev = next->prevpool;
* This ensures that previously freed pools will be allocated /* insert pool before next: prev <-> pool <-> next */
* later (being not referenced, they are perhaps paged out). pool->nextpool = next;
*/ pool->prevpool = prev;
pool->nextpool = freepools; next->prevpool = pool;
freepools = pool; prev->nextpool = pool;
UNLOCK(); UNLOCK();
return; return;
} }
/* We did not allocate this address. */ /* We didn't allocate this address. */
INCTHEIRS; INCTHEIRS;
PyMem_FREE(p); PyMem_FREE(p);
} }