Merge backout of 57776eee74f2.

Larry Hastings 2015-05-23 14:57:12 -07:00
commit d68ec172d1
4 changed files with 115 additions and 747 deletions

Lib/functools.py

@@ -419,129 +419,120 @@ def lru_cache(maxsize=128, typed=False):
if maxsize is not None and not isinstance(maxsize, int):
raise TypeError('Expected maxsize to be an integer or None')
def decorating_function(user_function):
wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
return update_wrapper(wrapper, user_function)
return decorating_function
def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
# Constants shared by all lru cache instances:
sentinel = object() # unique object used to signal cache misses
make_key = _make_key # build a key from the function arguments
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
cache = {}
hits = misses = 0
full = False
cache_get = cache.get # bound method to lookup a key or return None
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
def decorating_function(user_function):
cache = {}
hits = misses = 0
full = False
cache_get = cache.get # bound method to lookup a key or return None
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
if maxsize == 0:
if maxsize == 0:
def wrapper(*args, **kwds):
# No caching -- just a statistics update after a successful call
nonlocal misses
result = user_function(*args, **kwds)
misses += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# Simple caching without ordering or size limit
nonlocal hits, misses
key = make_key(args, kwds, typed)
result = cache_get(key, sentinel)
if result is not sentinel:
hits += 1
def wrapper(*args, **kwds):
# No caching -- just a statistics update after a successful call
nonlocal misses
result = user_function(*args, **kwds)
misses += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
misses += 1
return result
else:
elif maxsize is None:
def wrapper(*args, **kwds):
# Size limited caching that tracks accesses by recency
nonlocal root, hits, misses, full
key = make_key(args, kwds, typed)
with lock:
link = cache_get(key)
if link is not None:
# Move the link to the front of the circular queue
link_prev, link_next, _key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
def wrapper(*args, **kwds):
# Simple caching without ordering or size limit
nonlocal hits, misses
key = make_key(args, kwds, typed)
result = cache_get(key, sentinel)
if result is not sentinel:
hits += 1
return result
result = user_function(*args, **kwds)
with lock:
if key in cache:
# Getting here means that this same key was added to the
# cache while the lock was released. Since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif full:
# Use the old root to store the new key and result.
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# Empty the oldest link and make it the new root.
# Keep a reference to the old key and old result to
# prevent their ref counts from going to zero during the
# update. That will prevent potentially arbitrary object
# clean-up code (i.e. __del__) from running while we're
# still adjusting the links.
root = oldroot[NEXT]
oldkey = root[KEY]
oldresult = root[RESULT]
root[KEY] = root[RESULT] = None
# Now update the cache dictionary.
del cache[oldkey]
# Save the potentially reentrant cache[key] assignment
# for last, after the root and links have been put in
# a consistent state.
cache[key] = oldroot
else:
# Put result in a new link at the front of the queue.
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
full = (len(cache) >= maxsize)
result = user_function(*args, **kwds)
cache[key] = result
misses += 1
return result
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(hits, misses, maxsize, len(cache))
else:
def cache_clear():
"""Clear the cache and cache statistics"""
nonlocal hits, misses, full
with lock:
cache.clear()
root[:] = [root, root, None, None]
hits = misses = 0
full = False
def wrapper(*args, **kwds):
# Size limited caching that tracks accesses by recency
nonlocal root, hits, misses, full
key = make_key(args, kwds, typed)
with lock:
link = cache_get(key)
if link is not None:
# Move the link to the front of the circular queue
link_prev, link_next, _key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
hits += 1
return result
result = user_function(*args, **kwds)
with lock:
if key in cache:
# Getting here means that this same key was added to the
# cache while the lock was released. Since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif full:
# Use the old root to store the new key and result.
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# Empty the oldest link and make it the new root.
# Keep a reference to the old key and old result to
# prevent their ref counts from going to zero during the
# update. That will prevent potentially arbitrary object
# clean-up code (i.e. __del__) from running while we're
# still adjusting the links.
root = oldroot[NEXT]
oldkey = root[KEY]
oldresult = root[RESULT]
root[KEY] = root[RESULT] = None
# Now update the cache dictionary.
del cache[oldkey]
# Save the potentially reentrant cache[key] assignment
# for last, after the root and links have been put in
# a consistent state.
cache[key] = oldroot
else:
# Put result in a new link at the front of the queue.
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
full = (len(cache) >= maxsize)
misses += 1
return result
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(hits, misses, maxsize, len(cache))
try:
from _functools import _lru_cache_wrapper
except ImportError:
pass
def cache_clear():
"""Clear the cache and cache statistics"""
nonlocal hits, misses, full
with lock:
cache.clear()
root[:] = [root, root, None, None]
hits = misses = 0
full = False
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
################################################################################
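Both versions of the pure-Python code above track recency with a circular doubly linked list built out of plain 4-element Python lists. Here is a minimal standalone sketch of that bookkeeping; it assumes only the PREV/NEXT/KEY/RESULT field convention from the diff, and the helper names and demo entries are illustrative, not part of the commit:

    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3    # names for the link fields

    root = []                               # root of the circular doubly linked list
    root[:] = [root, root, None, None]      # empty cache: root points at itself

    def append_link(link):
        # Splice the link in just before root, the most-recently-used position.
        last = root[PREV]
        last[NEXT] = root[PREV] = link
        link[PREV] = last
        link[NEXT] = root

    def move_to_front(link):
        # Unlink, then re-append: an O(1) recency update on a cache hit.
        link_prev, link_next = link[PREV], link[NEXT]
        link_prev[NEXT] = link_next
        link_next[PREV] = link_prev
        append_link(link)

    a = [None, None, 'a', 1]
    b = [None, None, 'b', 2]
    append_link(a)
    append_link(b)
    move_to_front(a)                             # 'a' becomes most recently used
    assert root[NEXT] is b and root[PREV] is a   # the oldest entry sits at root[NEXT]

Every operation is a handful of list-index assignments, which is why the wrapper only needs a lock around the pointer updates; the C implementation further down models the same layout with its lru_list_elem structs.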

Lib/test/test_functools.py

@@ -7,10 +7,6 @@ import sys
from test import support
import unittest
from weakref import proxy
try:
import threading
except ImportError:
threading = None
import functools
@@ -916,12 +912,12 @@ class Orderable_LT:
return self.value == other.value
class TestLRU:
class TestLRU(unittest.TestCase):
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
f = functools.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
@@ -959,7 +955,7 @@ class TestLRU:
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
@functools.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
@@ -975,7 +971,7 @@ class TestLRU:
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
@functools.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
@@ -991,7 +987,7 @@ class TestLRU:
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
@functools.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
@@ -1008,7 +1004,7 @@ class TestLRU:
self.assertEqual(currsize, 2)
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
@functools.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
@@ -1016,26 +1012,17 @@ class TestLRU:
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
functools._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=-10, currsize=1))
functools._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
@functools.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
@@ -1048,7 +1035,7 @@ class TestLRU:
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
@functools.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
@@ -1063,7 +1050,7 @@ class TestLRU:
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
@functools.lru_cache()
def fib(n):
if n < 2:
return n
@@ -1073,13 +1060,13 @@ class TestLRU:
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
functools._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
functools._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
@functools.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
@@ -1087,71 +1074,15 @@ class TestLRU:
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
functools._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
@unittest.skipUnless(threading, 'This test requires threading.')
def test_lru_cache_threaded(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
def full(f, *args):
for _ in range(10):
f(*args)
def clear(f):
for _ in range(10):
f.cache_clear()
orig_si = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
# create 5 threads in order to fill cache
threads = []
for k in range(5):
t = threading.Thread(target=full, args=[f, k, k])
t.start()
threads.append(t)
for t in threads:
t.join()
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 45)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 5)
# create 5 threads in order to fill cache and 1 to clear it
cleaner = threading.Thread(target=clear, args=[f])
cleaner.start()
threads = [cleaner]
for k in range(5):
t = threading.Thread(target=full, args=[f, k, k])
t.start()
threads.append(t)
for t in threads:
t.join()
finally:
sys.setswitchinterval(orig_si)
functools._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
@functools.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
@@ -1179,12 +1110,6 @@ class TestLRU:
def f():
pass
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):

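The test_need_for_rlock case above is easy to misread: the deadlock it guards against happens on a single thread, not between threads. In the bounded pure-Python wrapper, the internal lock is held while the key is looked up in the cache dict, so a key whose __eq__ re-enters the cached function re-acquires that lock on the same thread. An RLock allows this; a plain Lock would deadlock. A sketch of the mechanism, assuming the pure-Python implementation from the first diff (the Tricky class and the specific values are hypothetical):

    import functools

    @functools.lru_cache(maxsize=10)
    def cached(x):
        return x

    class Tricky:
        def __hash__(self):
            return hash(1)    # collide with the key already in the cache
        def __eq__(self, other):
            cached(2)         # reentrant call while the cache lock is held
            return other == 1.0

    cached(1.0)               # prime the cache (a float key is stored as a hashed
                              # tuple rather than taking _make_key's int/str fast path)
    cached(Tricky())          # the dict lookup compares keys and re-enters the cache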
Misc/NEWS

@@ -63,9 +63,6 @@ Core and Builtins
Library
-------
- Issue #14373: Added C implementation of functools.lru_cache(). Based on
patches by Matt Joiner and Alexey Kachayev.
- Issue #24230: The tempfile module now accepts bytes for prefix, suffix and dir
parameters and returns bytes in such situations (matching the os module APIs).

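The first NEWS entry records what is being unwound: issue #14373 added the C accelerator, and this backout deletes that entry along with the code, restoring the previous pure-Python layout. The seam between the two implementations is the optional-import pattern removed from Lib/functools.py in the first diff; a minimal sketch of that pattern, with the Python body stubbed out for brevity:

    def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
        # ... pure-Python implementation (see the Lib/functools.py diff above) ...
        pass

    try:
        from _functools import _lru_cache_wrapper   # C accelerator, when built
    except ImportError:
        pass                                         # keep the Python fallback

Because the C object is constructed with the same (user_function, maxsize, typed, _CacheInfo) signature, lru_cache itself never needs to know which implementation it got.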
Modules/_functoolsmodule.c

@@ -590,539 +590,6 @@ For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates\n\
of the sequence in the calculation, and serves as a default when the\n\
sequence is empty.");
/* lru_cache object **********************************************************/
/* this object is used delimit args and keywords in the cache keys */
static PyObject *kwd_mark = NULL;
struct lru_list_elem;
struct lru_cache_object;
typedef struct lru_list_elem {
PyObject_HEAD
struct lru_list_elem *prev, *next; /* borrowed links */
PyObject *key, *result;
} lru_list_elem;
static void
lru_list_elem_dealloc(lru_list_elem *link)
{
_PyObject_GC_UNTRACK(link);
Py_XDECREF(link->key);
Py_XDECREF(link->result);
PyObject_GC_Del(link);
}
static int
lru_list_elem_traverse(lru_list_elem *link, visitproc visit, void *arg)
{
Py_VISIT(link->key);
Py_VISIT(link->result);
return 0;
}
static int
lru_list_elem_clear(lru_list_elem *link)
{
Py_CLEAR(link->key);
Py_CLEAR(link->result);
return 0;
}
static PyTypeObject lru_list_elem_type = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
"functools._lru_list_elem", /* tp_name */
sizeof(lru_list_elem), /* tp_basicsize */
0, /* tp_itemsize */
/* methods */
(destructor)lru_list_elem_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_reserved */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
0, /* tp_doc */
(traverseproc)lru_list_elem_traverse, /* tp_traverse */
(inquiry)lru_list_elem_clear, /* tp_clear */
};
typedef PyObject *(*lru_cache_ternaryfunc)(struct lru_cache_object *, PyObject *, PyObject *);
typedef struct lru_cache_object {
lru_list_elem root; /* includes PyObject_HEAD */
Py_ssize_t maxsize;
PyObject *maxsize_O;
PyObject *func;
lru_cache_ternaryfunc wrapper;
PyObject *cache;
PyObject *cache_info_type;
Py_ssize_t misses, hits;
int typed;
PyObject *dict;
int full;
} lru_cache_object;
static PyTypeObject lru_cache_type;
static PyObject *
lru_cache_make_key(PyObject *args, PyObject *kwds, int typed)
{
PyObject *key, *sorted_items;
Py_ssize_t key_size, pos, key_pos;
/* short path, key will match args anyway, which is a tuple */
if (!typed && !kwds) {
Py_INCREF(args);
return args;
}
if (kwds && PyDict_Size(kwds) > 0) {
sorted_items = PyDict_Items(kwds);
if (!sorted_items)
return NULL;
if (PyList_Sort(sorted_items) < 0) {
Py_DECREF(sorted_items);
return NULL;
}
} else
sorted_items = NULL;
key_size = PyTuple_GET_SIZE(args);
if (sorted_items)
key_size += PyList_GET_SIZE(sorted_items);
if (typed)
key_size *= 2;
if (sorted_items)
key_size++;
key = PyTuple_New(key_size);
if (key == NULL)
goto done;
key_pos = 0;
for (pos = 0; pos < PyTuple_GET_SIZE(args); ++pos) {
PyObject *item = PyTuple_GET_ITEM(args, pos);
Py_INCREF(item);
PyTuple_SET_ITEM(key, key_pos++, item);
}
if (sorted_items) {
Py_INCREF(kwd_mark);
PyTuple_SET_ITEM(key, key_pos++, kwd_mark);
for (pos = 0; pos < PyList_GET_SIZE(sorted_items); ++pos) {
PyObject *item = PyList_GET_ITEM(sorted_items, pos);
Py_INCREF(item);
PyTuple_SET_ITEM(key, key_pos++, item);
}
}
if (typed) {
for (pos = 0; pos < PyTuple_GET_SIZE(args); ++pos) {
PyObject *item = (PyObject *)Py_TYPE(PyTuple_GET_ITEM(args, pos));
Py_INCREF(item);
PyTuple_SET_ITEM(key, key_pos++, item);
}
if (sorted_items) {
for (pos = 0; pos < PyList_GET_SIZE(sorted_items); ++pos) {
PyObject *tp_items = PyList_GET_ITEM(sorted_items, pos);
PyObject *item = (PyObject *)Py_TYPE(PyTuple_GET_ITEM(tp_items, 1));
Py_INCREF(item);
PyTuple_SET_ITEM(key, key_pos++, item);
}
}
}
assert(key_pos == key_size);
done:
if (sorted_items)
Py_DECREF(sorted_items);
return key;
}
static PyObject *
uncached_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwds)
{
PyObject *result = PyObject_Call(self->func, args, kwds);
if (!result)
return NULL;
self->misses++;
return result;
}
static PyObject *
infinite_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwds)
{
PyObject *result;
PyObject *key = lru_cache_make_key(args, kwds, self->typed);
if (!key)
return NULL;
result = PyDict_GetItemWithError(self->cache, key);
if (result) {
Py_INCREF(result);
self->hits++;
Py_DECREF(key);
return result;
}
if (PyErr_Occurred()) {
Py_DECREF(key);
return NULL;
}
result = PyObject_Call(self->func, args, kwds);
if (!result) {
Py_DECREF(key);
return NULL;
}
if (PyDict_SetItem(self->cache, key, result) < 0) {
Py_DECREF(result);
Py_DECREF(key);
return NULL;
}
Py_DECREF(key);
self->misses++;
return result;
}
static void
lru_cache_extricate_link(lru_list_elem *link)
{
link->prev->next = link->next;
link->next->prev = link->prev;
}
static void
lru_cache_append_link(lru_cache_object *self, lru_list_elem *link)
{
lru_list_elem *root = &self->root;
lru_list_elem *last = root->prev;
last->next = root->prev = link;
link->prev = last;
link->next = root;
}
static PyObject *
bounded_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwds)
{
lru_list_elem *link;
PyObject *key, *result;
key = lru_cache_make_key(args, kwds, self->typed);
if (!key)
return NULL;
link = (lru_list_elem *)PyDict_GetItemWithError(self->cache, key);
if (link) {
lru_cache_extricate_link(link);
lru_cache_append_link(self, link);
self->hits++;
result = link->result;
Py_INCREF(result);
Py_DECREF(key);
return result;
}
if (PyErr_Occurred()) {
Py_DECREF(key);
return NULL;
}
result = PyObject_Call(self->func, args, kwds);
if (!result) {
Py_DECREF(key);
return NULL;
}
if (self->full && self->root.next != &self->root) {
/* Use the oldest item to store the new key and result. */
PyObject *oldkey, *oldresult;
/* Extricate the oldest item. */
link = self->root.next;
lru_cache_extricate_link(link);
/* Remove it from the cache.
The cache dict holds one reference to the link,
and the linked list holds yet one reference to it. */
if (PyDict_DelItem(self->cache, link->key) < 0) {
lru_cache_append_link(self, link);
Py_DECREF(key);
Py_DECREF(result);
return NULL;
}
/* Keep a reference to the old key and old result to
prevent their ref counts from going to zero during the
update. That will prevent potentially arbitrary object
clean-up code (i.e. __del__) from running while we're
still adjusting the links. */
oldkey = link->key;
oldresult = link->result;
link->key = key;
link->result = result;
if (PyDict_SetItem(self->cache, key, (PyObject *)link) < 0) {
Py_DECREF(link);
Py_DECREF(oldkey);
Py_DECREF(oldresult);
return NULL;
}
lru_cache_append_link(self, link);
Py_INCREF(result); /* for return */
Py_DECREF(oldkey);
Py_DECREF(oldresult);
} else {
/* Put result in a new link at the front of the queue. */
link = (lru_list_elem *)PyObject_GC_New(lru_list_elem,
&lru_list_elem_type);
if (link == NULL) {
Py_DECREF(key);
Py_DECREF(result);
return NULL;
}
link->key = key;
link->result = result;
_PyObject_GC_TRACK(link);
if (PyDict_SetItem(self->cache, key, (PyObject *)link) < 0) {
Py_DECREF(link);
return NULL;
}
lru_cache_append_link(self, link);
Py_INCREF(result); /* for return */
self->full = (PyDict_Size(self->cache) >= self->maxsize);
}
self->misses++;
return result;
}
static PyObject *
lru_cache_new(PyTypeObject *type, PyObject *args, PyObject *kw)
{
PyObject *func, *maxsize_O, *cache_info_type;
int typed;
lru_cache_object *obj;
Py_ssize_t maxsize;
PyObject *(*wrapper)(lru_cache_object *, PyObject *, PyObject *);
static char *keywords[] = {"user_function", "maxsize", "typed",
"cache_info_type", NULL};
if (!PyArg_ParseTupleAndKeywords(args, kw, "OOpO:lru_cache", keywords,
&func, &maxsize_O, &typed,
&cache_info_type)) {
return NULL;
}
if (!PyCallable_Check(func)) {
PyErr_SetString(PyExc_TypeError,
"the first argument must be callable");
return NULL;
}
/* select the caching function, and make/inc maxsize_O */
if (maxsize_O == Py_None) {
wrapper = infinite_lru_cache_wrapper;
/* use this only to initialize lru_cache_object attribute maxsize */
maxsize = -1;
} else if (PyIndex_Check(maxsize_O)) {
maxsize = PyNumber_AsSsize_t(maxsize_O, PyExc_OverflowError);
if (maxsize == -1 && PyErr_Occurred())
return NULL;
if (maxsize == 0)
wrapper = uncached_lru_cache_wrapper;
else
wrapper = bounded_lru_cache_wrapper;
} else {
PyErr_SetString(PyExc_TypeError, "maxsize should be integer or None");
return NULL;
}
obj = (lru_cache_object *)type->tp_alloc(type, 0);
if (obj == NULL)
return NULL;
if (!(obj->cache = PyDict_New())) {
Py_DECREF(obj);
return NULL;
}
obj->root.prev = &obj->root;
obj->root.next = &obj->root;
obj->maxsize = maxsize;
Py_INCREF(maxsize_O);
obj->maxsize_O = maxsize_O;
Py_INCREF(func);
obj->func = func;
obj->wrapper = wrapper;
obj->misses = obj->hits = 0;
obj->typed = typed;
Py_INCREF(cache_info_type);
obj->cache_info_type = cache_info_type;
return (PyObject *)obj;
}
static lru_list_elem *
lru_cache_unlink_list(lru_cache_object *self)
{
lru_list_elem *root = &self->root;
lru_list_elem *link = root->next;
if (link == root)
return NULL;
root->prev->next = NULL;
root->next = root->prev = root;
return link;
}
static void
lru_cache_clear_list(lru_list_elem *link)
{
while (link != NULL) {
lru_list_elem *next = link->next;
Py_DECREF(link);
link = next;
}
}
static void
lru_cache_dealloc(lru_cache_object *obj)
{
lru_list_elem *list = lru_cache_unlink_list(obj);
Py_XDECREF(obj->maxsize_O);
Py_XDECREF(obj->func);
Py_XDECREF(obj->cache);
Py_XDECREF(obj->dict);
Py_XDECREF(obj->cache_info_type);
lru_cache_clear_list(list);
Py_TYPE(obj)->tp_free(obj);
}
static PyObject *
lru_cache_call(lru_cache_object *self, PyObject *args, PyObject *kwds)
{
return self->wrapper(self, args, kwds);
}
static PyObject *
lru_cache_cache_info(lru_cache_object *self, PyObject *unused)
{
return PyObject_CallFunction(self->cache_info_type, "nnOn",
self->hits, self->misses, self->maxsize_O,
PyDict_Size(self->cache));
}
static PyObject *
lru_cache_cache_clear(lru_cache_object *self, PyObject *unused)
{
lru_list_elem *list = lru_cache_unlink_list(self);
self->hits = self->misses = 0;
self->full = 0;
PyDict_Clear(self->cache);
lru_cache_clear_list(list);
Py_RETURN_NONE;
}
static int
lru_cache_tp_traverse(lru_cache_object *self, visitproc visit, void *arg)
{
lru_list_elem *link = self->root.next;
while (link != &self->root) {
lru_list_elem *next = link->next;
Py_VISIT(link);
link = next;
}
Py_VISIT(self->maxsize_O);
Py_VISIT(self->func);
Py_VISIT(self->cache);
Py_VISIT(self->cache_info_type);
Py_VISIT(self->dict);
return 0;
}
static int
lru_cache_tp_clear(lru_cache_object *self)
{
lru_list_elem *list = lru_cache_unlink_list(self);
Py_CLEAR(self->maxsize_O);
Py_CLEAR(self->func);
Py_CLEAR(self->cache);
Py_CLEAR(self->cache_info_type);
Py_CLEAR(self->dict);
lru_cache_clear_list(list);
return 0;
}
PyDoc_STRVAR(lru_cache_doc,
"Create a cached callable that wraps another function.\n\
\n\
user_function: the function being cached\n\
\n\
maxsize: 0 for no caching\n\
None for unlimited cache size\n\
n for a bounded cache\n\
\n\
typed: False cache f(3) and f(3.0) as identical calls\n\
True cache f(3) and f(3.0) as distinct calls\n\
\n\
cache_info_type: namedtuple class with the fields:\n\
hits misses currsize maxsize\n"
);
static PyMethodDef lru_cache_methods[] = {
{"cache_info", (PyCFunction)lru_cache_cache_info, METH_NOARGS},
{"cache_clear", (PyCFunction)lru_cache_cache_clear, METH_NOARGS},
{NULL}
};
static PyGetSetDef lru_cache_getsetlist[] = {
{"__dict__", PyObject_GenericGetDict, PyObject_GenericSetDict},
{NULL}
};
static PyTypeObject lru_cache_type = {
PyVarObject_HEAD_INIT(NULL, 0)
"functools._lru_cache_wrapper", /* tp_name */
sizeof(lru_cache_object), /* tp_basicsize */
0, /* tp_itemsize */
/* methods */
(destructor)lru_cache_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_reserved */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
(ternaryfunc)lru_cache_call, /* tp_call */
0, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
/* tp_flags */
lru_cache_doc, /* tp_doc */
(traverseproc)lru_cache_tp_traverse,/* tp_traverse */
(inquiry)lru_cache_tp_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
lru_cache_methods, /* tp_methods */
0, /* tp_members */
lru_cache_getsetlist, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
offsetof(lru_cache_object, dict), /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
lru_cache_new, /* tp_new */
};
/* module level code ********************************************************/
PyDoc_STRVAR(module_doc,
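A note on the removed lru_cache_make_key: unlike the pure-Python _make_key, it lays the key out as one flat tuple holding the positional arguments, a unique kwd_mark separator, the (name, value) keyword items in sorted order, and, when typed is true, the argument types appended at the end. An equivalent Python sketch of that layout (illustrative only, not part of the commit):

    kwd_mark = object()    # unique sentinel separating args from keyword items

    def make_key(args, kwds, typed):
        if not typed and not kwds:
            return args                    # fast path: the args tuple itself is the key
        sorted_items = sorted(kwds.items()) if kwds else None
        key = list(args)
        if sorted_items:
            key.append(kwd_mark)
            key.extend(sorted_items)       # (name, value) pairs, sorted by name
        if typed:
            key.extend(type(v) for v in args)
            if sorted_items:
                key.extend(type(v) for _, v in sorted_items)
        return tuple(key)

Appending the types is what makes typed=True treat f(3) and f(3.0) as distinct calls: the keys (3, int) and (3.0, float) differ even though 3 == 3.0.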
@@ -1135,11 +602,6 @@ static PyMethodDef module_methods[] = {
{NULL, NULL} /* sentinel */
};
static void
module_free(void *m)
{
Py_CLEAR(kwd_mark);
}
static struct PyModuleDef _functoolsmodule = {
PyModuleDef_HEAD_INIT,
@@ -1150,7 +612,7 @@ static struct PyModuleDef _functoolsmodule = {
NULL,
NULL,
NULL,
module_free,
NULL
};
PyMODINIT_FUNC
@@ -1161,7 +623,6 @@ PyInit__functools(void)
char *name;
PyTypeObject *typelist[] = {
&partial_type,
&lru_cache_type,
NULL
};
@@ -1169,12 +630,6 @@ PyInit__functools(void)
if (m == NULL)
return NULL;
kwd_mark = PyObject_CallObject((PyObject *)&PyBaseObject_Type, NULL);
if (!kwd_mark) {
Py_DECREF(m);
return NULL;
}
for (i=0 ; typelist[i] != NULL ; i++) {
if (PyType_Ready(typelist[i]) < 0) {
Py_DECREF(m);