Follow up to #9778: fix regressions on 64-bit Windows builds

Antoine Pitrou 2010-10-23 17:37:54 +00:00
parent 1e908af335
commit fbb1c6191c
4 changed files with 9 additions and 18 deletions


@@ -827,7 +827,7 @@ class SizeofTest(unittest.TestCase):
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
- basicsize = size(h + 'PPliP') + usize * (len(s) + 1)
+ basicsize = size(h + 'PPPiP') + usize * (len(s) + 1)
check(s, basicsize)
# weakref
import weakref
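
The one-character change above ('l' to 'P' in the struct format string) tracks the switch of the cached string hash from C long to Py_hash_t: 64-bit Windows is an LLP64 platform, where long stays 32 bits while pointers, Py_ssize_t and Py_hash_t are 64 bits, so sizing that field with 'l' left the expected basicsize four bytes short. A stand-alone C sketch (not part of the commit) that prints the widths in question:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    /* On LLP64 platforms such as 64-bit Windows, long remains 4 bytes
       while pointers (and therefore Py_ssize_t / Py_hash_t) are 8 bytes,
       so a struct format of 'l' undercounts the hash field by 4. */
    printf("sizeof(long)      = %u\n", (unsigned)sizeof(long));
    printf("sizeof(void *)    = %u\n", (unsigned)sizeof(void *));
    printf("sizeof(ptrdiff_t) = %u\n", (unsigned)sizeof(ptrdiff_t));
    return 0;
}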


@@ -124,15 +124,6 @@ masked); and the PyDictObject struct required a member to hold the table's
polynomial. In Tim's experiments the current scheme ran faster, produced
equally good collision statistics, needed less code & used less memory.
- Theoretical Python 2.5 headache: hash codes are only C "long", but
- sizeof(Py_ssize_t) > sizeof(long) may be possible. In that case, and if a
- dict is genuinely huge, then only the slots directly reachable via indexing
- by a C long can be the first slot in a probe sequence. The probe sequence
- will still eventually reach every slot in the table, but the collision rate
- on initial probes may be much higher than this scheme was designed for.
- Getting a hash code as fat as Py_ssize_t is the only real cure. But in
- practice, this probably won't make a lick of difference for many years (at
- which point everyone will have terabytes of RAM on 64-bit boxes).
*/
/* Object used as dummy key to fill deleted entries */
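
The deleted paragraph described a worry that only existed while hash codes were plain C long. A minimal sketch of the typedefs assumed to come from #9778 (stand-in definitions, not this commit's code), which make hash codes exactly as wide as Py_ssize_t and so remove that worry:

#include <stddef.h>
#include <assert.h>

typedef ptrdiff_t Py_ssize_t;        /* stand-in for the real definition */
typedef Py_ssize_t Py_hash_t;        /* alias assumed to come from #9778 */

int main(void)
{
    /* Hash codes are now exactly as wide as Py_ssize_t, which is the
       "only real cure" the deleted paragraph was waiting for. */
    assert(sizeof(Py_hash_t) == sizeof(Py_ssize_t));
    return 0;
}
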
@@ -531,7 +522,7 @@ insertdict(register PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject *v
{
PyObject *old_value;
register PyDictEntry *ep;
- typedef PyDictEntry *(*lookupfunc)(PyDictObject *, PyObject *, long);
+ typedef PyDictEntry *(*lookupfunc)(PyDictObject *, PyObject *, Py_hash_t);
assert(mp->ma_lookup != NULL);
ep = mp->ma_lookup(mp, key, hash);
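
The lookupfunc typedef above (and the matching one in the set code below) has to mirror the real signature of the lookup slot it describes; once ma_lookup takes a Py_hash_t, a typedef still spelling long names a different, narrower type on 64-bit Windows. A compile-only sketch of the pattern, using hypothetical names rather than the CPython ones:

#include <stddef.h>

typedef ptrdiff_t Py_hash_t;                 /* stand-in for the real typedef */
typedef struct { Py_hash_t hash; } Entry;    /* hypothetical entry type       */

/* Hypothetical lookup routine taking a full-width hash. */
static Entry *lookup(void *table, void *key, Py_hash_t hash)
{
    (void)table; (void)key; (void)hash;
    return NULL;
}

/* The function-pointer typedef must repeat the same parameter types;
   spelling the last one as long would make the assignment in main()
   a type mismatch wherever long is narrower than Py_hash_t. */
typedef Entry *(*lookupfunc)(void *, void *, Py_hash_t);

int main(void)
{
    lookupfunc f = lookup;
    return f(NULL, NULL, (Py_hash_t)0) == NULL ? 0 : 1;
}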


@@ -214,7 +214,7 @@ static int
set_insert_key(register PySetObject *so, PyObject *key, Py_hash_t hash)
{
register setentry *entry;
- typedef setentry *(*lookupfunc)(PySetObject *, PyObject *, long);
+ typedef setentry *(*lookupfunc)(PySetObject *, PyObject *, Py_hash_t);
assert(so->lookup != NULL);
entry = so->lookup(so, key, hash);
@@ -663,7 +663,7 @@ set_merge(PySetObject *so, PyObject *otherset)
if (key != NULL &&
key != dummy) {
Py_INCREF(key);
- if (set_insert_key(so, key, (long) entry->hash) == -1) {
+ if (set_insert_key(so, key, entry->hash) == -1) {
Py_DECREF(key);
return -1;
}
@@ -772,14 +772,14 @@ frozenset_hash(PyObject *self)
if (so->hash != -1)
return so->hash;
- hash *= (long) PySet_GET_SIZE(self) + 1;
+ hash *= PySet_GET_SIZE(self) + 1;
while (set_next(so, &pos, &entry)) {
/* Work to increase the bit dispersion for closely spaced hash
values. This is important because some use cases have many
combinations of a small number of elements with nearby
hashes so that many distinct combinations collapse to only
a handful of distinct hash values. */
- h = (long) entry->hash;
+ h = entry->hash;
hash ^= (h ^ (h << 16) ^ 89869747L) * 3644798167u;
}
hash = hash * 69069L + 907133923L;
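
Keeping h at full width in the loop above means the upper 32 bits of each element's hash now take part in the mixing on 64-bit Windows instead of being sliced off by the (long) cast. A stand-alone sketch of the same dispersion step, using a stand-in type and hypothetical input values (the seed is made up; the mixing constants are the ones visible in the diff):

#include <stdio.h>
#include <stddef.h>

typedef ptrdiff_t Py_hash_t;           /* stand-in for the real typedef */

int main(void)
{
    /* Hypothetical element hashes; in the real code they come from
       entry->hash while iterating over the frozenset's table. */
    Py_hash_t element_hashes[] = { 0x1000, 0x1001, 0x1002 };
    size_t n = sizeof(element_hashes) / sizeof(element_hashes[0]);
    Py_hash_t hash = 1;                /* hypothetical seed value */
    size_t i;

    hash *= (Py_hash_t)n + 1;
    for (i = 0; i < n; i++) {
        Py_hash_t h = element_hashes[i];
        /* Same dispersion step as in the diff: spread closely spaced
           hashes apart so nearby element combinations do not collapse
           onto a handful of results. */
        hash ^= (h ^ (h << 16) ^ 89869747L) * 3644798167u;
    }
    hash = hash * 69069L + 907133923L;
    printf("combined hash: %lld\n", (long long)hash);
    return 0;
}
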
@@ -1116,7 +1116,7 @@ set_swap_bodies(PySetObject *a, PySetObject *b)
setentry *u;
setentry *(*f)(PySetObject *so, PyObject *key, Py_ssize_t hash);
setentry tab[PySet_MINSIZE];
- long h;
+ Py_hash_t h;
t = a->fill; a->fill = b->fill; b->fill = t;
t = a->used; a->used = b->used; b->used = t;
@@ -1550,7 +1550,7 @@ set_difference(PySetObject *so, PyObject *other)
setentry entrycopy;
entrycopy.hash = entry->hash;
entrycopy.key = entry->key;
- if (!_PyDict_Contains(other, entry->key, (long) entry->hash)) {
+ if (!_PyDict_Contains(other, entry->key, entry->hash)) {
if (set_add_entry((PySetObject *)result, &entrycopy) == -1) {
Py_DECREF(result);
return NULL;


@@ -452,7 +452,7 @@ PyHKEY_compareFunc(PyObject *ob1, PyObject *ob2)
(pyhkey1 < pyhkey2 ? -1 : 1);
}
- static long
+ static Py_hash_t
PyHKEY_hashFunc(PyObject *ob)
{
/* Just use the address.
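
Widening the return type matters here because, as the comment says, the hash is derived from an address, and on 64-bit Windows a pointer does not fit in a long. A stand-alone sketch of address-based hashing with a pointer-wide result (hypothetical helper, not the actual winreg code):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef ptrdiff_t Py_hash_t;     /* stand-in for the real typedef */

/* Hypothetical helper: derive a hash from an object's address.
   Returning long instead would silently drop the upper 32 bits of
   the pointer on LLP64 platforms such as 64-bit Windows. */
static Py_hash_t
hash_by_address(const void *ob)
{
    return (Py_hash_t)(intptr_t)ob;
}

int main(void)
{
    int dummy = 0;
    printf("hash of &dummy: %lld\n", (long long)hash_by_address(&dummy));
    return 0;
}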