/* Set of hash utility functions to help maintain the invariant that
       if a==b then hash(a)==hash(b)

   All the utility functions (_Py_Hash*()) return "-1" to signify an error.
*/
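
/* A minimal sketch of that invariant in terms of the public C API.
   Illustration only (the helper name is invented here, error handling is
   omitted, and an initialized interpreter is assumed); it is not part of
   this file's implementation. */
#if 0
static void
example_hash_invariant(void)
{
    PyObject *i = PyLong_FromLong(1);
    PyObject *f = PyFloat_FromDouble(1.0);
    /* numerically equal objects of different types must hash alike */
    assert(PyObject_Hash(i) == PyObject_Hash(f));
    Py_DECREF(i);
    Py_DECREF(f);
}
#endif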

#include "Python.h"
#include "pycore_pyhash.h"        // _Py_HashSecret_t

#ifdef __APPLE__
# include <libkern/OSByteOrder.h>
#elif defined(HAVE_LE64TOH) && defined(HAVE_ENDIAN_H)
# include <endian.h>
#elif defined(HAVE_LE64TOH) && defined(HAVE_SYS_ENDIAN_H)
# include <sys/endian.h>
#endif

_Py_HashSecret_t _Py_HashSecret = {{0}};

#if Py_HASH_ALGORITHM == Py_HASH_EXTERNAL
extern PyHash_FuncDef PyHash_Func;
#else
static PyHash_FuncDef PyHash_Func;
#endif

/* Count _Py_HashBytes() calls */
#ifdef Py_HASH_STATS
#define Py_HASH_STATS_MAX 32
static Py_ssize_t hashstats[Py_HASH_STATS_MAX + 1] = {0};
#endif

/* For numeric types, the hash of a number x is based on the reduction
   of x modulo the prime P = 2**_PyHASH_BITS - 1.  It's designed so that
   hash(x) == hash(y) whenever x and y are numerically equal, even if
   x and y have different types.

   A quick summary of the hashing strategy:

   (1) First define the 'reduction of x modulo P' for any rational
   number x; this is a standard extension of the usual notion of
   reduction modulo P for integers.  If x == p/q (written in lowest
   terms), the reduction is interpreted as the reduction of p times
   the inverse of the reduction of q, all modulo P; if q is exactly
   divisible by P then define the reduction to be infinity.  So we've
   got a well-defined map

      reduce : { rational numbers } -> { 0, 1, 2, ..., P-1, infinity }.

   (2) Now for a rational number x, define hash(x) by:

      reduce(x)    if x >= 0
      -reduce(-x)  if x < 0

   If the result of the reduction is infinity (this is impossible for
   integers, floats and Decimals) then use the predefined hash value
   _PyHASH_INF for x >= 0, or -_PyHASH_INF for x < 0, instead.
   _PyHASH_INF and -_PyHASH_INF are also used for the
   hashes of float and Decimal infinities.

   NaNs hash with a pointer hash.  Having distinct hash values prevents
   catastrophic pileups from distinct NaN instances which used to always
   have the same hash value but would compare unequal.

   A selling point for the above strategy is that it makes it possible
   to compute hashes of decimal and binary floating-point numbers
   efficiently, even if the exponent of the binary or decimal number
   is large.  The key point is that

      reduce(x * y) == reduce(x) * reduce(y) (modulo _PyHASH_MODULUS)

   provided that {reduce(x), reduce(y)} != {0, infinity}.  The reduction of a
   binary or decimal float is never infinity, since the denominator is a power
   of 2 (for binary) or a divisor of a power of 10 (for decimal).  So we have,
   for nonnegative x,

      reduce(x * 2**e) == reduce(x) * reduce(2**e) % _PyHASH_MODULUS

      reduce(x * 10**e) == reduce(x) * reduce(10**e) % _PyHASH_MODULUS

   and reduce(10**e) can be computed efficiently by the usual modular
   exponentiation algorithm.  For reduce(2**e) it's even better: since
   P is of the form 2**n-1, reduce(2**e) is 2**(e mod n), and multiplication
   by 2**(e mod n) modulo 2**n-1 just amounts to a rotation of bits.
*/
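
/* Illustrative sketch, not used by the interpreter itself: for a Mersenne
   modulus P = 2**n - 1, multiplying a value x < P by 2**k modulo P is just
   an n-bit rotation of x by k places, which is why reduce(2**e) is cheap.
   The helper name and the toy modulus below are made up for this
   illustration. */
#if 0
static void
example_mersenne_rotation(void)
{
    const unsigned n = 7;                    /* toy modulus P = 2**7 - 1 */
    const uint32_t P = 127;
    uint32_t x = 45, k = 3;                  /* any x < P, 0 < k < n */
    uint32_t by_mult = (uint32_t)(((uint64_t)x << k) % P);
    uint32_t by_rot = ((x << k) & P) | (x >> (n - k));
    assert(by_mult == by_rot);               /* both equal 45*8 % 127 == 106 */
}
#endif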

Py_hash_t
_Py_HashDouble(PyObject *inst, double v)
{
    int e, sign;
    double m;
    Py_uhash_t x, y;

    if (!isfinite(v)) {
        if (isinf(v))
            return v > 0 ? _PyHASH_INF : -_PyHASH_INF;
        else
            return PyObject_GenericHash(inst);
    }

    m = frexp(v, &e);

    sign = 1;
    if (m < 0) {
        sign = -1;
        m = -m;
    }

    /* process 28 bits at a time; this should work well both for binary
       and hexadecimal floating point. */
    x = 0;
    while (m) {
        x = ((x << 28) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - 28);
        m *= 268435456.0; /* 2**28 */
        e -= 28;
        y = (Py_uhash_t)m; /* pull out integer part */
        m -= y;
        x += y;
        if (x >= _PyHASH_MODULUS)
            x -= _PyHASH_MODULUS;
    }

    /* adjust for the exponent; first reduce it modulo _PyHASH_BITS */
    e = e >= 0 ? e % _PyHASH_BITS : _PyHASH_BITS-1-((-1-e) % _PyHASH_BITS);
    x = ((x << e) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - e);

    x = x * sign;
    if (x == (Py_uhash_t)-1)
        x = (Py_uhash_t)-2;
    return (Py_hash_t)x;
}

Py_hash_t
Py_HashPointer(const void *ptr)
{
    Py_hash_t hash = _Py_HashPointerRaw(ptr);
    if (hash == -1) {
        hash = -2;
    }
    return hash;
}

Py_hash_t
PyObject_GenericHash(PyObject *obj)
{
    return Py_HashPointer(obj);
}

Py_hash_t
_Py_HashBytes(const void *src, Py_ssize_t len)
{
    Py_hash_t x;
    /*
      We make the hash of the empty string be 0, rather than using
      (prefix ^ suffix), since this slightly obfuscates the hash secret
    */
    if (len == 0) {
        return 0;
    }

#ifdef Py_HASH_STATS
    hashstats[(len <= Py_HASH_STATS_MAX) ? len : 0]++;
#endif

#if Py_HASH_CUTOFF > 0
    if (len < Py_HASH_CUTOFF) {
        /* Optimize hashing of very small strings with inline DJBX33A. */
        Py_uhash_t hash;
        const unsigned char *p = src;
        hash = 5381; /* DJBX33A starts with 5381 */

        switch(len) {
            /* ((hash << 5) + hash) + *p == hash * 33 + *p */
            case 7: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
            case 6: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
            case 5: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
            case 4: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
            case 3: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
            case 2: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
            case 1: hash = ((hash << 5) + hash) + *p++; break;
            default:
                Py_UNREACHABLE();
        }
        hash ^= len;
        hash ^= (Py_uhash_t) _Py_HashSecret.djbx33a.suffix;
        x = (Py_hash_t)hash;
    }
    else
#endif /* Py_HASH_CUTOFF */
        x = PyHash_Func.hash(src, len);

    if (x == -1)
        return -2;
    return x;
}

void
_PyHash_Fini(void)
{
#ifdef Py_HASH_STATS
    fprintf(stderr, "len calls total\n");
    Py_ssize_t total = 0;
    for (int i = 1; i <= Py_HASH_STATS_MAX; i++) {
        total += hashstats[i];
        fprintf(stderr, "%2i %8zd %8zd\n", i, hashstats[i], total);
    }
    total += hashstats[0];
    fprintf(stderr, "> %8zd %8zd\n", hashstats[0], total);
#endif
}

PyHash_FuncDef *
PyHash_GetFuncDef(void)
{
    return &PyHash_Func;
}

/* Optimized memcpy() for Windows */
#ifdef _MSC_VER
# if SIZEOF_PY_UHASH_T == 4
#  define PY_UHASH_CPY(dst, src) do { \
       dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; \
       } while(0)
# elif SIZEOF_PY_UHASH_T == 8
#  define PY_UHASH_CPY(dst, src) do { \
       dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; \
       dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; \
       } while(0)
# else
#  error SIZEOF_PY_UHASH_T must be 4 or 8
# endif /* SIZEOF_PY_UHASH_T */
#else /* not Windows */
# define PY_UHASH_CPY(dst, src) memcpy(dst, src, SIZEOF_PY_UHASH_T)
#endif /* _MSC_VER */

#if Py_HASH_ALGORITHM == Py_HASH_FNV
/* **************************************************************************
 * Modified Fowler-Noll-Vo (FNV) hash function
 */
static Py_hash_t
fnv(const void *src, Py_ssize_t len)
{
    const unsigned char *p = src;
    Py_uhash_t x;
    Py_ssize_t remainder, blocks;
    union {
        Py_uhash_t value;
        unsigned char bytes[SIZEOF_PY_UHASH_T];
    } block;

#ifdef Py_DEBUG
    assert(_Py_HashSecret_Initialized);
#endif
    remainder = len % SIZEOF_PY_UHASH_T;
    if (remainder == 0) {
        /* Process at least one block byte by byte to reduce hash collisions
         * for strings with common prefixes. */
        remainder = SIZEOF_PY_UHASH_T;
    }
    blocks = (len - remainder) / SIZEOF_PY_UHASH_T;

    x = (Py_uhash_t) _Py_HashSecret.fnv.prefix;
    x ^= (Py_uhash_t) *p << 7;
    while (blocks--) {
        PY_UHASH_CPY(block.bytes, p);
        x = (PyHASH_MULTIPLIER * x) ^ block.value;
        p += SIZEOF_PY_UHASH_T;
    }
    /* add remainder */
    for (; remainder > 0; remainder--)
        x = (PyHASH_MULTIPLIER * x) ^ (Py_uhash_t) *p++;
    x ^= (Py_uhash_t) len;
    x ^= (Py_uhash_t) _Py_HashSecret.fnv.suffix;
    if (x == (Py_uhash_t) -1) {
        x = (Py_uhash_t) -2;
    }
    return x;
}

static PyHash_FuncDef PyHash_Func = {fnv, "fnv", 8 * SIZEOF_PY_HASH_T,
                                     16 * SIZEOF_PY_HASH_T};

#endif /* Py_HASH_ALGORITHM == Py_HASH_FNV */

/* **************************************************************************
   <MIT License>
   Copyright (c) 2013 Marek Majkowski <marek@popcount.org>

   Permission is hereby granted, free of charge, to any person obtaining a copy
   of this software and associated documentation files (the "Software"), to deal
   in the Software without restriction, including without limitation the rights
   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
   copies of the Software, and to permit persons to whom the Software is
   furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included in
   all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
   THE SOFTWARE.
   </MIT License>

   Original location:
      https://github.com/majek/csiphash/

   Solution inspired by code from:
      Samuel Neves (supercop/crypto_auth/siphash24/little)
      djb (supercop/crypto_auth/siphash24/little2)
      Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c)

   Modified for Python by Christian Heimes:
      - C89 / MSVC compatibility
      - _rotl64() on Windows
      - letoh64() fallback
*/

/* byte swap little endian to host endian
 * Endian conversion not only ensures that the hash function returns the same
 * value on all platforms. It is also required for a good dispersion of
 * the hash values' least significant bits.
 */
#if PY_LITTLE_ENDIAN
# define _le64toh(x) ((uint64_t)(x))
#elif defined(__APPLE__)
# define _le64toh(x) OSSwapLittleToHostInt64(x)
#elif defined(HAVE_LETOH64)
# define _le64toh(x) le64toh(x)
#else
# define _le64toh(x) (((uint64_t)(x) << 56) | \
                      (((uint64_t)(x) << 40) & 0xff000000000000ULL) | \
                      (((uint64_t)(x) << 24) & 0xff0000000000ULL) | \
                      (((uint64_t)(x) << 8) & 0xff00000000ULL) | \
                      (((uint64_t)(x) >> 8) & 0xff000000ULL) | \
                      (((uint64_t)(x) >> 24) & 0xff0000ULL) | \
                      (((uint64_t)(x) >> 40) & 0xff00ULL) | \
                      ((uint64_t)(x) >> 56))
#endif
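
/* Small sketch, illustration only (the helper name is invented): with the
   byte-swapping fallback definition of _le64toh() above, a word loaded on a
   big-endian host as 0x0102030405060708 maps to the little-endian reading
   0x0807060504030201 of the same eight input bytes, so every platform feeds
   identical 64-bit words into SipHash. */
#if 0
static void
example_le64toh_swap(void)
{
    /* Only meaningful where the byte-swapping fallback is selected. */
    assert(_le64toh(0x0102030405060708ULL) == 0x0807060504030201ULL);
}
#endif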

#ifdef _MSC_VER
# define ROTATE(x, b) _rotl64(x, b)
#else
# define ROTATE(x, b) (uint64_t)( ((x) << (b)) | ( (x) >> (64 - (b))) )
#endif

#define HALF_ROUND(a,b,c,d,s,t)     \
    a += b; c += d;                 \
    b = ROTATE(b, s) ^ a;           \
    d = ROTATE(d, t) ^ c;           \
    a = ROTATE(a, 32);

#define SINGLE_ROUND(v0,v1,v2,v3)   \
    HALF_ROUND(v0,v1,v2,v3,13,16);  \
    HALF_ROUND(v2,v1,v0,v3,17,21);

#define DOUBLE_ROUND(v0,v1,v2,v3)   \
    SINGLE_ROUND(v0,v1,v2,v3);      \
    SINGLE_ROUND(v0,v1,v2,v3);
2013-11-20 06:46:18 -04:00
|
|
|
|
2021-10-10 05:29:46 -03:00
|
|
|
static uint64_t
|
|
|
|
siphash13(uint64_t k0, uint64_t k1, const void *src, Py_ssize_t src_sz) {
|
|
|
|
uint64_t b = (uint64_t)src_sz << 56;
|
|
|
|
const uint8_t *in = (const uint8_t*)src;
|
|
|
|
|
|
|
|
uint64_t v0 = k0 ^ 0x736f6d6570736575ULL;
|
|
|
|
uint64_t v1 = k1 ^ 0x646f72616e646f6dULL;
|
|
|
|
uint64_t v2 = k0 ^ 0x6c7967656e657261ULL;
|
|
|
|
uint64_t v3 = k1 ^ 0x7465646279746573ULL;
|
|
|
|
|
|
|
|
uint64_t t;
|
|
|
|
uint8_t *pt;
|
|
|
|
|
|
|
|
while (src_sz >= 8) {
|
|
|
|
uint64_t mi;
|
|
|
|
memcpy(&mi, in, sizeof(mi));
|
|
|
|
mi = _le64toh(mi);
|
|
|
|
in += sizeof(mi);
|
|
|
|
src_sz -= sizeof(mi);
|
|
|
|
v3 ^= mi;
|
|
|
|
SINGLE_ROUND(v0,v1,v2,v3);
|
|
|
|
v0 ^= mi;
|
|
|
|
}
|
|
|
|
|
|
|
|
t = 0;
|
|
|
|
pt = (uint8_t *)&t;
|
|
|
|
switch (src_sz) {
|
|
|
|
case 7: pt[6] = in[6]; /* fall through */
|
|
|
|
case 6: pt[5] = in[5]; /* fall through */
|
|
|
|
case 5: pt[4] = in[4]; /* fall through */
|
|
|
|
case 4: memcpy(pt, in, sizeof(uint32_t)); break;
|
|
|
|
case 3: pt[2] = in[2]; /* fall through */
|
|
|
|
case 2: pt[1] = in[1]; /* fall through */
|
|
|
|
case 1: pt[0] = in[0]; /* fall through */
|
|
|
|
}
|
|
|
|
b |= _le64toh(t);
|
|
|
|
|
|
|
|
v3 ^= b;
|
|
|
|
SINGLE_ROUND(v0,v1,v2,v3);
|
|
|
|
v0 ^= b;
|
|
|
|
v2 ^= 0xff;
|
|
|
|
SINGLE_ROUND(v0,v1,v2,v3);
|
|
|
|
SINGLE_ROUND(v0,v1,v2,v3);
|
|
|
|
SINGLE_ROUND(v0,v1,v2,v3);
|
|
|
|
|
|
|
|
/* modified */
|
|
|
|
t = (v0 ^ v1) ^ (v2 ^ v3);
|
|
|
|
return t;
|
|
|
|
}

#if Py_HASH_ALGORITHM == Py_HASH_SIPHASH24
static uint64_t
siphash24(uint64_t k0, uint64_t k1, const void *src, Py_ssize_t src_sz) {
    uint64_t b = (uint64_t)src_sz << 56;
    const uint8_t *in = (const uint8_t*)src;

    uint64_t v0 = k0 ^ 0x736f6d6570736575ULL;
    uint64_t v1 = k1 ^ 0x646f72616e646f6dULL;
    uint64_t v2 = k0 ^ 0x6c7967656e657261ULL;
    uint64_t v3 = k1 ^ 0x7465646279746573ULL;

    uint64_t t;
    uint8_t *pt;

    while (src_sz >= 8) {
        uint64_t mi;
        memcpy(&mi, in, sizeof(mi));
        mi = _le64toh(mi);
        in += sizeof(mi);
        src_sz -= sizeof(mi);
        v3 ^= mi;
        DOUBLE_ROUND(v0,v1,v2,v3);
        v0 ^= mi;
    }

    t = 0;
    pt = (uint8_t *)&t;
    switch (src_sz) {
        case 7: pt[6] = in[6]; /* fall through */
        case 6: pt[5] = in[5]; /* fall through */
        case 5: pt[4] = in[4]; /* fall through */
        case 4: memcpy(pt, in, sizeof(uint32_t)); break;
        case 3: pt[2] = in[2]; /* fall through */
        case 2: pt[1] = in[1]; /* fall through */
        case 1: pt[0] = in[0]; /* fall through */
    }
    b |= _le64toh(t);

    v3 ^= b;
    DOUBLE_ROUND(v0,v1,v2,v3);
    v0 ^= b;
    v2 ^= 0xff;
    DOUBLE_ROUND(v0,v1,v2,v3);
    DOUBLE_ROUND(v0,v1,v2,v3);

    /* modified */
    t = (v0 ^ v1) ^ (v2 ^ v3);
    return t;
}
#endif

uint64_t
_Py_KeyedHash(uint64_t key, const void *src, Py_ssize_t src_sz)
{
    return siphash13(key, 0, src, src_sz);
}

#if Py_HASH_ALGORITHM == Py_HASH_SIPHASH13
static Py_hash_t
pysiphash(const void *src, Py_ssize_t src_sz) {
    return (Py_hash_t)siphash13(
        _le64toh(_Py_HashSecret.siphash.k0), _le64toh(_Py_HashSecret.siphash.k1),
        src, src_sz);
}

static PyHash_FuncDef PyHash_Func = {pysiphash, "siphash13", 64, 128};
#endif

#if Py_HASH_ALGORITHM == Py_HASH_SIPHASH24
static Py_hash_t
pysiphash(const void *src, Py_ssize_t src_sz) {
    return (Py_hash_t)siphash24(
        _le64toh(_Py_HashSecret.siphash.k0), _le64toh(_Py_HashSecret.siphash.k1),
        src, src_sz);
}

static PyHash_FuncDef PyHash_Func = {pysiphash, "siphash24", 64, 128};
#endif