mirror of https://github.com/python/cpython
gh-121368: Fix seq lock memory ordering in _PyType_Lookup (#121388)
The `_PySeqLock_EndRead` function needs an acquire fence to ensure that the load of the sequence happens after any loads within the read-side critical section. The missing fence can trigger bugs on macOS arm64. Additionally, we need a release fence in `_PySeqLock_LockWrite` to ensure that the sequence update is visible before any modifications to the cache entry.
This commit is contained in:
parent 31873bea47
commit 1d3cf79a50
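To make the ordering concrete, here is a minimal sketch of the seqlock protocol this commit fixes, written against plain C11 <stdatomic.h> rather than CPython's _Py_atomic_* wrappers. Everything in it is illustrative: the names (seqlock_t, read_begin, and so on) are not CPython's, and a single writer is assumed where CPython's _PySeqLock_LockWrite uses a compare-exchange to arbitrate between writers. The fence placement mirrors the change below.

// Minimal seqlock sketch, assuming C11 <stdatomic.h>; names are
// illustrative, not CPython's.
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
    _Atomic uint32_t sequence;  // odd while a writer is active
} seqlock_t;

static uint32_t
read_begin(seqlock_t *lock)
{
    uint32_t seq;
    // Wait for any active writer; the acquire load pairs with the
    // release store in write_end().
    while (((seq = atomic_load_explicit(&lock->sequence,
                                        memory_order_acquire)) & 1) != 0) {
        // spin; a real implementation would yield the processor here
    }
    return seq;
}

static int
read_validate(seqlock_t *lock, uint32_t previous)
{
    // The acquire fence keeps the data loads made between read_begin()
    // and here from sinking below the re-load of the sequence. A plain
    // load-acquire of the sequence would not do that: acquire only orders
    // *later* accesses after the load, which is exactly the bug fixed here.
    atomic_thread_fence(memory_order_acquire);
    return atomic_load_explicit(&lock->sequence,
                                memory_order_relaxed) == previous;
}

static void
write_begin(seqlock_t *lock)
{
    // Make the sequence odd, then fence so the "writer active" marker is
    // visible before any stores to the protected data. This mirrors the
    // release fence the commit adds to _PySeqLock_LockWrite, where it acts
    // as a store-store barrier.
    uint32_t seq = atomic_load_explicit(&lock->sequence,
                                        memory_order_relaxed);
    atomic_store_explicit(&lock->sequence, seq + 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_release);
}

static void
write_end(seqlock_t *lock)
{
    // Release store: publishes the data writes along with the even sequence.
    uint32_t seq = atomic_load_explicit(&lock->sequence,
                                        memory_order_relaxed);
    atomic_store_explicit(&lock->sequence, seq + 1, memory_order_release);
}

The key invariant is that a reader's pair of sequence loads brackets its data loads: if both loads observe the same even value, no writer was active in between.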
Include/cpython/pyatomic.h

@@ -510,6 +510,9 @@ _Py_atomic_load_ssize_acquire(const Py_ssize_t *obj);
 // See https://en.cppreference.com/w/cpp/atomic/atomic_thread_fence
 static inline void _Py_atomic_fence_seq_cst(void);
 
+// Acquire fence
+static inline void _Py_atomic_fence_acquire(void);
+
 // Release fence
 static inline void _Py_atomic_fence_release(void);
 
Include/cpython/pyatomic_gcc.h

@@ -542,6 +542,10 @@ static inline void
 _Py_atomic_fence_seq_cst(void)
 { __atomic_thread_fence(__ATOMIC_SEQ_CST); }
 
+static inline void
+_Py_atomic_fence_acquire(void)
+{ __atomic_thread_fence(__ATOMIC_ACQUIRE); }
+
 static inline void
 _Py_atomic_fence_release(void)
 { __atomic_thread_fence(__ATOMIC_RELEASE); }
 
Include/cpython/pyatomic_msc.h

@@ -1066,6 +1066,18 @@ _Py_atomic_fence_seq_cst(void)
 #else
 # error "no implementation of _Py_atomic_fence_seq_cst"
 #endif
 }
 
+static inline void
+_Py_atomic_fence_acquire(void)
+{
+#if defined(_M_ARM64)
+    __dmb(_ARM64_BARRIER_ISHLD);
+#elif defined(_M_X64) || defined(_M_IX86)
+    _ReadBarrier();
+#else
+# error "no implementation of _Py_atomic_fence_acquire"
+#endif
+}
+
 static inline void
 
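Two details of the MSVC implementation above are worth spelling out: `__dmb(_ARM64_BARRIER_ISHLD)` emits a `dmb ishld`, which orders all prior loads before subsequent loads and stores, i.e. a hardware acquire fence; on x86/x64, where the hardware does not reorder loads with other loads, `_ReadBarrier()` is a compiler-only barrier, which is all an acquire fence needs to be on that architecture.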
Include/cpython/pyatomic_std.h

@@ -961,6 +961,13 @@ _Py_atomic_fence_seq_cst(void)
     atomic_thread_fence(memory_order_seq_cst);
 }
 
+static inline void
+_Py_atomic_fence_acquire(void)
+{
+    _Py_USING_STD;
+    atomic_thread_fence(memory_order_acquire);
+}
+
 static inline void
 _Py_atomic_fence_release(void)
 {
 
Include/internal/pycore_lock.h

@@ -228,12 +228,12 @@ PyAPI_FUNC(void) _PySeqLock_AbandonWrite(_PySeqLock *seqlock);
 PyAPI_FUNC(uint32_t) _PySeqLock_BeginRead(_PySeqLock *seqlock);
 
 // End the read operation and confirm that the sequence number has not changed.
-// Returns 1 if the read was successful or 0 if the read should be re-tried.
-PyAPI_FUNC(uint32_t) _PySeqLock_EndRead(_PySeqLock *seqlock, uint32_t previous);
+// Returns 1 if the read was successful or 0 if the read should be retried.
+PyAPI_FUNC(int) _PySeqLock_EndRead(_PySeqLock *seqlock, uint32_t previous);
 
 // Check if the lock was held during a fork and clear the lock. Returns 1
-// if the lock was held and any associated datat should be cleared.
-PyAPI_FUNC(uint32_t) _PySeqLock_AfterFork(_PySeqLock *seqlock);
+// if the lock was held and any associated data should be cleared.
+PyAPI_FUNC(int) _PySeqLock_AfterFork(_PySeqLock *seqlock);
 
 #ifdef __cplusplus
 }
 
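As a usage sketch, here is how a reader and a writer might drive this API, loosely modeled on the type method-cache code in Objects/typeobject.c further down. The cache_entry struct and the read_entry/write_entry helpers are hypothetical, reference counting is ignored, and _PySeqLock_UnlockWrite plus the _Py_atomic_*_ptr_relaxed helpers are assumed to be the usual CPython internals.

// Hypothetical cache entry protected by a seqlock; illustrative only.
typedef struct {
    _PySeqLock sequence;
    PyObject *value;
} cache_entry;

static PyObject *
read_entry(cache_entry *entry)
{
    while (1) {
        uint32_t sequence = _PySeqLock_BeginRead(&entry->sequence);
        PyObject *value = _Py_atomic_load_ptr_relaxed(&entry->value);
        if (_PySeqLock_EndRead(&entry->sequence, sequence)) {
            return value;  // the reads above were consistent
        }
        // a writer raced with us; loop and retry
    }
}

static void
write_entry(cache_entry *entry, PyObject *value)
{
    _PySeqLock_LockWrite(&entry->sequence);
    _Py_atomic_store_ptr_relaxed(&entry->value, value);
    _PySeqLock_UnlockWrite(&entry->sequence);
}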
Misc/NEWS.d/next/Core and Builtins/… (new file)

@@ -0,0 +1,3 @@
+Fix race condition in ``_PyType_Lookup`` in the free-threaded build due to
+a missing memory fence. This could lead to ``_PyType_Lookup`` returning
+incorrect results on arm64.
 
Modules/_testcapi/pyatomic.c

@@ -125,6 +125,7 @@ test_atomic_fences(PyObject *self, PyObject *obj) {
     // Just make sure that the fences compile. We are not
     // testing any synchronizing ordering.
     _Py_atomic_fence_seq_cst();
+    _Py_atomic_fence_acquire();
    _Py_atomic_fence_release();
     Py_RETURN_NONE;
 }
 
Objects/typeobject.c

@@ -5387,7 +5387,7 @@ _PyType_LookupRef(PyTypeObject *type, PyObject *name)
 #ifdef Py_GIL_DISABLED
     // synchronize-with other writing threads by doing an acquire load on the sequence
     while (1) {
-        int sequence = _PySeqLock_BeginRead(&entry->sequence);
+        uint32_t sequence = _PySeqLock_BeginRead(&entry->sequence);
         uint32_t entry_version = _Py_atomic_load_uint32_relaxed(&entry->version);
         uint32_t type_version = _Py_atomic_load_uint32_acquire(&type->tp_version_tag);
         if (entry_version == type_version &&
 
Python/lock.c

@@ -514,6 +514,7 @@ void _PySeqLock_LockWrite(_PySeqLock *seqlock)
         }
         else if (_Py_atomic_compare_exchange_uint32(&seqlock->sequence, &prev, prev + 1)) {
             // We've locked the cache
+            _Py_atomic_fence_release();
             break;
         }
         else {
@@ -547,28 +548,31 @@ uint32_t _PySeqLock_BeginRead(_PySeqLock *seqlock)
     return sequence;
 }
 
-uint32_t _PySeqLock_EndRead(_PySeqLock *seqlock, uint32_t previous)
+int _PySeqLock_EndRead(_PySeqLock *seqlock, uint32_t previous)
 {
-    // Synchronize again and validate that the entry hasn't been updated
-    // while we were readying the values.
-    if (_Py_atomic_load_uint32_acquire(&seqlock->sequence) == previous) {
-        return 1;
-    }
+    // gh-121368: We need an explicit acquire fence here to ensure that
+    // this load of the sequence number is not reordered before any loads
+    // within the read lock.
+    _Py_atomic_fence_acquire();
+
+    if (_Py_atomic_load_uint32_relaxed(&seqlock->sequence) == previous) {
+        return 1;
+    }
 
     _Py_yield();
     return 0;
 }
 
-uint32_t _PySeqLock_AfterFork(_PySeqLock *seqlock)
+int _PySeqLock_AfterFork(_PySeqLock *seqlock)
 {
     // Synchronize again and validate that the entry hasn't been updated
     // while we were readying the values.
     if (SEQLOCK_IS_UPDATING(seqlock->sequence)) {
         seqlock->sequence = 0;
         return 1;
     }
 
     return 0;
 }
 
 #undef PyMutex_Lock
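The shape of the new `_PySeqLock_EndRead` is the crux of the fix. The old code re-loaded the sequence with a load-acquire, but acquire semantics only prevent *later* memory operations from moving before the load; they do nothing for the cache-entry loads that already happened between `_PySeqLock_BeginRead` and the check, which were therefore free to be reordered past the sequence re-load on weakly ordered hardware. A standalone acquire fence (or a read-modify-write on the sequence) is needed to pin those earlier loads, which is why the race surfaced on arm64 while x86, which does not reorder loads with loads, masked it.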