gh-109693: Update _gil_runtime_state.last_holder to use pyatomic.h (#110605)

This commit is contained in:
Donghee Na 2023-10-13 10:07:27 +09:00 committed by GitHub
parent 4a53a397c3
commit 2566434e59
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 9 additions and 9 deletions

View File

@@ -8,7 +8,7 @@ extern "C" {
 #  error "this header requires Py_BUILD_CORE define"
 #endif

-#include "pycore_atomic.h"    // _Py_atomic_address
+#include "pycore_atomic.h"    // _Py_atomic_int
 #include "pycore_condvar.h"   // PyCOND_T

 #ifndef Py_HAVE_CONDVAR
@@ -25,7 +25,7 @@ struct _gil_runtime_state {
     unsigned long interval;
     /* Last PyThreadState holding / having held the GIL. This helps us
        know whether anyone else was scheduled after we dropped the GIL. */
-    _Py_atomic_address last_holder;
+    PyThreadState* last_holder;
     /* Whether the GIL is already taken (-1 if uninitialized). This is
        atomic because it can be read without any lock taken in ceval.c. */
     _Py_atomic_int locked;

View File

@@ -189,7 +189,7 @@ static void create_gil(struct _gil_runtime_state *gil)
 #ifdef FORCE_SWITCHING
     COND_INIT(gil->switch_cond);
 #endif
-    _Py_atomic_store_relaxed(&gil->last_holder, 0);
+    _Py_atomic_store_ptr_relaxed(&gil->last_holder, 0);
     _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
     _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
 }
@@ -233,7 +233,7 @@ drop_gil(PyInterpreterState *interp, PyThreadState *tstate)
     // XXX assert(tstate == NULL || !tstate->_status.cleared);

     struct _gil_runtime_state *gil = ceval->gil;
-    if (!_Py_atomic_load_relaxed(&gil->locked)) {
+    if (!_Py_atomic_load_ptr_relaxed(&gil->locked)) {
         Py_FatalError("drop_gil: GIL is not locked");
     }

NOTE(review): the new side applies `_Py_atomic_load_ptr_relaxed` to `gil->locked`, but the header hunk in this same commit leaves `locked` declared as `_Py_atomic_int` — only `last_holder` became a pointer. A pointer-sized atomic load of an int field looks like an over-broad search-and-replace; confirm against the follow-up commits in the upstream repository.
@@ -242,7 +242,7 @@ drop_gil(PyInterpreterState *interp, PyThreadState *tstate)
         /* Sub-interpreter support: threads might have been switched
            under our feet using PyThreadState_Swap(). Fix the GIL last
            holder variable so that our heuristics work. */
-        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
+        _Py_atomic_store_ptr_relaxed(&gil->last_holder, tstate);
     }

     MUTEX_LOCK(gil->mutex);
@@ -263,7 +263,7 @@ drop_gil(PyInterpreterState *interp, PyThreadState *tstate)
     if (tstate != NULL && _Py_eval_breaker_bit_is_set(interp, _PY_GIL_DROP_REQUEST_BIT)) {
         MUTEX_LOCK(gil->switch_mutex);
         /* Not switched yet => wait */
-        if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
+        if (((PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) == tstate)
         {
             assert(_PyThreadState_CheckConsistency(tstate));
             RESET_GIL_DROP_REQUEST(tstate->interp);
@@ -361,8 +361,8 @@ _ready:
     _Py_atomic_store_relaxed(&gil->locked, 1);
     _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);

-    if (tstate != (PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) {
-        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
+    if (tstate != (PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) {
+        _Py_atomic_store_ptr_relaxed(&gil->last_holder, tstate);
         ++gil->switch_number;
     }
@@ -434,7 +434,7 @@ PyEval_ThreadsInitialized(void)
 static inline int
 current_thread_holds_gil(struct _gil_runtime_state *gil, PyThreadState *tstate)
 {
-    if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) != tstate) {
+    if (((PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) != tstate) {
         return 0;
     }
     return _Py_atomic_load_relaxed(&gil->locked);