diff --git a/Include/internal/pycore_interp.h b/Include/internal/pycore_interp.h
index 251ee06ed4b..fafc72eb97a 100644
--- a/Include/internal/pycore_interp.h
+++ b/Include/internal/pycore_interp.h
@@ -42,6 +42,8 @@ struct _ceval_state {
     /* This single variable consolidates all requests to break out of
        the fast path in the eval loop. */
     _Py_atomic_int eval_breaker;
+    /* Request for dropping the GIL */
+    _Py_atomic_int gil_drop_request;
     struct _pending_calls pending;
     /* Request for checking signals. */
     _Py_atomic_int signals_pending;
diff --git a/Include/internal/pycore_runtime.h b/Include/internal/pycore_runtime.h
index d432c6cc511..c5973355916 100644
--- a/Include/internal/pycore_runtime.h
+++ b/Include/internal/pycore_runtime.h
@@ -15,8 +15,6 @@ extern "C" {
 
 struct _ceval_runtime_state {
     int recursion_limit;
-    /* Request for dropping the GIL */
-    _Py_atomic_int gil_drop_request;
     struct _gil_runtime_state gil;
 };
 
diff --git a/Python/ceval.c b/Python/ceval.c
index addc0264b17..6b002730c8d 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -143,77 +143,70 @@ is_tstate_valid(PyThreadState *tstate)
    the GIL eventually anyway. */
 static inline void
 COMPUTE_EVAL_BREAKER(PyInterpreterState *interp,
-                     struct _ceval_runtime_state *ceval,
-                     struct _ceval_state *ceval2)
+                     struct _ceval_state *ceval)
 {
-    _Py_atomic_store_relaxed(&ceval2->eval_breaker,
+    _Py_atomic_store_relaxed(&ceval->eval_breaker,
         _Py_atomic_load_relaxed(&ceval->gil_drop_request)
-        | (_Py_atomic_load_relaxed(&ceval2->signals_pending)
+        | (_Py_atomic_load_relaxed(&ceval->signals_pending)
            && _Py_ThreadCanHandleSignals(interp))
-        | (_Py_atomic_load_relaxed(&ceval2->pending.calls_to_do)
+        | (_Py_atomic_load_relaxed(&ceval->pending.calls_to_do)
            && _Py_ThreadCanHandlePendingCalls())
-        | ceval2->pending.async_exc);
+        | ceval->pending.async_exc);
 }
 
 
 static inline void
 SET_GIL_DROP_REQUEST(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
+    struct _ceval_state *ceval = &interp->ceval;
     _Py_atomic_store_relaxed(&ceval->gil_drop_request, 1);
-    _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
+    _Py_atomic_store_relaxed(&ceval->eval_breaker, 1);
 }
 
 
 static inline void
 RESET_GIL_DROP_REQUEST(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
+    struct _ceval_state *ceval = &interp->ceval;
     _Py_atomic_store_relaxed(&ceval->gil_drop_request, 0);
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }
 
 
 static inline void
 SIGNAL_PENDING_CALLS(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 1);
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    struct _ceval_state *ceval = &interp->ceval;
+    _Py_atomic_store_relaxed(&ceval->pending.calls_to_do, 1);
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }
 
 
 static inline void
 UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 0);
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    struct _ceval_state *ceval = &interp->ceval;
+    _Py_atomic_store_relaxed(&ceval->pending.calls_to_do, 0);
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }
 
 
 static inline void
 SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    _Py_atomic_store_relaxed(&ceval2->signals_pending, 1);
+    struct _ceval_state *ceval = &interp->ceval;
+    _Py_atomic_store_relaxed(&ceval->signals_pending, 1);
     /* eval_breaker is not set to 1 if thread_can_handle_signals() is false */
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }
 
 
 static inline void
 UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    _Py_atomic_store_relaxed(&ceval2->signals_pending, 0);
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    struct _ceval_state *ceval = &interp->ceval;
+    _Py_atomic_store_relaxed(&ceval->signals_pending, 0);
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }
 
 
@@ -229,10 +222,9 @@ SIGNAL_ASYNC_EXC(PyInterpreterState *interp)
 static inline void
 UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    ceval2->pending.async_exc = 0;
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    struct _ceval_state *ceval = &interp->ceval;
+    ceval->pending.async_exc = 0;
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }
@@ -357,17 +349,19 @@ PyEval_ReleaseLock(void)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
     PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
+    struct _ceval_state *ceval2 = &tstate->interp->ceval;
     /* This function must succeed when the current thread state is NULL.
        We therefore avoid PyThreadState_Get() which dumps a fatal error
        in debug mode. */
-    drop_gil(&runtime->ceval, tstate);
+    drop_gil(&runtime->ceval, ceval2, tstate);
 }
 
 void
 _PyEval_ReleaseLock(PyThreadState *tstate)
 {
     struct _ceval_runtime_state *ceval = &tstate->interp->runtime->ceval;
-    drop_gil(ceval, tstate);
+    struct _ceval_state *ceval2 = &tstate->interp->ceval;
+    drop_gil(ceval, ceval2, tstate);
 }
 
 void
@@ -393,7 +387,9 @@ PyEval_ReleaseThread(PyThreadState *tstate)
     if (new_tstate != tstate) {
         Py_FatalError("wrong thread state");
     }
-    drop_gil(&runtime->ceval, tstate);
+    struct _ceval_runtime_state *ceval = &runtime->ceval;
+    struct _ceval_state *ceval2 = &tstate->interp->ceval;
+    drop_gil(ceval, ceval2, tstate);
 }
 
 #ifdef HAVE_FORK
@@ -439,13 +435,14 @@ PyThreadState *
 PyEval_SaveThread(void)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
-    struct _ceval_runtime_state *ceval = &runtime->ceval;
     PyThreadState *tstate = _PyThreadState_Swap(&runtime->gilstate, NULL);
     ensure_tstate_not_null(__func__, tstate);
 
+    struct _ceval_runtime_state *ceval = &runtime->ceval;
+    struct _ceval_state *ceval2 = &tstate->interp->ceval;
     assert(gil_created(&ceval->gil));
-    drop_gil(ceval, tstate);
+    drop_gil(ceval, ceval2, tstate);
 
     return tstate;
 }
@@ -847,12 +844,12 @@ eval_frame_handle_pending(PyThreadState *tstate)
     }
 
     /* GIL drop request */
-    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
+    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) {
         /* Give another thread a chance */
         if (_PyThreadState_Swap(&runtime->gilstate, NULL) != tstate) {
             Py_FatalError("tstate mix-up");
         }
-        drop_gil(ceval, tstate);
+        drop_gil(ceval, ceval2, tstate);
 
         /* Other threads may run now */
diff --git a/Python/ceval_gil.h b/Python/ceval_gil.h
index a025a9fad12..db47077d5c1 100644
--- a/Python/ceval_gil.h
+++ b/Python/ceval_gil.h
@@ -141,7 +141,8 @@ static void recreate_gil(struct _gil_runtime_state *gil)
 }
 
 static void
-drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
+drop_gil(struct _ceval_runtime_state *ceval, struct _ceval_state *ceval2,
+         PyThreadState *tstate)
 {
     struct _gil_runtime_state *gil = &ceval->gil;
     if (!_Py_atomic_load_relaxed(&gil->locked)) {
@@ -163,7 +164,7 @@ drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
     MUTEX_UNLOCK(gil->mutex);
 
 #ifdef FORCE_SWITCHING
-    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request) && tstate != NULL) {
+    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request) && tstate != NULL) {
         MUTEX_LOCK(gil->switch_mutex);
         /* Not switched yet => wait */
         if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
@@ -226,6 +227,7 @@ take_gil(PyThreadState *tstate)
     assert(is_tstate_valid(tstate));
     PyInterpreterState *interp = tstate->interp;
     struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
+    struct _ceval_state *ceval2 = &interp->ceval;
     struct _gil_runtime_state *gil = &ceval->gil;
 
     /* Check that _PyEval_InitThreads() was called to create the lock */
@@ -289,12 +291,12 @@ _ready:
            in take_gil() while the main thread called
            wait_for_thread_shutdown() from Py_Finalize(). */
         MUTEX_UNLOCK(gil->mutex);
-        drop_gil(ceval, tstate);
+        drop_gil(ceval, ceval2, tstate);
         PyThread_exit_thread();
     }
     assert(is_tstate_valid(tstate));
 
-    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
+    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) {
         RESET_GIL_DROP_REQUEST(interp);
     }
     else {
@@ -303,8 +305,7 @@ _ready:
            handle signals.
 
            Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */
-        struct _ceval_state *ceval2 = &interp->ceval;
-        COMPUTE_EVAL_BREAKER(interp, ceval2);
+        COMPUTE_EVAL_BREAKER(interp, ceval2);
     }
 
     /* Don't access tstate if the thread must exit */
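For illustration only (not part of the patch): a minimal standalone C model of the
consolidated breaker computation after this change. It is a sketch, not CPython code:
plain ints stand in for _Py_atomic_int, and the two boolean parameters stand in for
the _Py_ThreadCanHandleSignals() / _Py_ThreadCanHandlePendingCalls() checks.

#include <stdio.h>

/* Hypothetical model of the per-interpreter _ceval_state after this patch:
   gil_drop_request now lives next to the other eval_breaker inputs. */
struct ceval_state_model {
    int eval_breaker;       /* 1 => leave the eval loop fast path */
    int gil_drop_request;   /* moved here from the runtime state by this patch */
    int signals_pending;
    int calls_to_do;
    int async_exc;
};

static void
compute_eval_breaker(struct ceval_state_model *ceval,
                     int can_handle_signals, int can_handle_pending)
{
    /* Same OR-of-conditions shape as COMPUTE_EVAL_BREAKER() in the patch:
       every reason to break out of the fast path folds into one flag. */
    ceval->eval_breaker =
        ceval->gil_drop_request
        | (ceval->signals_pending && can_handle_signals)
        | (ceval->calls_to_do && can_handle_pending)
        | ceval->async_exc;
}

int main(void)
{
    struct ceval_state_model ceval = {0, 0, 1, 0, 0};

    /* A pending signal alone does not set the breaker for a thread
       that cannot handle signals... */
    compute_eval_breaker(&ceval, /*can_handle_signals=*/0, /*can_handle_pending=*/1);
    printf("breaker=%d\n", ceval.eval_breaker);   /* 0 */

    /* ...but a GIL drop request always does. */
    ceval.gil_drop_request = 1;
    compute_eval_breaker(&ceval, 0, 1);
    printf("breaker=%d\n", ceval.eval_breaker);   /* 1 */
    return 0;
}

One practical consequence of the move is visible in the model: since every input to
the computation now sits in the same per-interpreter struct, COMPUTE_EVAL_BREAKER()
no longer needs both a runtime-state and an interpreter-state argument, which is why
the patch collapses the (ceval, ceval2) pair into a single parameter there while
drop_gil() temporarily gains a second one.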