Revert "bpo-40513: Per-interpreter signals pending (GH-19924)" (GH-19932)

This reverts commit 4e01946caf.
This commit is contained in:
Victor Stinner 2020-05-05 17:40:18 +02:00 committed by GitHub
parent 6351d9e440
commit 299b8c61e9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 42 additions and 32 deletions

View File

@@ -46,8 +46,6 @@ struct _ceval_state {
/* Request for dropping the GIL */ /* Request for dropping the GIL */
_Py_atomic_int gil_drop_request; _Py_atomic_int gil_drop_request;
struct _pending_calls pending; struct _pending_calls pending;
/* Request for checking signals. */
_Py_atomic_int signals_pending;
}; };

View File

@@ -14,6 +14,11 @@ extern "C" {
/* ceval state */ /* ceval state */
struct _ceval_runtime_state { struct _ceval_runtime_state {
/* Request for checking signals. It is shared by all interpreters (see
bpo-40513). Any thread of any interpreter can receive a signal, but only
the main thread of the main interpreter can handle signals: see
_Py_ThreadCanHandleSignals(). */
_Py_atomic_int signals_pending;
struct _gil_runtime_state gil; struct _gil_runtime_state gil;
}; };

View File

@@ -143,70 +143,76 @@ is_tstate_valid(PyThreadState *tstate)
the GIL eventually anyway. */ the GIL eventually anyway. */
static inline void static inline void
COMPUTE_EVAL_BREAKER(PyInterpreterState *interp, COMPUTE_EVAL_BREAKER(PyInterpreterState *interp,
struct _ceval_state *ceval) struct _ceval_runtime_state *ceval,
struct _ceval_state *ceval2)
{ {
_Py_atomic_store_relaxed(&ceval->eval_breaker, _Py_atomic_store_relaxed(&ceval2->eval_breaker,
_Py_atomic_load_relaxed(&ceval->gil_drop_request) _Py_atomic_load_relaxed(&ceval2->gil_drop_request)
| (_Py_atomic_load_relaxed(&ceval->signals_pending) | (_Py_atomic_load_relaxed(&ceval->signals_pending)
&& _Py_ThreadCanHandleSignals(interp)) && _Py_ThreadCanHandleSignals(interp))
| (_Py_atomic_load_relaxed(&ceval->pending.calls_to_do) | (_Py_atomic_load_relaxed(&ceval2->pending.calls_to_do)
&& _Py_ThreadCanHandlePendingCalls()) && _Py_ThreadCanHandlePendingCalls())
| ceval->pending.async_exc); | ceval2->pending.async_exc);
} }
static inline void static inline void
SET_GIL_DROP_REQUEST(PyInterpreterState *interp) SET_GIL_DROP_REQUEST(PyInterpreterState *interp)
{ {
struct _ceval_state *ceval = &interp->ceval; struct _ceval_state *ceval2 = &interp->ceval;
_Py_atomic_store_relaxed(&ceval->gil_drop_request, 1); _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 1);
_Py_atomic_store_relaxed(&ceval->eval_breaker, 1); _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
} }
static inline void static inline void
RESET_GIL_DROP_REQUEST(PyInterpreterState *interp) RESET_GIL_DROP_REQUEST(PyInterpreterState *interp)
{ {
struct _ceval_state *ceval = &interp->ceval; struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
_Py_atomic_store_relaxed(&ceval->gil_drop_request, 0); struct _ceval_state *ceval2 = &interp->ceval;
COMPUTE_EVAL_BREAKER(interp, ceval); _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 0);
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
} }
static inline void static inline void
SIGNAL_PENDING_CALLS(PyInterpreterState *interp) SIGNAL_PENDING_CALLS(PyInterpreterState *interp)
{ {
struct _ceval_state *ceval = &interp->ceval; struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
_Py_atomic_store_relaxed(&ceval->pending.calls_to_do, 1); struct _ceval_state *ceval2 = &interp->ceval;
COMPUTE_EVAL_BREAKER(interp, ceval); _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 1);
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
} }
static inline void static inline void
UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp) UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)
{ {
struct _ceval_state *ceval = &interp->ceval; struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
_Py_atomic_store_relaxed(&ceval->pending.calls_to_do, 0); struct _ceval_state *ceval2 = &interp->ceval;
COMPUTE_EVAL_BREAKER(interp, ceval); _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 0);
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
} }
static inline void static inline void
SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp) SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp)
{ {
struct _ceval_state *ceval = &interp->ceval; struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
struct _ceval_state *ceval2 = &interp->ceval;
_Py_atomic_store_relaxed(&ceval->signals_pending, 1); _Py_atomic_store_relaxed(&ceval->signals_pending, 1);
/* eval_breaker is not set to 1 if thread_can_handle_signals() is false */ /* eval_breaker is not set to 1 if thread_can_handle_signals() is false */
COMPUTE_EVAL_BREAKER(interp, ceval); COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
} }
static inline void static inline void
UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp) UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp)
{ {
struct _ceval_state *ceval = &interp->ceval; struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
struct _ceval_state *ceval2 = &interp->ceval;
_Py_atomic_store_relaxed(&ceval->signals_pending, 0); _Py_atomic_store_relaxed(&ceval->signals_pending, 0);
COMPUTE_EVAL_BREAKER(interp, ceval); COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
} }
@@ -222,9 +228,10 @@ SIGNAL_ASYNC_EXC(PyInterpreterState *interp)
static inline void static inline void
UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp) UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp)
{ {
struct _ceval_state *ceval = &interp->ceval; struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
ceval->pending.async_exc = 0; struct _ceval_state *ceval2 = &interp->ceval;
COMPUTE_EVAL_BREAKER(interp, ceval); ceval2->pending.async_exc = 0;
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
} }
@@ -349,11 +356,12 @@ PyEval_ReleaseLock(void)
{ {
_PyRuntimeState *runtime = &_PyRuntime; _PyRuntimeState *runtime = &_PyRuntime;
PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime); PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
struct _ceval_state *ceval2 = &tstate->interp->ceval;
/* This function must succeed when the current thread state is NULL. /* This function must succeed when the current thread state is NULL.
We therefore avoid PyThreadState_Get() which dumps a fatal error We therefore avoid PyThreadState_Get() which dumps a fatal error
in debug mode. */ in debug mode. */
drop_gil(&runtime->ceval, ceval2, tstate); struct _ceval_runtime_state *ceval = &runtime->ceval;
struct _ceval_state *ceval2 = &tstate->interp->ceval;
drop_gil(ceval, ceval2, tstate);
} }
void void
@@ -435,7 +443,6 @@ PyThreadState *
PyEval_SaveThread(void) PyEval_SaveThread(void)
{ {
_PyRuntimeState *runtime = &_PyRuntime; _PyRuntimeState *runtime = &_PyRuntime;
PyThreadState *tstate = _PyThreadState_Swap(&runtime->gilstate, NULL); PyThreadState *tstate = _PyThreadState_Swap(&runtime->gilstate, NULL);
ensure_tstate_not_null(__func__, tstate); ensure_tstate_not_null(__func__, tstate);
@@ -831,16 +838,16 @@ eval_frame_handle_pending(PyThreadState *tstate)
{ {
_PyRuntimeState * const runtime = &_PyRuntime; _PyRuntimeState * const runtime = &_PyRuntime;
struct _ceval_runtime_state *ceval = &runtime->ceval; struct _ceval_runtime_state *ceval = &runtime->ceval;
struct _ceval_state *ceval2 = &tstate->interp->ceval;
/* Pending signals */ /* Pending signals */
if (_Py_atomic_load_relaxed(&ceval2->signals_pending)) { if (_Py_atomic_load_relaxed(&ceval->signals_pending)) {
if (handle_signals(tstate) != 0) { if (handle_signals(tstate) != 0) {
return -1; return -1;
} }
} }
/* Pending calls */ /* Pending calls */
struct _ceval_state *ceval2 = &tstate->interp->ceval;
if (_Py_atomic_load_relaxed(&ceval2->pending.calls_to_do)) { if (_Py_atomic_load_relaxed(&ceval2->pending.calls_to_do)) {
if (make_pending_calls(tstate) != 0) { if (make_pending_calls(tstate) != 0) {
return -1; return -1;

View File

@@ -305,7 +305,7 @@ _ready:
handle signals. handle signals.
Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */ Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */
COMPUTE_EVAL_BREAKER(interp, ceval2); COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
} }
/* Don't access tstate if the thread must exit */ /* Don't access tstate if the thread must exit */