bpo-33608: Factor out a private, per-interpreter _Py_AddPendingCall(). (gh-13714)
commit 6a150bcaeb (parent 218e47b618)
@ -12,19 +12,22 @@ extern "C" {
|
|||
#include "pycore_pystate.h"
|
||||
#include "pythread.h"
|
||||
|
||||
PyAPI_FUNC(void) _Py_FinishPendingCalls(_PyRuntimeState *runtime);
|
||||
PyAPI_FUNC(void) _PyEval_Initialize(struct _ceval_runtime_state *);
|
||||
PyAPI_FUNC(void) _PyEval_FiniThreads(
|
||||
struct _ceval_runtime_state *ceval);
|
||||
struct _ceval_runtime_state *);
|
||||
PyAPI_FUNC(void) _PyEval_SignalReceived(
|
||||
struct _ceval_runtime_state *ceval);
|
||||
struct _ceval_runtime_state *);
|
||||
PyAPI_FUNC(int) _PyEval_AddPendingCall(
|
||||
PyThreadState *tstate,
|
||||
struct _ceval_runtime_state *ceval,
|
||||
struct _ceval_runtime_state *,
|
||||
struct _ceval_interpreter_state *,
|
||||
unsigned long thread_id,
|
||||
int (*func)(void *),
|
||||
void *arg);
|
||||
PyAPI_FUNC(void) _PyEval_FinishPendingCalls(PyInterpreterState *);
|
||||
PyAPI_FUNC(void) _PyEval_SignalAsyncExc(
|
||||
struct _ceval_runtime_state *ceval);
|
||||
struct _ceval_runtime_state *,
|
||||
struct _ceval_interpreter_state *);
|
||||
PyAPI_FUNC(void) _PyEval_ReInitThreads(
|
||||
_PyRuntimeState *runtime);
|
||||
|
||||
|
|
|
Include/internal/pycore_pystate.h

@@ -25,7 +25,7 @@ struct pyruntimestate;
 
 /* ceval state */
 
-struct _pending_calls {
+struct _ceval_pending_calls {
     int finishing;
     PyThread_type_lock lock;
     /* Request for running pending calls. */
@@ -36,6 +36,7 @@ struct _pending_calls {
     int async_exc;
 #define NPENDINGCALLS 32
     struct {
+        unsigned long thread_id;
         int (*func)(void *);
        void *arg;
     } calls[NPENDINGCALLS];
@@ -53,15 +54,21 @@ struct _ceval_runtime_state {
     int tracing_possible;
     /* This single variable consolidates all requests to break out of
        the fast path in the eval loop. */
+    // XXX This can move to _ceval_interpreter_state once all parts
+    // from COMPUTE_EVAL_BREAKER have moved under PyInterpreterState.
     _Py_atomic_int eval_breaker;
     /* Request for dropping the GIL */
     _Py_atomic_int gil_drop_request;
-    struct _pending_calls pending;
     /* Request for checking signals. */
     _Py_atomic_int signals_pending;
     struct _gil_runtime_state gil;
 };
 
+struct _ceval_interpreter_state {
+    struct _ceval_pending_calls pending;
+};
+
+
 /* interpreter state */
 
 typedef PyObject* (*_PyFrameEvalFunction)(struct _frame *, int);
@@ -136,6 +143,7 @@ struct _is {
 
     uint64_t tstate_next_unique_id;
 
+    struct _ceval_interpreter_state ceval;
     struct _warnings_runtime_state warnings;
 
     PyObject *audit_hooks;
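Taken together, the header changes above split the ceval state in two: signal- and GIL-related flags stay in the runtime-wide state, while the pending-calls queue becomes per-interpreter. A summary sketch of the resulting layout (editorial aid, not code from the patch):

    /* _PyRuntime.ceval -- struct _ceval_runtime_state, shared by all
     * interpreters:
     *     tracing_possible, eval_breaker, gil_drop_request,
     *     signals_pending, gil
     *
     * interp->ceval -- struct _ceval_interpreter_state, one per
     * interpreter:
     *     pending -- struct _ceval_pending_calls:
     *         finishing, lock, calls_to_do, async_exc,
     *         calls[NPENDINGCALLS] (entries now tagged with thread_id),
     *         first, last
     */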
Lib/test/test_capi.py

@@ -431,7 +431,7 @@ class TestPendingCalls(unittest.TestCase):
     def test_pendingcalls_threaded(self):
 
         #do every callback on a separate thread
-        n = 32 #total callbacks
+        n = 32 #total callbacks (see NPENDINGCALLS in pycore_ceval.h)
         threads = []
         class foo(object):pass
         context = foo()
Misc/NEWS.d entry (new file)

@@ -0,0 +1,5 @@
+We added a new internal _Py_AddPendingCall() that operates relative to the
+provided interpreter.  This allows us to use the existing implementation to
+ask another interpreter to do work that cannot be done in the current
+interpreter, like decref an object the other interpreter owns.  The existing
+Py_AddPendingCall() only operates relative to the main interpreter.
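To make the NEWS entry concrete, here is a minimal sketch of how CPython-internal code could ask another interpreter to decref an object it owns, using the signature this commit gives _PyEval_AddPendingCall(). The helper names decref_callback and schedule_decref are hypothetical; the call shape mirrors _PyCrossInterpreterData_Release() in Python/pystate.c below, and a thread_id of 0 means any thread of that interpreter may run the callback:

    /* Hypothetical CPython-internal helpers (sketch only). */
    static int
    decref_callback(void *arg)
    {
        /* Runs later, with the GIL held, in the owning interpreter. */
        Py_DECREF((PyObject *)arg);
        return 0;                       /* 0 == no exception raised */
    }

    static int
    schedule_decref(PyInterpreterState *interp, PyObject *obj)
    {
        _PyRuntimeState *runtime = &_PyRuntime;
        PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
        return _PyEval_AddPendingCall(tstate,
                                      &runtime->ceval, &interp->ceval,
                                      0 /* any thread */,
                                      decref_callback, obj);
    }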
Modules/_testcapimodule.c

@@ -2677,6 +2677,7 @@ pending_threadfunc(PyObject *self, PyObject *arg)
     Py_INCREF(callable);
 
     Py_BEGIN_ALLOW_THREADS
+    /* XXX Use the internal _Py_AddPendingCall(). */
     r = Py_AddPendingCall(&_pending_callback, callable);
     Py_END_ALLOW_THREADS
Modules/signalmodule.c

@@ -21,6 +21,7 @@
 #include <process.h>
 #endif
 #endif
+#include "internal/pycore_pystate.h"
 
 #ifdef HAVE_SIGNAL_H
 #include <signal.h>
@@ -259,6 +260,7 @@ trip_signal(int sig_num)
     /* Notify ceval.c */
     _PyRuntimeState *runtime = &_PyRuntime;
     PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
+    PyInterpreterState *interp = runtime->interpreters.main;
     _PyEval_SignalReceived(&runtime->ceval);
 
     /* And then write to the wakeup fd *after* setting all the globals and
@@ -299,7 +301,10 @@ trip_signal(int sig_num)
         {
             /* Py_AddPendingCall() isn't signal-safe, but we
                still use it for this exceptional case. */
-            _PyEval_AddPendingCall(tstate, &runtime->ceval,
+            _PyEval_AddPendingCall(tstate,
+                                   &runtime->ceval,
+                                   &interp->ceval,
+                                   runtime->main_thread,
                                    report_wakeup_send_error,
                                    (void *)(intptr_t) last_error);
         }
@@ -318,7 +323,10 @@ trip_signal(int sig_num)
         {
             /* Py_AddPendingCall() isn't signal-safe, but we
                still use it for this exceptional case. */
-            _PyEval_AddPendingCall(tstate, &runtime->ceval,
+            _PyEval_AddPendingCall(tstate,
+                                   &runtime->ceval,
+                                   &interp->ceval,
+                                   runtime->main_thread,
                                    report_wakeup_write_error,
                                    (void *)(intptr_t)errno);
         }
Python/ceval.c (336 changes)
@@ -101,66 +101,64 @@ static long dxp[256];
 #endif
 #endif
 
-#define GIL_REQUEST _Py_atomic_load_relaxed(&ceval->gil_drop_request)
-
 /* This can set eval_breaker to 0 even though gil_drop_request became
    1.  We believe this is all right because the eval loop will release
    the GIL eventually anyway. */
-#define COMPUTE_EVAL_BREAKER(ceval) \
+#define COMPUTE_EVAL_BREAKER(ceval_r, ceval_i) \
     _Py_atomic_store_relaxed( \
-        &(ceval)->eval_breaker, \
-        GIL_REQUEST | \
-        _Py_atomic_load_relaxed(&(ceval)->signals_pending) | \
-        _Py_atomic_load_relaxed(&(ceval)->pending.calls_to_do) | \
-        (ceval)->pending.async_exc)
+        &(ceval_r)->eval_breaker, \
+        _Py_atomic_load_relaxed(&(ceval_r)->gil_drop_request) | \
+        _Py_atomic_load_relaxed(&(ceval_r)->signals_pending) | \
+        _Py_atomic_load_relaxed(&(ceval_i)->pending.calls_to_do) | \
+        (ceval_i)->pending.async_exc)
 
-#define SET_GIL_DROP_REQUEST(ceval) \
+#define SET_GIL_DROP_REQUEST(ceval_r) \
     do { \
-        _Py_atomic_store_relaxed(&(ceval)->gil_drop_request, 1); \
-        _Py_atomic_store_relaxed(&(ceval)->eval_breaker, 1); \
+        _Py_atomic_store_relaxed(&(ceval_r)->gil_drop_request, 1); \
+        _Py_atomic_store_relaxed(&(ceval_r)->eval_breaker, 1); \
     } while (0)
 
-#define RESET_GIL_DROP_REQUEST(ceval) \
+#define RESET_GIL_DROP_REQUEST(ceval_r, ceval_i) \
     do { \
-        _Py_atomic_store_relaxed(&(ceval)->gil_drop_request, 0); \
-        COMPUTE_EVAL_BREAKER(ceval); \
+        _Py_atomic_store_relaxed(&(ceval_r)->gil_drop_request, 0); \
+        COMPUTE_EVAL_BREAKER(ceval_r, ceval_i); \
     } while (0)
 
 /* Pending calls are only modified under pending_lock */
-#define SIGNAL_PENDING_CALLS(ceval) \
+#define SIGNAL_PENDING_CALLS(ceval_r, ceval_i) \
     do { \
-        _Py_atomic_store_relaxed(&(ceval)->pending.calls_to_do, 1); \
-        _Py_atomic_store_relaxed(&(ceval)->eval_breaker, 1); \
+        _Py_atomic_store_relaxed(&(ceval_i)->pending.calls_to_do, 1); \
+        _Py_atomic_store_relaxed(&(ceval_r)->eval_breaker, 1); \
     } while (0)
 
-#define UNSIGNAL_PENDING_CALLS(ceval) \
+#define UNSIGNAL_PENDING_CALLS(ceval_r, ceval_i) \
     do { \
-        _Py_atomic_store_relaxed(&(ceval)->pending.calls_to_do, 0); \
-        COMPUTE_EVAL_BREAKER(ceval); \
+        _Py_atomic_store_relaxed(&(ceval_i)->pending.calls_to_do, 0); \
+        COMPUTE_EVAL_BREAKER(ceval_r, ceval_i); \
     } while (0)
 
-#define SIGNAL_PENDING_SIGNALS(ceval) \
+#define SIGNAL_PENDING_SIGNALS(ceval_r) \
     do { \
-        _Py_atomic_store_relaxed(&(ceval)->signals_pending, 1); \
-        _Py_atomic_store_relaxed(&(ceval)->eval_breaker, 1); \
+        _Py_atomic_store_relaxed(&(ceval_r)->signals_pending, 1); \
+        _Py_atomic_store_relaxed(&(ceval_r)->eval_breaker, 1); \
     } while (0)
 
-#define UNSIGNAL_PENDING_SIGNALS(ceval) \
+#define UNSIGNAL_PENDING_SIGNALS(ceval_r, ceval_i) \
     do { \
-        _Py_atomic_store_relaxed(&(ceval)->signals_pending, 0); \
-        COMPUTE_EVAL_BREAKER(ceval); \
+        _Py_atomic_store_relaxed(&(ceval_r)->signals_pending, 0); \
+        COMPUTE_EVAL_BREAKER(ceval_r, ceval_i); \
     } while (0)
 
-#define SIGNAL_ASYNC_EXC(ceval) \
+#define SIGNAL_ASYNC_EXC(ceval_r, ceval_i) \
     do { \
-        (ceval)->pending.async_exc = 1; \
-        _Py_atomic_store_relaxed(&(ceval)->eval_breaker, 1); \
+        (ceval_i)->pending.async_exc = 1; \
+        _Py_atomic_store_relaxed(&(ceval_r)->eval_breaker, 1); \
     } while (0)
 
-#define UNSIGNAL_ASYNC_EXC(ceval) \
+#define UNSIGNAL_ASYNC_EXC(ceval_r, ceval_i) \
     do { \
-        (ceval)->pending.async_exc = 0; \
-        COMPUTE_EVAL_BREAKER(ceval); \
+        (ceval_i)->pending.async_exc = 0; \
+        COMPUTE_EVAL_BREAKER(ceval_r, ceval_i); \
     } while (0)
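For orientation: once these macros are in place, the eval loop consumes the consolidated flag roughly as follows. This is a condensed sketch of the main_loop hunk further down, reusing the same ceval_r/ceval_i naming; it is not new code from the patch:

    if (_Py_atomic_load_relaxed(eval_breaker)) {
        if (_Py_atomic_load_relaxed(&ceval_r->signals_pending)) {
            /* runtime-wide: run the signal handlers */
        }
        if (_Py_atomic_load_relaxed(&ceval_i->pending.calls_to_do)) {
            /* per-interpreter: run this interpreter's pending calls */
        }
        if (_Py_atomic_load_relaxed(&ceval_r->gil_drop_request)) {
            /* runtime-wide: drop the GIL, let another thread run,
               then re-take it */
        }
    }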
@@ -180,8 +178,8 @@ void
 PyEval_InitThreads(void)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
-    struct _ceval_runtime_state *ceval = &runtime->ceval;
-    struct _gil_runtime_state *gil = &ceval->gil;
+    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
+    struct _gil_runtime_state *gil = &ceval_r->gil;
     if (gil_created(gil)) {
         return;
     }
@@ -189,19 +187,15 @@ PyEval_InitThreads(void)
     PyThread_init_thread();
     create_gil(gil);
     PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
-    take_gil(ceval, tstate);
+    take_gil(ceval_r, tstate);
 
-    struct _pending_calls *pending = &ceval->pending;
-    pending->lock = PyThread_allocate_lock();
-    if (pending->lock == NULL) {
-        Py_FatalError("Can't initialize threads for pending calls");
-    }
+    // The pending calls mutex is initialized in PyInterpreterState_New().
 }
 
 void
-_PyEval_FiniThreads(struct _ceval_runtime_state *ceval)
+_PyEval_FiniThreads(struct _ceval_runtime_state *ceval_r)
 {
-    struct _gil_runtime_state *gil = &ceval->gil;
+    struct _gil_runtime_state *gil = &ceval_r->gil;
     if (!gil_created(gil)) {
         return;
     }
@@ -209,20 +203,24 @@ _PyEval_FiniThreads(struct _ceval_runtime_state *ceval)
     destroy_gil(gil);
     assert(!gil_created(gil));
 
-    struct _pending_calls *pending = &ceval->pending;
-    if (pending->lock != NULL) {
-        PyThread_free_lock(pending->lock);
-        pending->lock = NULL;
-    }
+    // The pending calls mutex is freed in PyInterpreterState_Delete().
 }
 
 static inline void
 exit_thread_if_finalizing(PyThreadState *tstate)
 {
-    _PyRuntimeState *runtime = tstate->interp->runtime;
-    /* _Py_Finalizing is protected by the GIL */
+    PyInterpreterState *interp = tstate->interp;
+    // Stop if thread/interpreter finalization already started.
+    if (interp == NULL) {
+        return;
+    }
+    _PyRuntimeState *runtime = interp->runtime;
+    if (runtime == NULL) {
+        return;
+    }
+    // Don't exit if the main thread (i.e. of the main interpreter).
     if (runtime->finalizing != NULL && !_Py_CURRENTLY_FINALIZING(runtime, tstate)) {
-        drop_gil(&runtime->ceval, tstate);
+        drop_gil(&runtime->ceval, &interp->ceval, tstate);
         PyThread_exit_thread();
     }
 }
@@ -231,12 +229,12 @@ void
 PyEval_AcquireLock(void)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
-    struct _ceval_runtime_state *ceval = &runtime->ceval;
+    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
     PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
     if (tstate == NULL) {
         Py_FatalError("PyEval_AcquireLock: current thread state is NULL");
     }
-    take_gil(ceval, tstate);
+    take_gil(ceval_r, tstate);
     exit_thread_if_finalizing(tstate);
 }
 
@@ -244,12 +242,21 @@ void
 PyEval_ReleaseLock(void)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
-    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
     /* This function must succeed when the current thread state is NULL.
        We therefore avoid PyThreadState_Get() which dumps a fatal error
       in debug mode.
     */
-    drop_gil(&runtime->ceval, tstate);
+    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
+    // Fall back to the main interpreter if there is no active Python
+    // thread.  This only affects the eval_breaker.
+    PyInterpreterState *interp = runtime->interpreters.main;
+    if (tstate != NULL) {
+        interp = tstate->interp;
+        if (interp == NULL) {
+            Py_FatalError("PyEval_ReleaseLock: NULL interpreter state");
+        }
+    }
+    drop_gil(&runtime->ceval, &interp->ceval, tstate);
 }
 
 void
@@ -258,14 +265,19 @@ PyEval_AcquireThread(PyThreadState *tstate)
     if (tstate == NULL) {
         Py_FatalError("PyEval_AcquireThread: NULL new thread state");
     }
-    assert(tstate->interp != NULL);
-
-    _PyRuntimeState *runtime = tstate->interp->runtime;
-    struct _ceval_runtime_state *ceval = &runtime->ceval;
+    PyInterpreterState *interp = tstate->interp;
+    if (interp == NULL) {
+        Py_FatalError("PyEval_AcquireThread: NULL interpreter state");
+    }
+    _PyRuntimeState *runtime = interp->runtime;
+    if (runtime == NULL) {
+        Py_FatalError("PyEval_AcquireThread: NULL runtime state");
+    }
+    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
 
     /* Check someone has called PyEval_InitThreads() to create the lock */
-    assert(gil_created(&ceval->gil));
-    take_gil(ceval, tstate);
+    assert(gil_created(&ceval_r->gil));
+    take_gil(ceval_r, tstate);
     exit_thread_if_finalizing(tstate);
     if (_PyThreadState_Swap(&runtime->gilstate, tstate) != NULL) {
         Py_FatalError("PyEval_AcquireThread: non-NULL old thread state");
@@ -278,14 +290,20 @@ PyEval_ReleaseThread(PyThreadState *tstate)
     if (tstate == NULL) {
         Py_FatalError("PyEval_ReleaseThread: NULL thread state");
     }
-    assert(tstate->interp != NULL);
+    PyInterpreterState *interp = tstate->interp;
+    if (interp == NULL) {
+        Py_FatalError("PyEval_ReleaseThread: NULL interpreter state");
+    }
+    _PyRuntimeState *runtime = interp->runtime;
+    if (runtime == NULL) {
+        Py_FatalError("PyEval_ReleaseThread: NULL runtime state");
+    }
 
-    _PyRuntimeState *runtime = tstate->interp->runtime;
     PyThreadState *new_tstate = _PyThreadState_Swap(&runtime->gilstate, NULL);
     if (new_tstate != tstate) {
         Py_FatalError("PyEval_ReleaseThread: wrong thread state");
     }
-    drop_gil(&runtime->ceval, tstate);
+    drop_gil(&runtime->ceval, &interp->ceval, tstate);
 }
 
 /* This function is called from PyOS_AfterFork_Child to destroy all threads
@@ -296,15 +314,17 @@ PyEval_ReleaseThread(PyThreadState *tstate)
 void
 _PyEval_ReInitThreads(_PyRuntimeState *runtime)
 {
-    struct _ceval_runtime_state *ceval = &runtime->ceval;
-    if (!gil_created(&ceval->gil)) {
+    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
+    if (!gil_created(&ceval_r->gil)) {
         return;
     }
-    recreate_gil(&ceval->gil);
+    recreate_gil(&ceval_r->gil);
     PyThreadState *current_tstate = _PyRuntimeState_GetThreadState(runtime);
-    take_gil(ceval, current_tstate);
+    take_gil(ceval_r, current_tstate);
 
-    struct _pending_calls *pending = &ceval->pending;
+    // Only the main interpreter remains, so ignore the rest.
+    PyInterpreterState *interp = _PyRuntime.interpreters.main;
+    struct _ceval_pending_calls *pending = &interp->ceval.pending;
     pending->lock = PyThread_allocate_lock();
     if (pending->lock == NULL) {
         Py_FatalError("Can't initialize threads for pending calls");
@@ -318,22 +338,28 @@ _PyEval_ReInitThreads(_PyRuntimeState *runtime)
    raised. */
 
 void
-_PyEval_SignalAsyncExc(struct _ceval_runtime_state *ceval)
+_PyEval_SignalAsyncExc(struct _ceval_runtime_state *ceval_r,
+                       struct _ceval_interpreter_state *ceval_i)
 {
-    SIGNAL_ASYNC_EXC(ceval);
+    SIGNAL_ASYNC_EXC(ceval_r, ceval_i);
 }
 
 PyThreadState *
 PyEval_SaveThread(void)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
-    struct _ceval_runtime_state *ceval = &runtime->ceval;
+    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
     PyThreadState *tstate = _PyThreadState_Swap(&runtime->gilstate, NULL);
     if (tstate == NULL) {
         Py_FatalError("PyEval_SaveThread: NULL tstate");
     }
-    assert(gil_created(&ceval->gil));
-    drop_gil(ceval, tstate);
+    PyInterpreterState *interp = tstate->interp;
+    if (interp == NULL) {
+        Py_FatalError("PyEval_SaveThread: NULL interpreter state");
+    }
+
+    assert(gil_created(&ceval_r->gil));
+    drop_gil(ceval_r, &interp->ceval, tstate);
     return tstate;
 }
 
@@ -343,14 +369,20 @@ PyEval_RestoreThread(PyThreadState *tstate)
     if (tstate == NULL) {
         Py_FatalError("PyEval_RestoreThread: NULL tstate");
     }
-    assert(tstate->interp != NULL);
+    PyInterpreterState *interp = tstate->interp;
+    if (interp == NULL) {
+        Py_FatalError("PyEval_RestoreThread: NULL interpreter state");
+    }
+    _PyRuntimeState *runtime = interp->runtime;
+    if (runtime == NULL) {
+        Py_FatalError("PyEval_RestoreThread: NULL runtime state");
+    }
+    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
 
-    _PyRuntimeState *runtime = tstate->interp->runtime;
-    struct _ceval_runtime_state *ceval = &runtime->ceval;
-    assert(gil_created(&ceval->gil));
+    assert(gil_created(&ceval_r->gil));
 
     int err = errno;
-    take_gil(ceval, tstate);
+    take_gil(ceval_r, tstate);
     exit_thread_if_finalizing(tstate);
     errno = err;
@@ -381,17 +413,17 @@ PyEval_RestoreThread(PyThreadState *tstate)
 */
 
 void
-_PyEval_SignalReceived(struct _ceval_runtime_state *ceval)
+_PyEval_SignalReceived(struct _ceval_runtime_state *ceval_r)
 {
     /* bpo-30703: Function called when the C signal handler of Python gets a
        signal.  We cannot queue a callback using Py_AddPendingCall() since
        that function is not async-signal-safe. */
-    SIGNAL_PENDING_SIGNALS(ceval);
+    SIGNAL_PENDING_SIGNALS(ceval_r);
 }
 
 /* Push one item onto the queue while holding the lock. */
 static int
-_push_pending_call(struct _pending_calls *pending,
+_push_pending_call(struct _ceval_pending_calls *pending, unsigned long thread_id,
                    int (*func)(void *), void *arg)
 {
     int i = pending->last;
@@ -399,6 +431,7 @@ _push_pending_call(struct _pending_calls *pending,
     if (j == pending->first) {
         return -1; /* Queue full */
     }
+    pending->calls[i].thread_id = thread_id;
     pending->calls[i].func = func;
     pending->calls[i].arg = arg;
     pending->last = j;
@@ -407,7 +440,7 @@ _push_pending_call(struct _pending_calls *pending,
 
 /* Pop one item off the queue while holding the lock. */
 static void
-_pop_pending_call(struct _pending_calls *pending,
+_pop_pending_call(struct _ceval_pending_calls *pending, unsigned long *thread_id,
                   int (**func)(void *), void **arg)
 {
     int i = pending->first;
@@ -417,6 +450,7 @@ _pop_pending_call(struct _pending_calls *pending,
 
     *func = pending->calls[i].func;
     *arg = pending->calls[i].arg;
+    *thread_id = pending->calls[i].thread_id;
     pending->first = (i + 1) % NPENDINGCALLS;
 }
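The queue behind these helpers is a fixed 32-slot ring buffer. Below is a self-contained sketch of the same push discipline, with field names mirroring the patch (illustrative only; the real code runs under pending->lock):

    #define NPENDINGCALLS 32

    struct pending_sketch {
        int first;                      /* index of the oldest queued call */
        int last;                       /* index of the next free slot */
        struct {
            unsigned long thread_id;    /* 0 = any thread may run it */
            int (*func)(void *);
            void *arg;
        } calls[NPENDINGCALLS];
    };

    /* Mirrors _push_pending_call(): the queue is full when advancing
       `last` would collide with `first`, so one slot always stays unused. */
    static int
    push_sketch(struct pending_sketch *p, unsigned long thread_id,
                int (*func)(void *), void *arg)
    {
        int i = p->last;
        int j = (i + 1) % NPENDINGCALLS;
        if (j == p->first) {
            return -1;                  /* queue full */
        }
        p->calls[i].thread_id = thread_id;
        p->calls[i].func = func;
        p->calls[i].arg = arg;
        p->last = j;
        return 0;
    }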
@@ -427,10 +461,12 @@ _pop_pending_call(struct _pending_calls *pending,
 
 int
 _PyEval_AddPendingCall(PyThreadState *tstate,
-                       struct _ceval_runtime_state *ceval,
+                       struct _ceval_runtime_state *ceval_r,
+                       struct _ceval_interpreter_state *ceval_i,
+                       unsigned long thread_id,
                        int (*func)(void *), void *arg)
 {
-    struct _pending_calls *pending = &ceval->pending;
+    struct _ceval_pending_calls *pending = &ceval_i->pending;
 
     PyThread_acquire_lock(pending->lock, WAIT_LOCK);
     if (pending->finishing) {
@@ -445,20 +481,27 @@ _PyEval_AddPendingCall(PyThreadState *tstate,
         _PyErr_Restore(tstate, exc, val, tb);
         return -1;
     }
-    int result = _push_pending_call(pending, func, arg);
+    int result = _push_pending_call(pending, thread_id, func, arg);
+
+    /* signal loop */
+    SIGNAL_PENDING_CALLS(ceval_r, ceval_i);
     PyThread_release_lock(pending->lock);
 
-    /* signal main loop */
-    SIGNAL_PENDING_CALLS(ceval);
     return result;
 }
 
 /* Py_AddPendingCall() is a simple wrapper for the sake
   of backward-compatibility. */
 int
 Py_AddPendingCall(int (*func)(void *), void *arg)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
+    PyInterpreterState *interp = runtime->interpreters.main;
     PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
-    return _PyEval_AddPendingCall(tstate, &runtime->ceval, func, arg);
+    return _PyEval_AddPendingCall(tstate,
+                                  &runtime->ceval, &interp->ceval,
+                                  runtime->main_thread,
+                                  func, arg);
 }
 
 static int
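The public wrapper keeps its documented behavior for extension authors: calls are queued against the main interpreter and pinned to its main thread. A minimal usage sketch (my_callback and request_work are hypothetical names):

    static int
    my_callback(void *arg)
    {
        /* Invoked later in the main interpreter's main thread, with the
           GIL held.  Return 0 on success, -1 with an exception set. */
        return 0;
    }

    static void
    request_work(void)
    {
        if (Py_AddPendingCall(my_callback, NULL) < 0) {
            /* The 32-slot queue was full, or pending calls are being
               finished during shutdown; retry or give up. */
        }
    }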
@@ -479,47 +522,69 @@ handle_signals(_PyRuntimeState *runtime)
         return 0;
     }
 
-    struct _ceval_runtime_state *ceval = &runtime->ceval;
-    UNSIGNAL_PENDING_SIGNALS(ceval);
+    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
+    struct _ceval_interpreter_state *ceval_i = &interp->ceval;
+    UNSIGNAL_PENDING_SIGNALS(ceval_r, ceval_i);
     if (_PyErr_CheckSignals() < 0) {
-        SIGNAL_PENDING_SIGNALS(ceval); /* We're not done yet */
+        SIGNAL_PENDING_SIGNALS(ceval_r); /* We're not done yet */
         return -1;
     }
     return 0;
 }
 
 static int
-make_pending_calls(_PyRuntimeState *runtime)
+make_pending_calls(PyInterpreterState *interp)
 {
-    static int busy = 0;
-
-    /* only service pending calls on main thread */
-    if (PyThread_get_thread_ident() != runtime->main_thread) {
-        return 0;
+    if (interp == NULL) {
+        Py_FatalError("make_pending_calls: NULL interpreter state");
     }
+    _PyRuntimeState *runtime = interp->runtime;
+    if (runtime == NULL) {
+        Py_FatalError("make_pending_calls: NULL runtime state");
+    }
+    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
+    if (tstate == NULL) {
+        Py_FatalError("make_pending_calls: NULL thread state");
+    }
+    if (tstate->interp == NULL || tstate->interp != interp) {
+        Py_FatalError("make_pending_calls: thread state mismatch");
+    }
+    static int busy = 0;
 
     /* don't perform recursive pending calls */
     if (busy) {
         return 0;
     }
     busy = 1;
-    struct _ceval_runtime_state *ceval = &runtime->ceval;
+    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
+    struct _ceval_interpreter_state *ceval_i = &interp->ceval;
     /* unsignal before starting to call callbacks, so that any callback
        added in-between re-signals */
-    UNSIGNAL_PENDING_CALLS(ceval);
+    UNSIGNAL_PENDING_CALLS(ceval_r, ceval_i);
     int res = 0;
 
     /* perform a bounded number of calls, in case of recursion */
-    struct _pending_calls *pending = &ceval->pending;
+    struct _ceval_pending_calls *pending = &ceval_i->pending;
+    unsigned long thread_id = 0;
     for (int i=0; i<NPENDINGCALLS; i++) {
         int (*func)(void *) = NULL;
         void *arg = NULL;
 
         /* pop one item off the queue while holding the lock */
         PyThread_acquire_lock(pending->lock, WAIT_LOCK);
-        _pop_pending_call(pending, &func, &arg);
+        _pop_pending_call(pending, &thread_id, &func, &arg);
         PyThread_release_lock(pending->lock);
 
+        if (thread_id && PyThread_get_thread_ident() != thread_id) {
+            // Thread mismatch, so move it to the end of the list
+            // and start over.
+            _PyEval_AddPendingCall(tstate,
+                                   &runtime->ceval, &interp->ceval,
+                                   thread_id,
+                                   func, arg);
+            goto error;
+        }
+
         /* having released the lock, perform the callback */
         if (func == NULL) {
             break;
@@ -535,17 +600,16 @@ make_pending_calls(_PyRuntimeState *runtime)
 
 error:
     busy = 0;
-    SIGNAL_PENDING_CALLS(ceval);
+    SIGNAL_PENDING_CALLS(ceval_r, ceval_i);
     return res;
 }
 
 void
-_Py_FinishPendingCalls(_PyRuntimeState *runtime)
+_PyEval_FinishPendingCalls(PyInterpreterState *interp)
 {
     assert(PyGILState_Check());
 
-    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
-    struct _pending_calls *pending = &runtime->ceval.pending;
+    struct _ceval_pending_calls *pending = &interp->ceval.pending;
 
     PyThread_acquire_lock(pending->lock, WAIT_LOCK);
     pending->finishing = 1;
@@ -555,12 +619,19 @@ _Py_FinishPendingCalls(_PyRuntimeState *runtime)
         return;
     }
 
-    if (make_pending_calls(runtime) < 0) {
-        PyObject *exc, *val, *tb;
-        _PyErr_Fetch(tstate, &exc, &val, &tb);
-        PyErr_BadInternalCall();
-        _PyErr_ChainExceptions(exc, val, tb);
-        _PyErr_Print(tstate);
+    if (make_pending_calls(interp) < 0) {
+        _PyRuntimeState *runtime = interp->runtime;
+        if (runtime == NULL) {
+            runtime = &_PyRuntime;
+        }
+        PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
+        if (tstate != NULL) {
+            PyObject *exc, *val, *tb;
+            _PyErr_Fetch(tstate, &exc, &val, &tb);
+            PyErr_BadInternalCall();
+            _PyErr_ChainExceptions(exc, val, tb);
+            _PyErr_Print(tstate);
+        }
     }
 }
 
@@ -579,7 +650,8 @@ Py_MakePendingCalls(void)
         return res;
     }
 
-    res = make_pending_calls(runtime);
+    PyInterpreterState *interp = _PyRuntime.interpreters.main;
+    res = make_pending_calls(interp);
     if (res != 0) {
         return res;
     }
@@ -596,11 +668,11 @@ Py_MakePendingCalls(void)
 int _Py_CheckRecursionLimit = Py_DEFAULT_RECURSION_LIMIT;
 
 void
-_PyEval_Initialize(struct _ceval_runtime_state *state)
+_PyEval_Initialize(struct _ceval_runtime_state *ceval_r)
 {
-    state->recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
+    ceval_r->recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
     _Py_CheckRecursionLimit = Py_DEFAULT_RECURSION_LIMIT;
-    _gil_initialize(&state->gil);
+    _gil_initialize(&ceval_r->gil);
 }
 
 int
@@ -612,9 +684,9 @@ Py_GetRecursionLimit(void)
 void
 Py_SetRecursionLimit(int new_limit)
 {
-    struct _ceval_runtime_state *ceval = &_PyRuntime.ceval;
-    ceval->recursion_limit = new_limit;
-    _Py_CheckRecursionLimit = ceval->recursion_limit;
+    struct _ceval_runtime_state *ceval_r = &_PyRuntime.ceval;
+    ceval_r->recursion_limit = new_limit;
+    _Py_CheckRecursionLimit = ceval_r->recursion_limit;
 }
 
 /* the macro Py_EnterRecursiveCall() only calls _Py_CheckRecursiveCall()
@@ -663,7 +735,7 @@ _Py_CheckRecursiveCall(const char *where)
 static int do_raise(PyThreadState *tstate, PyObject *exc, PyObject *cause);
 static int unpack_iterable(PyThreadState *, PyObject *, int, int, PyObject **);
 
-#define _Py_TracingPossible(ceval) ((ceval)->tracing_possible)
+#define _Py_TracingPossible(ceval_r) ((ceval_r)->tracing_possible)
 
 
 PyObject *
@@ -709,8 +781,10 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
     PyObject *retval = NULL;            /* Return value */
     _PyRuntimeState * const runtime = &_PyRuntime;
     PyThreadState * const tstate = _PyRuntimeState_GetThreadState(runtime);
-    struct _ceval_runtime_state * const ceval = &runtime->ceval;
-    _Py_atomic_int * const eval_breaker = &ceval->eval_breaker;
+    PyInterpreterState * const interp = tstate->interp;
+    struct _ceval_runtime_state * const ceval_r = &runtime->ceval;
+    struct _ceval_interpreter_state * const ceval_i = &interp->ceval;
+    _Py_atomic_int * const eval_breaker = &ceval_r->eval_breaker;
     PyCodeObject *co;
 
     /* when tracing we set things up so that
@@ -797,7 +871,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
 #ifdef LLTRACE
 #define FAST_DISPATCH() \
     { \
-        if (!lltrace && !_Py_TracingPossible(ceval) && !PyDTrace_LINE_ENABLED()) { \
+        if (!lltrace && !_Py_TracingPossible(ceval_r) && !PyDTrace_LINE_ENABLED()) { \
             f->f_lasti = INSTR_OFFSET(); \
             NEXTOPARG(); \
             goto *opcode_targets[opcode]; \
@@ -807,7 +881,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
 #else
 #define FAST_DISPATCH() \
     { \
-        if (!_Py_TracingPossible(ceval) && !PyDTrace_LINE_ENABLED()) { \
+        if (!_Py_TracingPossible(ceval_r) && !PyDTrace_LINE_ENABLED()) { \
             f->f_lasti = INSTR_OFFSET(); \
             NEXTOPARG(); \
             goto *opcode_targets[opcode]; \
@@ -1122,27 +1196,27 @@ main_loop:
             goto fast_next_opcode;
         }
 
-        if (_Py_atomic_load_relaxed(&ceval->signals_pending)) {
+        if (_Py_atomic_load_relaxed(&ceval_r->signals_pending)) {
             if (handle_signals(runtime) != 0) {
                 goto error;
             }
         }
-        if (_Py_atomic_load_relaxed(&ceval->pending.calls_to_do)) {
-            if (make_pending_calls(runtime) != 0) {
+        if (_Py_atomic_load_relaxed(&ceval_i->pending.calls_to_do)) {
+            if (make_pending_calls(interp) != 0) {
                 goto error;
             }
         }
 
-        if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
+        if (_Py_atomic_load_relaxed(&ceval_r->gil_drop_request)) {
             /* Give another thread a chance */
             if (_PyThreadState_Swap(&runtime->gilstate, NULL) != tstate) {
                 Py_FatalError("ceval: tstate mix-up");
             }
-            drop_gil(ceval, tstate);
+            drop_gil(ceval_r, ceval_i, tstate);
 
             /* Other threads may run now */
 
-            take_gil(ceval, tstate);
+            take_gil(ceval_r, tstate);
 
             /* Check if we should make a quick exit. */
             exit_thread_if_finalizing(tstate);
@@ -1155,7 +1229,7 @@ main_loop:
         if (tstate->async_exc != NULL) {
             PyObject *exc = tstate->async_exc;
             tstate->async_exc = NULL;
-            UNSIGNAL_ASYNC_EXC(ceval);
+            UNSIGNAL_ASYNC_EXC(ceval_r, ceval_i);
             _PyErr_SetNone(tstate, exc);
             Py_DECREF(exc);
             goto error;
@@ -1170,7 +1244,7 @@ main_loop:
 
         /* line-by-line tracing support */
 
-        if (_Py_TracingPossible(ceval) &&
+        if (_Py_TracingPossible(ceval_r) &&
             tstate->c_tracefunc != NULL && !tstate->tracing) {
             int err;
             /* see maybe_call_line_trace
Python/ceval_gil.h

@@ -141,9 +141,11 @@ static void recreate_gil(struct _gil_runtime_state *gil)
 }
 
 static void
-drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
+drop_gil(struct _ceval_runtime_state *ceval_r,
+         struct _ceval_interpreter_state *ceval_i,
+         PyThreadState *tstate)
 {
-    struct _gil_runtime_state *gil = &ceval->gil;
+    struct _gil_runtime_state *gil = &ceval_r->gil;
     if (!_Py_atomic_load_relaxed(&gil->locked)) {
         Py_FatalError("drop_gil: GIL is not locked");
     }
@@ -163,12 +165,12 @@ drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
     MUTEX_UNLOCK(gil->mutex);
 
 #ifdef FORCE_SWITCHING
-    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request) && tstate != NULL) {
+    if (_Py_atomic_load_relaxed(&ceval_r->gil_drop_request) && tstate != NULL) {
         MUTEX_LOCK(gil->switch_mutex);
         /* Not switched yet => wait */
         if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
         {
-            RESET_GIL_DROP_REQUEST(ceval);
+            RESET_GIL_DROP_REQUEST(ceval_r, ceval_i);
             /* NOTE: if COND_WAIT does not atomically start waiting when
                releasing the mutex, another thread can run through, take
                the GIL and drop it again, and reset the condition
@@ -181,13 +183,19 @@ drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
 }
 
 static void
-take_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
+take_gil(struct _ceval_runtime_state *ceval_r,
+         PyThreadState *tstate)
 {
     if (tstate == NULL) {
         Py_FatalError("take_gil: NULL tstate");
     }
+    PyInterpreterState *interp = tstate->interp;
+    if (interp == NULL) {
+        Py_FatalError("take_gil: NULL interp");
+    }
+    struct _ceval_interpreter_state *ceval_i = &interp->ceval;
 
-    struct _gil_runtime_state *gil = &ceval->gil;
+    struct _gil_runtime_state *gil = &ceval_r->gil;
     int err = errno;
     MUTEX_LOCK(gil->mutex);
 
@@ -210,7 +218,7 @@ take_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
             _Py_atomic_load_relaxed(&gil->locked) &&
             gil->switch_number == saved_switchnum)
         {
-            SET_GIL_DROP_REQUEST(ceval);
+            SET_GIL_DROP_REQUEST(ceval_r);
         }
     }
 _ready:
@@ -232,11 +240,11 @@ _ready:
     COND_SIGNAL(gil->switch_cond);
     MUTEX_UNLOCK(gil->switch_mutex);
 #endif
-    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
-        RESET_GIL_DROP_REQUEST(ceval);
+    if (_Py_atomic_load_relaxed(&ceval_r->gil_drop_request)) {
+        RESET_GIL_DROP_REQUEST(ceval_r, ceval_i);
     }
     if (tstate->async_exc != NULL) {
-        _PyEval_SignalAsyncExc(ceval);
+        _PyEval_SignalAsyncExc(ceval_r, ceval_i);
     }
 
     MUTEX_UNLOCK(gil->mutex);
Python/pylifecycle.c

@@ -1147,15 +1147,31 @@ Py_FinalizeEx(void)
         return status;
     }
 
-    /* Get current thread state and interpreter pointer */
-    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
-    PyInterpreterState *interp = tstate->interp;
-
     // Wrap up existing "threading"-module-created, non-daemon threads.
     wait_for_thread_shutdown();
 
     // Make any remaining pending calls.
-    _Py_FinishPendingCalls(runtime);
+    /* Get current thread state and interpreter pointer */
+    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
+    PyInterpreterState *interp = tstate->interp;
+    /* XXX For the moment we are going to ignore lingering pending calls.
+     * We've seen sporadic failures on some of the buildbots during
+     * finalization with the changes for per-interpreter pending calls
+     * (see bpo-33608), meaning the previous _PyEval_FinishPendingCalls()
+     * call here is a trigger, if not responsible.
+     *
+     * Ignoring pending calls at this point in the runtime lifecycle
+     * is okay (for now) for the following reasons:
+     *
+     *  * pending calls are still not a widely-used feature
+     *  * this only affects runtime finalization, where the process is
+     *    likely to end soon anyway (except for some embedding cases)
+     *
+     * See bpo-37127 about resolving the problem.  Ultimately the call
+     * here should be re-enabled.
+     */
+    //_PyEval_FinishPendingCalls(interp);
 
     /* The interpreter is still entirely intact at this point, and the
      * exit funcs may be relying on that.  In particular, if some thread
@@ -1580,6 +1596,9 @@ Py_EndInterpreter(PyThreadState *tstate)
     // Wrap up existing "threading"-module-created, non-daemon threads.
     wait_for_thread_shutdown();
 
+    // Make any remaining pending calls.
+    _PyEval_FinishPendingCalls(interp);
+
     call_py_exitfuncs(interp);
 
     if (tstate != interp->tstate_head || tstate->next != NULL)
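Per the Py_EndInterpreter hunk above, subinterpreter teardown now drains that interpreter's own queue before its exit functions run. A hypothetical embedding sketch:

    PyThreadState *ts = Py_NewInterpreter();
    /* ... run code that may queue per-interpreter pending calls ... */
    Py_EndInterpreter(ts);  /* drains interp's pending calls, then runs
                               call_py_exitfuncs() and tears it down */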
Python/pystate.c

@@ -218,6 +218,13 @@ PyInterpreterState_New(void)
         return NULL;
     }
 
+    interp->ceval.pending.lock = PyThread_allocate_lock();
+    if (interp->ceval.pending.lock == NULL) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "failed to create interpreter ceval pending mutex");
+        return NULL;
+    }
+
     interp->eval_frame = _PyEval_EvalFrameDefault;
 #ifdef HAVE_DLOPEN
 #if HAVE_DECL_RTLD_NOW
@@ -345,6 +352,10 @@ PyInterpreterState_Delete(PyInterpreterState *interp)
     if (interp->id_mutex != NULL) {
         PyThread_free_lock(interp->id_mutex);
     }
+    if (interp->ceval.pending.lock != NULL) {
+        PyThread_free_lock(interp->ceval.pending.lock);
+        interp->ceval.pending.lock = NULL;
+    }
     PyMem_RawFree(interp);
 }
 
@@ -1014,7 +1025,7 @@ PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
             p->async_exc = exc;
             HEAD_UNLOCK(runtime);
             Py_XDECREF(old_exc);
-            _PyEval_SignalAsyncExc(&runtime->ceval);
+            _PyEval_SignalAsyncExc(&runtime->ceval, &interp->ceval);
             return 1;
         }
     }
@@ -1444,7 +1455,7 @@ _PyObject_GetCrossInterpreterData(PyObject *obj, _PyCrossInterpreterData *data)
     return 0;
 }
 
-static void
+static int
 _release_xidata(void *arg)
 {
     _PyCrossInterpreterData *data = (_PyCrossInterpreterData *)arg;
@@ -1452,42 +1463,21 @@ _release_xidata(void *arg)
         data->free(data->data);
     }
     Py_XDECREF(data->obj);
-}
-
-static void
-_call_in_interpreter(struct _gilstate_runtime_state *gilstate,
-                     PyInterpreterState *interp,
-                     void (*func)(void *), void *arg)
-{
-    /* We would use Py_AddPendingCall() if it weren't specific to the
-     * main interpreter (see bpo-33608).  In the meantime we take a
-     * naive approach.
-     */
-    PyThreadState *save_tstate = NULL;
-    if (interp != _PyRuntimeGILState_GetThreadState(gilstate)->interp) {
-        // XXX Using the "head" thread isn't strictly correct.
-        PyThreadState *tstate = PyInterpreterState_ThreadHead(interp);
-        // XXX Possible GILState issues?
-        save_tstate = _PyThreadState_Swap(gilstate, tstate);
-    }
-
-    func(arg);
-
-    // Switch back.
-    if (save_tstate != NULL) {
-        _PyThreadState_Swap(gilstate, save_tstate);
-    }
+    PyMem_Free(data);
+    return 0;
 }
 
 void
 _PyCrossInterpreterData_Release(_PyCrossInterpreterData *data)
 {
+    _PyRuntimeState *runtime = &_PyRuntime;
+
     if (data->data == NULL && data->obj == NULL) {
         // Nothing to release!
         return;
     }
 
-    // Switch to the original interpreter.
+    // Get the original interpreter.
     PyInterpreterState *interp = _PyInterpreterState_LookUpID(data->interp);
     if (interp == NULL) {
         // The interpreter was already destroyed.
@@ -1496,10 +1486,28 @@ _PyCrossInterpreterData_Release(_PyCrossInterpreterData *data)
         }
         return;
     }
+    // XXX There's an ever-so-slight race here...
+    if (interp->finalizing) {
+        // XXX Someone leaked some memory...
+        return;
+    }
 
     // "Release" the data and/or the object.
-    struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate;
-    _call_in_interpreter(gilstate, interp, _release_xidata, data);
+    _PyCrossInterpreterData *copied = PyMem_Malloc(sizeof(_PyCrossInterpreterData));
+    if (copied == NULL) {
+        PyErr_SetString(PyExc_MemoryError,
+                        "Not enough memory to preserve cross-interpreter data");
+        PyErr_Print();
+        return;
+    }
+    memcpy(copied, data, sizeof(_PyCrossInterpreterData));
+    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
+    int res = _PyEval_AddPendingCall(tstate,
+                                     &runtime->ceval, &interp->ceval,
+                                     0, _release_xidata, copied);
+    if (res != 0) {
+        // XXX Queue full or couldn't get lock.  Try again somehow?
+    }
 }
 
 PyObject *
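The release path above is now fire-and-forget: the pending call runs some time after _PyCrossInterpreterData_Release() returns, so the descriptor is first copied to the heap to outlive the caller, and _release_xidata() frees that copy. Schematically (editorial summary of the code above):

    caller's data (possibly stack-allocated)
        -> PyMem_Malloc() + memcpy() a heap copy
        -> _PyEval_AddPendingCall(..., &interp->ceval, 0, _release_xidata, copied)
        -> later, in the owning interpreter:
               data->free(data->data), Py_XDECREF(data->obj), PyMem_Free(copied)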