bpo-33608: Factor out a private, per-interpreter _Py_AddPendingCall(). (GH-11617)
This involves moving the global "pending calls" state to PyInterpreterState.

https://bugs.python.org/issue33608

Parent: 463572c8be
Commit: ef4ac967e2
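Concretely, call sites stop reaching into the runtime-global _PyRuntime.ceval.pending and instead go through a specific interpreter. A minimal sketch of the before/after access pattern (illustrative only, not part of the diff; the field names come from the pycore_ceval.h and pycore_pystate.h hunks below):

/* Illustrative sketch, not code from this commit. */
static int
pending_calls_requested(PyInterpreterState *interp)
{
    /* Before: the flag was runtime-global:
     *     _Py_atomic_load_relaxed(&_PyRuntime.ceval.pending.calls_to_do)
     * After: each interpreter carries its own queue and eval_breaker: */
    return _Py_atomic_load_relaxed(&interp->ceval.pending.calls_to_do);
}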
@@ -221,7 +221,7 @@ PyAPI_FUNC(Py_ssize_t) _PyEval_RequestCodeExtraIndex(freefunc);
 #ifndef Py_LIMITED_API
 PyAPI_FUNC(int) _PyEval_SliceIndex(PyObject *, Py_ssize_t *);
 PyAPI_FUNC(int) _PyEval_SliceIndexNotNone(PyObject *, Py_ssize_t *);
-PyAPI_FUNC(void) _PyEval_SignalAsyncExc(void);
+PyAPI_FUNC(void) _PyEval_SignalAsyncExc(PyInterpreterState *);
 #endif
 
 /* Masks and values used by FORMAT_VALUE opcode. */
@@ -11,8 +11,12 @@ extern "C" {
 #include "pycore_atomic.h"
 #include "pythread.h"
 
+struct _is; // See PyInterpreterState in cpython/pystate.h.
+
+PyAPI_FUNC(int) _Py_AddPendingCall(struct _is*, unsigned long, int (*)(void *), void *);
+PyAPI_FUNC(int) _Py_MakePendingCalls(struct _is*);
+
 struct _pending_calls {
-    unsigned long main_thread;
     PyThread_type_lock lock;
     /* Request for running pending calls. */
     _Py_atomic_int calls_to_do;
@@ -22,6 +26,7 @@ struct _pending_calls {
     int async_exc;
 #define NPENDINGCALLS 32
     struct {
+        unsigned long thread_id;
         int (*func)(void *);
         void *arg;
     } calls[NPENDINGCALLS];
@@ -29,6 +34,13 @@ struct _pending_calls {
     int last;
 };
 
+struct _ceval_interpreter_state {
+    /* This single variable consolidates all requests to break out of
+       the fast path in the eval loop. */
+    _Py_atomic_int eval_breaker;
+    struct _pending_calls pending;
+};
+
 #include "pycore_gil.h"
 
 struct _ceval_runtime_state {
@@ -39,12 +51,8 @@ struct _ceval_runtime_state {
        c_tracefunc.  This speeds up the if statement in
        PyEval_EvalFrameEx() after fast_next_opcode. */
     int tracing_possible;
-    /* This single variable consolidates all requests to break out of
-       the fast path in the eval loop. */
-    _Py_atomic_int eval_breaker;
     /* Request for dropping the GIL */
     _Py_atomic_int gil_drop_request;
-    struct _pending_calls pending;
     /* Request for checking signals. */
     _Py_atomic_int signals_pending;
     struct _gil_runtime_state gil;
@@ -11,6 +11,7 @@ extern "C" {
 #include "pystate.h"
 #include "pythread.h"
 
 #include "pycore_atomic.h"
+#include "pycore_ceval.h"
 #include "pycore_pathconfig.h"
 #include "pycore_pymem.h"
@@ -31,6 +32,8 @@ struct _is {
     int64_t id_refcount;
     PyThread_type_lock id_mutex;
 
+    int finalizing;
+
     PyObject *modules;
     PyObject *modules_by_index;
     PyObject *sysdict;
@@ -78,6 +81,8 @@ struct _is {
     PyObject *pyexitmodule;
 
     uint64_t tstate_next_unique_id;
+
+    struct _ceval_interpreter_state ceval;
 };
 
 PyAPI_FUNC(struct _is*) _PyInterpreterState_LookUpID(PY_INT64_T);
@@ -207,6 +212,8 @@ typedef struct pyruntimestate {
         struct _xidregitem *head;
     } xidregistry;
 
+    unsigned long main_thread;
+
 #define NEXITFUNCS 32
     void (*exitfuncs[NEXITFUNCS])(void);
     int nexitfuncs;
@@ -0,0 +1,5 @@
+We added a new internal _Py_AddPendingCall() that operates relative to the
+provided interpreter.  This allows us to use the existing implementation to
+ask another interpreter to do work that cannot be done in the current
+interpreter, like decref an object the other interpreter owns.  The existing
+Py_AddPendingCall() only operates relative to the main interpreter.
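A sketch of the cross-interpreter use case this news entry describes, using only the signature declared in the pycore_ceval.h hunk above; the helper names here are hypothetical, for illustration only:

/* Hypothetical helper: runs later, inside the owning interpreter's
   eval loop, where the decref is safe. */
static int
_decref_in_owner(void *arg)
{
    Py_DECREF((PyObject *)arg);
    return 0;
}

/* Ask `owner` (possibly not the current interpreter) to drop a reference
   it owns.  Returns 0 on success, -1 on failure (e.g. queue full). */
static int
schedule_cross_interp_decref(PyInterpreterState *owner, unsigned long thread_id,
                             PyObject *obj)
{
    return _Py_AddPendingCall(owner, thread_id, _decref_in_owner, obj);
}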
@@ -2445,6 +2445,7 @@ pending_threadfunc(PyObject *self, PyObject *arg)
     Py_INCREF(callable);
 
     Py_BEGIN_ALLOW_THREADS
+    /* XXX Use the internal _Py_AddPendingCall(). */
     r = Py_AddPendingCall(&_pending_callback, callable);
     Py_END_ALLOW_THREADS
 
@@ -19,6 +19,7 @@
 #include <process.h>
 #endif
 #endif
+#include "internal/pycore_pystate.h"
 
 #ifdef HAVE_SIGNAL_H
 #include <signal.h>
@@ -295,8 +296,10 @@ trip_signal(int sig_num)
             {
                 /* Py_AddPendingCall() isn't signal-safe, but we
                    still use it for this exceptional case. */
-                Py_AddPendingCall(report_wakeup_send_error,
-                                  (void *)(intptr_t) last_error);
+                _Py_AddPendingCall(_PyRuntime.interpreters.main,
+                                   main_thread,
+                                   report_wakeup_send_error,
+                                   (void *)(intptr_t) last_error);
             }
         }
     }
@@ -313,8 +316,10 @@ trip_signal(int sig_num)
             {
                 /* Py_AddPendingCall() isn't signal-safe, but we
                    still use it for this exceptional case. */
-                Py_AddPendingCall(report_wakeup_write_error,
-                                  (void *)(intptr_t)errno);
+                _Py_AddPendingCall(_PyRuntime.interpreters.main,
+                                   main_thread,
+                                   report_wakeup_write_error,
+                                   (void *)(intptr_t)errno);
             }
         }
     }
Python/ceval.c (199 changes)
@@ -96,61 +96,61 @@ static long dxp[256];
 /* This can set eval_breaker to 0 even though gil_drop_request became
    1.  We believe this is all right because the eval loop will release
    the GIL eventually anyway. */
-#define COMPUTE_EVAL_BREAKER() \
+#define COMPUTE_EVAL_BREAKER(interp) \
     _Py_atomic_store_relaxed( \
-        &_PyRuntime.ceval.eval_breaker, \
+        &interp->ceval.eval_breaker, \
         GIL_REQUEST | \
         _Py_atomic_load_relaxed(&_PyRuntime.ceval.signals_pending) | \
-        _Py_atomic_load_relaxed(&_PyRuntime.ceval.pending.calls_to_do) | \
-        _PyRuntime.ceval.pending.async_exc)
+        _Py_atomic_load_relaxed(&interp->ceval.pending.calls_to_do) | \
+        interp->ceval.pending.async_exc)
 
-#define SET_GIL_DROP_REQUEST() \
+#define SET_GIL_DROP_REQUEST(interp) \
     do { \
         _Py_atomic_store_relaxed(&_PyRuntime.ceval.gil_drop_request, 1); \
-        _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
+        _Py_atomic_store_relaxed(&interp->ceval.eval_breaker, 1); \
     } while (0)
 
-#define RESET_GIL_DROP_REQUEST() \
+#define RESET_GIL_DROP_REQUEST(interp) \
     do { \
         _Py_atomic_store_relaxed(&_PyRuntime.ceval.gil_drop_request, 0); \
-        COMPUTE_EVAL_BREAKER(); \
+        COMPUTE_EVAL_BREAKER(interp); \
     } while (0)
 
 /* Pending calls are only modified under pending_lock */
-#define SIGNAL_PENDING_CALLS() \
+#define SIGNAL_PENDING_CALLS(interp) \
     do { \
-        _Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 1); \
-        _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
+        _Py_atomic_store_relaxed(&interp->ceval.pending.calls_to_do, 1); \
+        _Py_atomic_store_relaxed(&interp->ceval.eval_breaker, 1); \
     } while (0)
 
-#define UNSIGNAL_PENDING_CALLS() \
+#define UNSIGNAL_PENDING_CALLS(interp) \
     do { \
-        _Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 0); \
-        COMPUTE_EVAL_BREAKER(); \
+        _Py_atomic_store_relaxed(&interp->ceval.pending.calls_to_do, 0); \
+        COMPUTE_EVAL_BREAKER(interp); \
     } while (0)
 
 #define SIGNAL_PENDING_SIGNALS() \
     do { \
         _Py_atomic_store_relaxed(&_PyRuntime.ceval.signals_pending, 1); \
-        _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
+        _Py_atomic_store_relaxed(&_PyRuntime.interpreters.main->ceval.eval_breaker, 1); \
     } while (0)
 
 #define UNSIGNAL_PENDING_SIGNALS() \
     do { \
         _Py_atomic_store_relaxed(&_PyRuntime.ceval.signals_pending, 0); \
-        COMPUTE_EVAL_BREAKER(); \
+        COMPUTE_EVAL_BREAKER(_PyRuntime.interpreters.main); \
     } while (0)
 
-#define SIGNAL_ASYNC_EXC() \
+#define SIGNAL_ASYNC_EXC(interp) \
     do { \
-        _PyRuntime.ceval.pending.async_exc = 1; \
-        _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
+        interp->ceval.pending.async_exc = 1; \
+        _Py_atomic_store_relaxed(&interp->ceval.eval_breaker, 1); \
     } while (0)
 
-#define UNSIGNAL_ASYNC_EXC() \
+#define UNSIGNAL_ASYNC_EXC(interp) \
     do { \
-        _PyRuntime.ceval.pending.async_exc = 0; \
-        COMPUTE_EVAL_BREAKER(); \
+        interp->ceval.pending.async_exc = 0; \
+        COMPUTE_EVAL_BREAKER(interp); \
     } while (0)
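For readability, the per-interpreter COMPUTE_EVAL_BREAKER(interp) above is equivalent to the following function (a restatement for this writeup, not code from the commit). Note which inputs stay runtime-global and which become per-interpreter:

static void
compute_eval_breaker(PyInterpreterState *interp)
{
    _Py_atomic_store_relaxed(&interp->ceval.eval_breaker,
        GIL_REQUEST                                                     /* runtime-global */
        | _Py_atomic_load_relaxed(&_PyRuntime.ceval.signals_pending)   /* runtime-global */
        | _Py_atomic_load_relaxed(&interp->ceval.pending.calls_to_do)  /* per-interpreter */
        | interp->ceval.pending.async_exc);                            /* per-interpreter */
}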
@@ -174,9 +174,6 @@ PyEval_InitThreads(void)
     PyThread_init_thread();
     create_gil();
     take_gil(_PyThreadState_GET());
-    _PyRuntime.ceval.pending.main_thread = PyThread_get_thread_ident();
-    if (!_PyRuntime.ceval.pending.lock)
-        _PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
 }
 
 void
@@ -243,9 +240,11 @@ PyEval_ReInitThreads(void)
     if (!gil_created())
         return;
     recreate_gil();
-    _PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
+    // This will be reset in make_pending_calls() below.
+    current_tstate->interp->ceval.pending.lock = NULL;
+
     take_gil(current_tstate);
-    _PyRuntime.ceval.pending.main_thread = PyThread_get_thread_ident();
+    _PyRuntime.main_thread = PyThread_get_thread_ident();
 
     /* Destroy all threads except the current one */
     _PyThreadState_DeleteExcept(current_tstate);
@@ -255,9 +254,9 @@ PyEval_ReInitThreads(void)
    raised. */
 
 void
-_PyEval_SignalAsyncExc(void)
+_PyEval_SignalAsyncExc(PyInterpreterState *interp)
 {
-    SIGNAL_ASYNC_EXC();
+    SIGNAL_ASYNC_EXC(interp);
 }
 
 PyThreadState *
@@ -323,17 +322,58 @@ _PyEval_SignalReceived(void)
     SIGNAL_PENDING_SIGNALS();
 }
 
+static int
+_add_pending_call(PyInterpreterState *interp, unsigned long thread_id, int (*func)(void *), void *arg)
+{
+    int i = interp->ceval.pending.last;
+    int j = (i + 1) % NPENDINGCALLS;
+    if (j == interp->ceval.pending.first) {
+        return -1; /* Queue full */
+    }
+    interp->ceval.pending.calls[i].thread_id = thread_id;
+    interp->ceval.pending.calls[i].func = func;
+    interp->ceval.pending.calls[i].arg = arg;
+    interp->ceval.pending.last = j;
+    return 0;
+}
+
+/* pop one item off the queue while holding the lock */
+static void
+_pop_pending_call(PyInterpreterState *interp, int (**func)(void *), void **arg)
+{
+    int i = interp->ceval.pending.first;
+    if (i == interp->ceval.pending.last) {
+        return; /* Queue empty */
+    }
+
+    *func = interp->ceval.pending.calls[i].func;
+    *arg = interp->ceval.pending.calls[i].arg;
+    interp->ceval.pending.first = (i + 1) % NPENDINGCALLS;
+
+    unsigned long thread_id = interp->ceval.pending.calls[i].thread_id;
+    if (thread_id && PyThread_get_thread_ident() != thread_id) {
+        // Thread mismatch, so move it to the end of the list
+        // and start over.
+        _Py_AddPendingCall(interp, thread_id, *func, *arg);
+        return;
+    }
+}
+
+int
+Py_AddPendingCall(int (*func)(void *), void *arg)
+{
+    PyInterpreterState *interp = _PyRuntime.interpreters.main;
+    return _Py_AddPendingCall(interp, _PyRuntime.main_thread, func, arg);
+}
+
 /* This implementation is thread-safe.  It allows
    scheduling to be made from any thread, and even from an executing
    callback.
  */
 
 int
-Py_AddPendingCall(int (*func)(void *), void *arg)
+_Py_AddPendingCall(PyInterpreterState *interp, unsigned long thread_id, int (*func)(void *), void *arg)
 {
-    int i, j, result=0;
-    PyThread_type_lock lock = _PyRuntime.ceval.pending.lock;
-
     /* try a few times for the lock.  Since this mechanism is used
      * for signal handling (on the main thread), there is a (slim)
      * chance that a signal is delivered on the same thread while we
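The queue manipulated by _add_pending_call() and _pop_pending_call() above is a fixed-size ring buffer. A standalone model of its full/empty arithmetic (illustrative only, not code from the commit):

#define NPENDINGCALLS 32  /* as defined in pycore_ceval.h */

/* One slot is sacrificed so the two states stay distinguishable:
   first == last                        -> empty
   (last + 1) % NPENDINGCALLS == first  -> full (31 usable slots) */
static int queue_empty(int first, int last) { return first == last; }
static int queue_full(int first, int last)  { return (last + 1) % NPENDINGCALLS == first; }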
@@ -345,7 +385,9 @@ Py_AddPendingCall(int (*func)(void *), void *arg)
      * We also check for lock being NULL, in the unlikely case that
      * this function is called before any bytecode evaluation takes place.
      */
+    PyThread_type_lock lock = interp->ceval.pending.lock;
     if (lock != NULL) {
+        int i;
         for (i = 0; i<100; i++) {
             if (PyThread_acquire_lock(lock, NOWAIT_LOCK))
                 break;
@@ -354,17 +396,21 @@ Py_AddPendingCall(int (*func)(void *), void *arg)
             return -1;
     }
 
-    i = _PyRuntime.ceval.pending.last;
-    j = (i + 1) % NPENDINGCALLS;
-    if (j == _PyRuntime.ceval.pending.first) {
-        result = -1; /* Queue full */
-    } else {
-        _PyRuntime.ceval.pending.calls[i].func = func;
-        _PyRuntime.ceval.pending.calls[i].arg = arg;
-        _PyRuntime.ceval.pending.last = j;
+    int result = -1;
+    if (interp->finalizing) {
+        PyObject *exc, *val, *tb;
+        PyErr_Fetch(&exc, &val, &tb);
+        PyErr_SetString(PyExc_SystemError, "Py_AddPendingCall: cannot add pending calls (interpreter shutting down)");
+        PyErr_Print();
+        PyErr_Restore(exc, val, tb);
+        goto done;
     }
+
+    result = _add_pending_call(interp, thread_id, func, arg);
     /* signal main loop */
-    SIGNAL_PENDING_CALLS();
+    SIGNAL_PENDING_CALLS(interp);
+
+done:
     if (lock != NULL)
         PyThread_release_lock(lock);
     return result;
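Note that _Py_AddPendingCall() above returns -1 both when the queue is full and, with the new guard, when the interpreter is finalizing. A caller that wants to tolerate transient fullness might retry briefly; an illustrative sketch, not from the commit:

static int
add_pending_call_with_retry(PyInterpreterState *interp, unsigned long thread_id,
                            int (*func)(void *), void *arg)
{
    for (int attempt = 0; attempt < 3; attempt++) {
        if (_Py_AddPendingCall(interp, thread_id, func, arg) == 0) {
            return 0;
        }
        /* Queue full, lock contended, or interpreter shutting down. */
    }
    return -1;
}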
@@ -374,9 +420,7 @@ static int
 handle_signals(void)
 {
     /* Only handle signals on main thread. */
-    if (_PyRuntime.ceval.pending.main_thread &&
-        PyThread_get_thread_ident() != _PyRuntime.ceval.pending.main_thread)
-    {
+    if (PyThread_get_thread_ident() != _PyRuntime.main_thread) {
         return 0;
     }
     /*
@@ -396,17 +440,10 @@ handle_signals(void)
 }
 
 static int
-make_pending_calls(void)
+make_pending_calls(PyInterpreterState *interp)
 {
     static int busy = 0;
 
-    /* only service pending calls on main thread */
-    if (_PyRuntime.ceval.pending.main_thread &&
-        PyThread_get_thread_ident() != _PyRuntime.ceval.pending.main_thread)
-    {
-        return 0;
-    }
-
     /* don't perform recursive pending calls */
     if (busy) {
         return 0;
@@ -414,13 +451,13 @@ make_pending_calls(void)
     busy = 1;
     /* unsignal before starting to call callbacks, so that any callback
        added in-between re-signals */
-    UNSIGNAL_PENDING_CALLS();
+    UNSIGNAL_PENDING_CALLS(interp);
     int res = 0;
 
-    if (!_PyRuntime.ceval.pending.lock) {
+    if (!interp->ceval.pending.lock) {
         /* initial allocation of the lock */
-        _PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
-        if (_PyRuntime.ceval.pending.lock == NULL) {
+        interp->ceval.pending.lock = PyThread_allocate_lock();
+        if (interp->ceval.pending.lock == NULL) {
             res = -1;
             goto error;
         }
@@ -428,24 +465,18 @@ make_pending_calls(void)
 
     /* perform a bounded number of calls, in case of recursion */
     for (int i=0; i<NPENDINGCALLS; i++) {
-        int j;
-        int (*func)(void *);
+        int (*func)(void *) = NULL;
         void *arg = NULL;
 
-        /* pop one item off the queue while holding the lock */
-        PyThread_acquire_lock(_PyRuntime.ceval.pending.lock, WAIT_LOCK);
-        j = _PyRuntime.ceval.pending.first;
-        if (j == _PyRuntime.ceval.pending.last) {
-            func = NULL; /* Queue empty */
-        } else {
-            func = _PyRuntime.ceval.pending.calls[j].func;
-            arg = _PyRuntime.ceval.pending.calls[j].arg;
-            _PyRuntime.ceval.pending.first = (j + 1) % NPENDINGCALLS;
-        }
-        PyThread_release_lock(_PyRuntime.ceval.pending.lock);
+        PyThread_acquire_lock(interp->ceval.pending.lock, WAIT_LOCK);
+        _pop_pending_call(interp, &func, &arg);
+        PyThread_release_lock(interp->ceval.pending.lock);
 
         /* having released the lock, perform the callback */
-        if (func == NULL)
+        if (func == NULL) {
             break;
+        }
         res = func(arg);
         if (res) {
             goto error;
@@ -457,10 +488,18 @@ make_pending_calls(void)
 
 error:
     busy = 0;
-    SIGNAL_PENDING_CALLS();
+    SIGNAL_PENDING_CALLS(interp); /* We're not done yet */
     return res;
 }
 
+int
+_Py_MakePendingCalls(PyInterpreterState *interp)
+{
+    assert(PyGILState_Check());
+
+    return make_pending_calls(interp);
+}
+
 /* Py_MakePendingCalls() is a simple wrapper for the sake
    of backward-compatibility. */
 int
@@ -475,12 +514,8 @@ Py_MakePendingCalls(void)
         return res;
     }
 
-    res = make_pending_calls();
-    if (res != 0) {
-        return res;
-    }
-
-    return 0;
+    PyInterpreterState *interp = _PyRuntime.interpreters.main;
+    return make_pending_calls(interp);
 }
 
 /* The interpreter's recursion limit */
@@ -687,7 +722,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
 
 #define DISPATCH() \
     { \
-        if (!_Py_atomic_load_relaxed(&_PyRuntime.ceval.eval_breaker)) { \
+        if (!_Py_atomic_load_relaxed(&tstate->interp->ceval.eval_breaker)) { \
             FAST_DISPATCH(); \
         } \
         continue; \
@@ -989,7 +1024,7 @@ main_loop:
            async I/O handler); see Py_AddPendingCall() and
            Py_MakePendingCalls() above. */
 
-        if (_Py_atomic_load_relaxed(&_PyRuntime.ceval.eval_breaker)) {
+        if (_Py_atomic_load_relaxed(&(tstate->interp->ceval.eval_breaker))) {
             opcode = _Py_OPCODE(*next_instr);
             if (opcode == SETUP_FINALLY ||
                 opcode == SETUP_WITH ||
@@ -1022,9 +1057,9 @@ main_loop:
             }
         }
         if (_Py_atomic_load_relaxed(
-                    &_PyRuntime.ceval.pending.calls_to_do))
+                    &(tstate->interp->ceval.pending.calls_to_do)))
         {
-            if (make_pending_calls() != 0) {
+            if (_Py_MakePendingCalls(tstate->interp) != 0) {
                 goto error;
             }
         }
@@ -1056,7 +1091,7 @@ main_loop:
         if (tstate->async_exc != NULL) {
             PyObject *exc = tstate->async_exc;
             tstate->async_exc = NULL;
-            UNSIGNAL_ASYNC_EXC();
+            UNSIGNAL_ASYNC_EXC(tstate->interp);
             PyErr_SetNone(exc);
             Py_DECREF(exc);
             goto error;
@@ -176,7 +176,7 @@ static void drop_gil(PyThreadState *tstate)
                         &_PyRuntime.ceval.gil.last_holder)
                     ) == tstate)
     {
-        RESET_GIL_DROP_REQUEST();
+        RESET_GIL_DROP_REQUEST(tstate->interp);
         /* NOTE: if COND_WAIT does not atomically start waiting when
            releasing the mutex, another thread can run through, take
            the GIL and drop it again, and reset the condition
@@ -213,7 +213,7 @@ static void take_gil(PyThreadState *tstate)
         if (timed_out &&
             _Py_atomic_load_relaxed(&_PyRuntime.ceval.gil.locked) &&
             _PyRuntime.ceval.gil.switch_number == saved_switchnum) {
-            SET_GIL_DROP_REQUEST();
+            SET_GIL_DROP_REQUEST(tstate->interp);
         }
     }
 _ready:
@@ -239,10 +239,10 @@ _ready:
     MUTEX_UNLOCK(_PyRuntime.ceval.gil.switch_mutex);
 #endif
     if (_Py_atomic_load_relaxed(&_PyRuntime.ceval.gil_drop_request)) {
-        RESET_GIL_DROP_REQUEST();
+        RESET_GIL_DROP_REQUEST(tstate->interp);
     }
     if (tstate->async_exc != NULL) {
-        _PyEval_SignalAsyncExc();
+        _PyEval_SignalAsyncExc(tstate->interp);
     }
 
     MUTEX_UNLOCK(_PyRuntime.ceval.gil.mutex);
@@ -1459,8 +1459,32 @@ Py_EndInterpreter(PyThreadState *tstate)
     if (tstate->frame != NULL)
         Py_FatalError("Py_EndInterpreter: thread still has a frame");
 
+    // Mark as finalizing.
+    if (interp->ceval.pending.lock != NULL) {
+        PyThread_acquire_lock(interp->ceval.pending.lock, 1);
+    }
+    interp->finalizing = 1;
+    if (interp->ceval.pending.lock != NULL) {
+        PyThread_release_lock(interp->ceval.pending.lock);
+    }
+
+    // Wrap up existing threads.
     wait_for_thread_shutdown();
 
+    // Make any pending calls.
+    if (_Py_atomic_load_relaxed(
+            &(interp->ceval.pending.calls_to_do)))
+    {
+        // XXX Ensure that the interpreter is running in the current thread?
+        if (_Py_MakePendingCalls(interp) < 0) {
+            PyObject *exc, *val, *tb;
+            PyErr_Fetch(&exc, &val, &tb);
+            PyErr_BadInternalCall();
+            _PyErr_ChainExceptions(exc, val, tb);
+            PyErr_Print();
+        }
+    }
+
     call_py_exitfuncs(interp);
 
     if (tstate != interp->tstate_head || tstate->next != NULL)
@@ -132,28 +132,19 @@ PyInterpreterState_New(void)
         return NULL;
     }
 
+    memset(interp, 0, sizeof(*interp));
     interp->id_refcount = -1;
-    interp->id_mutex = NULL;
-    interp->modules = NULL;
-    interp->modules_by_index = NULL;
-    interp->sysdict = NULL;
-    interp->builtins = NULL;
-    interp->builtins_copy = NULL;
-    interp->tstate_head = NULL;
     interp->check_interval = 100;
-    interp->num_threads = 0;
-    interp->pythread_stacksize = 0;
-    interp->codec_search_path = NULL;
-    interp->codec_search_cache = NULL;
-    interp->codec_error_registry = NULL;
-    interp->codecs_initialized = 0;
-    interp->fscodec_initialized = 0;
+
+    interp->ceval.pending.lock = PyThread_allocate_lock();
+    if (interp->ceval.pending.lock == NULL) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "failed to create interpreter ceval pending mutex");
+        return NULL;
+    }
     interp->core_config = _PyCoreConfig_INIT;
     interp->config = _PyMainInterpreterConfig_INIT;
-    interp->importlib = NULL;
-    interp->import_func = NULL;
     interp->eval_frame = _PyEval_EvalFrameDefault;
-    interp->co_extra_user_count = 0;
 #ifdef HAVE_DLOPEN
 #if HAVE_DECL_RTLD_NOW
     interp->dlopenflags = RTLD_NOW;
@@ -161,13 +152,10 @@ PyInterpreterState_New(void)
     interp->dlopenflags = RTLD_LAZY;
 #endif
 #endif
-#ifdef HAVE_FORK
-    interp->before_forkers = NULL;
-    interp->after_forkers_parent = NULL;
-    interp->after_forkers_child = NULL;
-#endif
-    interp->pyexitfunc = NULL;
-    interp->pyexitmodule = NULL;
 
+    if (_PyRuntime.main_thread == 0) {
+        _PyRuntime.main_thread = PyThread_get_thread_ident();
+    }
+
     HEAD_LOCK();
     if (_PyRuntime.interpreters.next_id < 0) {
@@ -222,6 +210,9 @@ PyInterpreterState_Clear(PyInterpreterState *interp)
     Py_CLEAR(interp->after_forkers_parent);
     Py_CLEAR(interp->after_forkers_child);
 #endif
+    // XXX Once we have one allocator per interpreter (i.e.
+    // per-interpreter GC) we must ensure that all of the interpreter's
+    // objects have been cleaned up at the point.
 }
 
 
@@ -262,6 +253,9 @@ PyInterpreterState_Delete(PyInterpreterState *interp)
     if (interp->id_mutex != NULL) {
         PyThread_free_lock(interp->id_mutex);
     }
+    if (interp->ceval.pending.lock != NULL) {
+        PyThread_free_lock(interp->ceval.pending.lock);
+    }
     PyMem_RawFree(interp);
 }
 
@@ -871,7 +865,7 @@ PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
             p->async_exc = exc;
             HEAD_UNLOCK();
             Py_XDECREF(old_exc);
-            _PyEval_SignalAsyncExc();
+            _PyEval_SignalAsyncExc(interp);
             return 1;
         }
     }
@@ -1338,6 +1332,7 @@ _PyCrossInterpreterData_Release(_PyCrossInterpreterData *data)
     }
 
     // "Release" the data and/or the object.
+    // XXX Use _Py_AddPendingCall().
     _call_in_interpreter(interp, _release_xidata, data);
 }
 