GH-108866: Guarantee forward progress in executors. (GH-113006)

Mark Shannon, 2023-12-12 19:02:24 +00:00 (committed by GitHub)
parent 9898e61041
commit 956023826a
7 changed files with 19 additions and 21 deletions


@@ -32,7 +32,7 @@ typedef struct {
typedef struct _PyExecutorObject {
PyObject_VAR_HEAD
/* WARNING: execute consumes a reference to self. This is necessary to allow executors to tail call into each other. */
struct _PyInterpreterFrame *(*execute)(struct _PyExecutorObject *self, struct _PyInterpreterFrame *frame, PyObject **stack_pointer);
_Py_CODEUNIT *(*execute)(struct _PyExecutorObject *self, struct _PyInterpreterFrame *frame, PyObject **stack_pointer);
_PyVMData vm_data; /* Used by the VM, but opaque to the optimizer */
/* Data needed by the executor goes here, but is opaque to the VM */
} _PyExecutorObject;
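For illustration only (not part of this commit): a minimal sketch of an execute callback that follows the new contract, written against CPython's internal headers. The type MyExecutorObject, the field resume_point and the function my_execute are hypothetical names; the reference-consuming and return conventions mirror the WARNING comment above and the counter_execute change later in this diff.

/* Hypothetical sketch (not CPython code): an executor callback under the new
 * signature.  It consumes the reference to self and returns the _Py_CODEUNIT*
 * at which tier 1 resumes; NULL is reserved for "an exception has been set". */
typedef struct {
    _PyExecutorObject base;          /* vm_data etc. live in the base */
    _Py_CODEUNIT *resume_point;      /* where tier 1 should continue */
} MyExecutorObject;

static _Py_CODEUNIT *
my_execute(_PyExecutorObject *self, struct _PyInterpreterFrame *frame,
           PyObject **stack_pointer)
{
    MyExecutorObject *exec = (MyExecutorObject *)self;
    _Py_CODEUNIT *next = exec->resume_point;
    _PyFrame_SetStackPointer(frame, stack_pointer);  /* hand the stack back to the VM */
    Py_DECREF(self);   /* execute consumes a reference to self (see WARNING above) */
    return next;       /* tier 1 resumes here; the executor is expected to have made forward progress */
}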


@@ -1568,7 +1568,7 @@ const struct opcode_metadata _PyOpcode_opcode_metadata[OPCODE_METADATA_SIZE] = {
[JUMP_BACKWARD] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG },
[JUMP] = { true, 0, HAS_ARG_FLAG | HAS_JUMP_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG },
[JUMP_NO_INTERRUPT] = { true, 0, HAS_ARG_FLAG | HAS_JUMP_FLAG },
[ENTER_EXECUTOR] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG },
[ENTER_EXECUTOR] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG },
[_POP_JUMP_IF_FALSE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG },
[_POP_JUMP_IF_TRUE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG },
[_IS_NONE] = { true, INSTR_FMT_IX, 0 },


@@ -24,7 +24,7 @@ typedef struct {
_PyUOpInstruction trace[1];
} _PyUOpExecutorObject;
_PyInterpreterFrame *_PyUOpExecute(
_Py_CODEUNIT *_PyUOpExecute(
_PyExecutorObject *executor,
_PyInterpreterFrame *frame,
PyObject **stack_pointer);


@@ -0,0 +1,3 @@
Change the API and contract of ``_PyExecutorObject`` to return the
next_instr pointer, instead of the frame, and to always execute at least one
instruction.
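A condensed caller-side view of that contract, lifted from the ENTER_EXECUTOR change below (error handling and reference counting exactly as in the diff):

/* Caller-side sketch of the new contract (see ENTER_EXECUTOR below). */
next_instr = executor->execute(executor, frame, stack_pointer);  /* consumes the executor reference */
frame = tstate->current_frame;      /* the executor may have entered or left frames */
if (next_instr == NULL) {
    goto resume_with_error;         /* NULL return means an exception is set */
}
stack_pointer = _PyFrame_GetStackPointer(frame);
/* Tier 1 resumes at next_instr; the executor has already executed at
 * least one instruction, so the loop cannot spin on ENTER_EXECUTOR. */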


@@ -2352,20 +2352,17 @@ dummy_func(
PyCodeObject *code = _PyFrame_GetCode(frame);
_PyExecutorObject *executor = (_PyExecutorObject *)code->co_executors->executors[oparg&255];
int original_oparg = executor->vm_data.oparg | (oparg & 0xfffff00);
JUMPBY(1-original_oparg);
frame->instr_ptr = next_instr;
Py_INCREF(executor);
if (executor->execute == _PyUOpExecute) {
current_executor = (_PyUOpExecutorObject *)executor;
GOTO_TIER_TWO();
}
frame = executor->execute(executor, frame, stack_pointer);
if (frame == NULL) {
frame = tstate->current_frame;
next_instr = executor->execute(executor, frame, stack_pointer);
frame = tstate->current_frame;
if (next_instr == NULL) {
goto resume_with_error;
}
goto enter_tier_one;
stack_pointer = _PyFrame_GetStackPointer(frame);
}
replaced op(_POP_JUMP_IF_FALSE, (unused/1, cond -- )) {


@@ -2328,20 +2328,18 @@
CHECK_EVAL_BREAKER();
PyCodeObject *code = _PyFrame_GetCode(frame);
_PyExecutorObject *executor = (_PyExecutorObject *)code->co_executors->executors[oparg&255];
int original_oparg = executor->vm_data.oparg | (oparg & 0xfffff00);
JUMPBY(1-original_oparg);
frame->instr_ptr = next_instr;
Py_INCREF(executor);
if (executor->execute == _PyUOpExecute) {
current_executor = (_PyUOpExecutorObject *)executor;
GOTO_TIER_TWO();
}
frame = executor->execute(executor, frame, stack_pointer);
if (frame == NULL) {
frame = tstate->current_frame;
next_instr = executor->execute(executor, frame, stack_pointer);
frame = tstate->current_frame;
if (next_instr == NULL) {
goto resume_with_error;
}
goto enter_tier_one;
stack_pointer = _PyFrame_GetStackPointer(frame);
DISPATCH();
}
TARGET(EXIT_INIT_CHECK) {


@@ -167,6 +167,7 @@ _PyOptimizer_BackEdge(_PyInterpreterFrame *frame, _Py_CODEUNIT *src, _Py_CODEUNIT
}
_PyOptimizerObject *opt = interp->optimizer;
_PyExecutorObject *executor = NULL;
/* Start optimizing at the destination to guarantee forward progress */
int err = opt->optimize(opt, code, dest, &executor, (int)(stack_pointer - _PyFrame_Stackbase(frame)));
if (err <= 0) {
assert(executor == NULL);
@@ -247,14 +248,13 @@ PyTypeObject _PyCounterExecutor_Type = {
.tp_methods = executor_methods,
};
static _PyInterpreterFrame *
static _Py_CODEUNIT *
counter_execute(_PyExecutorObject *self, _PyInterpreterFrame *frame, PyObject **stack_pointer)
{
((_PyCounterExecutorObject *)self)->optimizer->count++;
_PyFrame_SetStackPointer(frame, stack_pointer);
frame->instr_ptr = ((_PyCounterExecutorObject *)self)->next_instr;
Py_DECREF(self);
return frame;
return ((_PyCounterExecutorObject *)self)->next_instr;
}
static int
@@ -891,7 +891,7 @@ uop_optimize(
/* Dummy execute() function for UOp Executor.
* The actual implementation is inlined in ceval.c,
* in _PyEval_EvalFrameDefault(). */
_PyInterpreterFrame *
_Py_CODEUNIT *
_PyUOpExecute(_PyExecutorObject *executor, _PyInterpreterFrame *frame, PyObject **stack_pointer)
{
Py_FatalError("Tier 2 is now inlined into Tier 1");