mirror of https://github.com/python/cpython
gh-115778: Add `tierN` annotation for instruction definitions (#115815)
This replaces the old `TIER_{ONE,TWO}_ONLY` macros. Note that `specialized` implies `tier1`. Co-authored-by: Alex Waygood <Alex.Waygood@Gmail.com>
This commit is contained in:
parent 59057ce55a
commit e4561e0501
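In the interpreter DSL, the change amounts to moving the tier restriction out of the instruction body and onto the definition itself. The sketch below is a schematic before/after, drawn from the RESUME, _SET_IP, and _SPECIALIZE_TO_BOOL hunks in this diff, with the bodies elided:

// Before: tier restriction expressed as a macro inside the body.
inst(RESUME, (--)) {
    TIER_ONE_ONLY
    ...
}
op(_SET_IP, (instr_ptr/4 --)) {
    TIER_TWO_ONLY
    ...
}

// After: tier restriction expressed as an annotation on the definition.
tier1 inst(RESUME, (--)) {
    ...
}
tier2 op(_SET_IP, (instr_ptr/4 --)) {
    ...
}

// Specializing ops simply drop the macro and gain no annotation,
// because the specializing annotation already implies tier 1.
specializing op(_SPECIALIZE_TO_BOOL, (counter/1, value -- value)) {
    ...
}
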
@@ -0,0 +1 @@
+Add ``tierN`` annotation for instruction definition in interpreter DSL.

@@ -141,8 +141,7 @@ dummy_func(
             RESUME_CHECK,
         };
 
-        inst(RESUME, (--)) {
-            TIER_ONE_ONLY
+        tier1 inst(RESUME, (--)) {
             assert(frame == tstate->current_frame);
             uintptr_t global_version =
                 _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker) &
@@ -268,8 +267,7 @@ dummy_func(
 
         macro(END_FOR) = POP_TOP;
 
-        inst(INSTRUMENTED_END_FOR, (receiver, value -- receiver)) {
-            TIER_ONE_ONLY
+        tier1 inst(INSTRUMENTED_END_FOR, (receiver, value -- receiver)) {
             /* Need to create a fake StopIteration error here,
              * to conform to PEP 380 */
             if (PyGen_Check(receiver)) {
@@ -286,8 +284,7 @@ dummy_func(
                 Py_DECREF(receiver);
             }
 
-        inst(INSTRUMENTED_END_SEND, (receiver, value -- value)) {
-            TIER_ONE_ONLY
+        tier1 inst(INSTRUMENTED_END_SEND, (receiver, value -- value)) {
             if (PyGen_Check(receiver) || PyCoro_CheckExact(receiver)) {
                 PyErr_SetObject(PyExc_StopIteration, value);
                 if (monitor_stop_iteration(tstate, frame, this_instr)) {
@@ -319,7 +316,6 @@ dummy_func(
         };
 
         specializing op(_SPECIALIZE_TO_BOOL, (counter/1, value -- value)) {
-            TIER_ONE_ONLY
            #if ENABLE_SPECIALIZATION
            if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                next_instr = this_instr;
@@ -506,8 +502,7 @@ dummy_func(
         // So the inputs are the same as for all BINARY_OP
         // specializations, but there is no output.
         // At the end we just skip over the STORE_FAST.
-        op(_BINARY_OP_INPLACE_ADD_UNICODE, (left, right --)) {
-            TIER_ONE_ONLY
+        tier1 op(_BINARY_OP_INPLACE_ADD_UNICODE, (left, right --)) {
            assert(next_instr->op.code == STORE_FAST);
            PyObject **target_local = &GETLOCAL(next_instr->op.arg);
            DEOPT_IF(*target_local != left);
@@ -545,7 +540,6 @@ dummy_func(
         };
 
         specializing op(_SPECIALIZE_BINARY_SUBSCR, (counter/1, container, sub -- container, sub)) {
-            TIER_ONE_ONLY
            #if ENABLE_SPECIALIZATION
            if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                next_instr = this_instr;
@@ -693,7 +687,6 @@ dummy_func(
         };
 
         specializing op(_SPECIALIZE_STORE_SUBSCR, (counter/1, container, sub -- container, sub)) {
-            TIER_ONE_ONLY
            #if ENABLE_SPECIALIZATION
            if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                next_instr = this_instr;
@@ -762,8 +755,7 @@ dummy_func(
             ERROR_IF(res == NULL, error);
         }
 
-        inst(RAISE_VARARGS, (args[oparg] -- )) {
-            TIER_ONE_ONLY
+        tier1 inst(RAISE_VARARGS, (args[oparg] -- )) {
            PyObject *cause = NULL, *exc = NULL;
            switch (oparg) {
            case 2:
@@ -787,8 +779,7 @@ dummy_func(
             ERROR_IF(true, error);
         }
 
-        inst(INTERPRETER_EXIT, (retval --)) {
-            TIER_ONE_ONLY
+        tier1 inst(INTERPRETER_EXIT, (retval --)) {
            assert(frame == &entry_frame);
            assert(_PyFrame_IsIncomplete(frame));
            /* Restore previous frame and return. */
@@ -980,7 +971,6 @@ dummy_func(
         };
 
         specializing op(_SPECIALIZE_SEND, (counter/1, receiver, unused -- receiver, unused)) {
-            TIER_ONE_ONLY
            #if ENABLE_SPECIALIZATION
            if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                next_instr = this_instr;
@@ -1075,8 +1065,7 @@ dummy_func(
             goto resume_frame;
         }
 
-        inst(YIELD_VALUE, (retval -- unused)) {
-            TIER_ONE_ONLY
+        tier1 inst(YIELD_VALUE, (retval -- unused)) {
            // NOTE: It's important that YIELD_VALUE never raises an exception!
            // The compiler treats any exception raised here as a failed close()
            // or throw() call.
@@ -1105,8 +1094,7 @@ dummy_func(
             Py_XSETREF(exc_info->exc_value, exc_value == Py_None ? NULL : exc_value);
         }
 
-        inst(RERAISE, (values[oparg], exc -- values[oparg])) {
-            TIER_ONE_ONLY
+        tier1 inst(RERAISE, (values[oparg], exc -- values[oparg])) {
            assert(oparg >= 0 && oparg <= 2);
            if (oparg) {
                PyObject *lasti = values[0];
@@ -1127,8 +1115,7 @@ dummy_func(
             goto exception_unwind;
         }
 
-        inst(END_ASYNC_FOR, (awaitable, exc -- )) {
-            TIER_ONE_ONLY
+        tier1 inst(END_ASYNC_FOR, (awaitable, exc -- )) {
            assert(exc && PyExceptionInstance_Check(exc));
            if (PyErr_GivenExceptionMatches(exc, PyExc_StopAsyncIteration)) {
                DECREF_INPUTS();
@@ -1141,8 +1128,7 @@ dummy_func(
             }
         }
 
-        inst(CLEANUP_THROW, (sub_iter, last_sent_val, exc_value -- none, value)) {
-            TIER_ONE_ONLY
+        tier1 inst(CLEANUP_THROW, (sub_iter, last_sent_val, exc_value -- none, value)) {
            assert(throwflag);
            assert(exc_value && PyExceptionInstance_Check(exc_value));
            if (PyErr_GivenExceptionMatches(exc_value, PyExc_StopIteration)) {
@@ -1214,7 +1200,6 @@ dummy_func(
         };
 
         specializing op(_SPECIALIZE_UNPACK_SEQUENCE, (counter/1, seq -- seq)) {
-            TIER_ONE_ONLY
            #if ENABLE_SPECIALIZATION
            if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                next_instr = this_instr;
@@ -1284,7 +1269,6 @@ dummy_func(
         };
 
         specializing op(_SPECIALIZE_STORE_ATTR, (counter/1, owner -- owner)) {
-            TIER_ONE_ONLY
            #if ENABLE_SPECIALIZATION
            if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
@@ -1403,7 +1387,6 @@ dummy_func(
         };
 
         specializing op(_SPECIALIZE_LOAD_GLOBAL, (counter/1 -- )) {
-            TIER_ONE_ONLY
            #if ENABLE_SPECIALIZATION
            if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1);
@@ -1731,7 +1714,6 @@ dummy_func(
         };
 
         specializing op(_SPECIALIZE_LOAD_SUPER_ATTR, (counter/1, global_super, class, unused -- global_super, class, unused)) {
-            TIER_ONE_ONLY
            #if ENABLE_SPECIALIZATION
            int load_method = oparg & 1;
            if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
@@ -1744,8 +1726,7 @@ dummy_func(
             #endif /* ENABLE_SPECIALIZATION */
         }
 
-        op(_LOAD_SUPER_ATTR, (global_super, class, self -- attr, null if (oparg & 1))) {
-            TIER_ONE_ONLY
+        tier1 op(_LOAD_SUPER_ATTR, (global_super, class, self -- attr, null if (oparg & 1))) {
            if (opcode == INSTRUMENTED_LOAD_SUPER_ATTR) {
                PyObject *arg = oparg & 2 ? class : &_PyInstrumentation_MISSING;
                int err = _Py_call_instrumentation_2args(
@@ -1847,7 +1828,6 @@ dummy_func(
         };
 
         specializing op(_SPECIALIZE_LOAD_ATTR, (counter/1, owner -- owner)) {
-            TIER_ONE_ONLY
            #if ENABLE_SPECIALIZATION
            if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1);
@@ -2172,7 +2152,6 @@ dummy_func(
         };
 
         specializing op(_SPECIALIZE_COMPARE_OP, (counter/1, left, right -- left, right)) {
-            TIER_ONE_ONLY
            #if ENABLE_SPECIALIZATION
            if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                next_instr = this_instr;
@@ -2297,28 +2276,24 @@ dummy_func(
             b = res ? Py_True : Py_False;
         }
 
-        inst(IMPORT_NAME, (level, fromlist -- res)) {
-            TIER_ONE_ONLY
+        tier1 inst(IMPORT_NAME, (level, fromlist -- res)) {
             PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
             res = import_name(tstate, frame, name, fromlist, level);
             DECREF_INPUTS();
             ERROR_IF(res == NULL, error);
         }
 
-        inst(IMPORT_FROM, (from -- from, res)) {
-            TIER_ONE_ONLY
+        tier1 inst(IMPORT_FROM, (from -- from, res)) {
             PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
             res = import_from(tstate, from, name);
             ERROR_IF(res == NULL, error);
         }
 
-        inst(JUMP_FORWARD, (--)) {
-            TIER_ONE_ONLY
+        tier1 inst(JUMP_FORWARD, (--)) {
             JUMPBY(oparg);
         }
 
-        inst(JUMP_BACKWARD, (unused/1 --)) {
-            TIER_ONE_ONLY
+        tier1 inst(JUMP_BACKWARD, (unused/1 --)) {
             CHECK_EVAL_BREAKER();
             assert(oparg <= INSTR_OFFSET());
             JUMPBY(-oparg);
@@ -2373,8 +2348,7 @@ dummy_func(
             JUMP_BACKWARD_NO_INTERRUPT,
         };
 
-        inst(ENTER_EXECUTOR, (--)) {
-            TIER_ONE_ONLY
+        tier1 inst(ENTER_EXECUTOR, (--)) {
             CHECK_EVAL_BREAKER();
             PyCodeObject *code = _PyFrame_GetCode(frame);
             _PyExecutorObject *executor = code->co_executors->executors[oparg & 255];
@@ -2423,8 +2397,7 @@ dummy_func(
 
         macro(POP_JUMP_IF_NOT_NONE) = unused/1 + _IS_NONE + _POP_JUMP_IF_FALSE;
 
-        inst(JUMP_BACKWARD_NO_INTERRUPT, (--)) {
-            TIER_ONE_ONLY
+        tier1 inst(JUMP_BACKWARD_NO_INTERRUPT, (--)) {
             /* This bytecode is used in the `yield from` or `await` loop.
              * If there is an interrupt, we want it handled in the innermost
              * generator or coroutine, so we deliberately do not check it here.
@@ -2520,7 +2493,6 @@ dummy_func(
         };
 
         specializing op(_SPECIALIZE_FOR_ITER, (counter/1, iter -- iter)) {
-            TIER_ONE_ONLY
            #if ENABLE_SPECIALIZATION
            if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                next_instr = this_instr;
@@ -3018,7 +2990,6 @@ dummy_func(
         };
 
         specializing op(_SPECIALIZE_CALL, (counter/1, callable, self_or_null, args[oparg] -- callable, self_or_null, args[oparg])) {
-            TIER_ONE_ONLY
            #if ENABLE_SPECIALIZATION
            if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                next_instr = this_instr;
@@ -3480,8 +3451,7 @@ dummy_func(
         }
 
         // This is secretly a super-instruction
-        inst(CALL_LIST_APPEND, (unused/1, unused/2, callable, self, args[oparg] -- unused)) {
-            TIER_ONE_ONLY
+        tier1 inst(CALL_LIST_APPEND, (unused/1, unused/2, callable, self, args[oparg] -- unused)) {
            assert(oparg == 1);
            PyInterpreterState *interp = tstate->interp;
            DEOPT_IF(callable != interp->callable_cache.list_append);
@@ -3819,8 +3789,7 @@ dummy_func(
             }
         }
 
-        inst(RETURN_GENERATOR, (--)) {
-            TIER_ONE_ONLY
+        tier1 inst(RETURN_GENERATOR, (--)) {
            assert(PyFunction_Check(frame->f_funcobj));
            PyFunctionObject *func = (PyFunctionObject *)frame->f_funcobj;
            PyGenObject *gen = (PyGenObject *)_Py_MakeCoro(func);
@@ -3886,7 +3855,6 @@ dummy_func(
         }
 
         specializing op(_SPECIALIZE_BINARY_OP, (counter/1, lhs, rhs -- lhs, rhs)) {
-            TIER_ONE_ONLY
            #if ENABLE_SPECIALIZATION
            if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                next_instr = this_instr;
@@ -3992,8 +3960,7 @@ dummy_func(
             INSTRUMENTED_JUMP(this_instr, next_instr + offset, PY_MONITORING_EVENT_BRANCH);
         }
 
-        inst(EXTENDED_ARG, ( -- )) {
-            TIER_ONE_ONLY
+        tier1 inst(EXTENDED_ARG, ( -- )) {
            assert(oparg);
            opcode = next_instr->op.code;
            oparg = oparg << 8 | next_instr->op.arg;
@@ -4001,14 +3968,12 @@ dummy_func(
             DISPATCH_GOTO();
         }
 
-        inst(CACHE, (--)) {
-            TIER_ONE_ONLY
+        tier1 inst(CACHE, (--)) {
             assert(0 && "Executing a cache.");
             Py_FatalError("Executing a cache.");
         }
 
-        inst(RESERVED, (--)) {
-            TIER_ONE_ONLY
+        tier1 inst(RESERVED, (--)) {
             assert(0 && "Executing RESERVED instruction.");
             Py_FatalError("Executing RESERVED instruction.");
         }
@@ -4048,8 +4013,7 @@ dummy_func(
             CHECK_EVAL_BREAKER();
         }
 
-        op(_SET_IP, (instr_ptr/4 --)) {
-            TIER_TWO_ONLY
+        tier2 op(_SET_IP, (instr_ptr/4 --)) {
             frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr;
         }
 
@@ -4062,45 +4026,37 @@ dummy_func(
             #endif
         }
 
-        op(_EXIT_TRACE, (--)) {
-            TIER_TWO_ONLY
+        tier2 op(_EXIT_TRACE, (--)) {
             EXIT_IF(1);
         }
 
-        op(_CHECK_VALIDITY, (--)) {
-            TIER_TWO_ONLY
+        tier2 op(_CHECK_VALIDITY, (--)) {
             DEOPT_IF(!current_executor->vm_data.valid);
         }
 
-        pure op(_LOAD_CONST_INLINE, (ptr/4 -- value)) {
-            TIER_TWO_ONLY
+        tier2 pure op(_LOAD_CONST_INLINE, (ptr/4 -- value)) {
             value = Py_NewRef(ptr);
         }
 
-        pure op(_LOAD_CONST_INLINE_BORROW, (ptr/4 -- value)) {
-            TIER_TWO_ONLY
+        tier2 pure op(_LOAD_CONST_INLINE_BORROW, (ptr/4 -- value)) {
             value = ptr;
         }
 
-        pure op(_LOAD_CONST_INLINE_WITH_NULL, (ptr/4 -- value, null)) {
-            TIER_TWO_ONLY
+        tier2 pure op(_LOAD_CONST_INLINE_WITH_NULL, (ptr/4 -- value, null)) {
             value = Py_NewRef(ptr);
             null = NULL;
         }
 
-        pure op(_LOAD_CONST_INLINE_BORROW_WITH_NULL, (ptr/4 -- value, null)) {
-            TIER_TWO_ONLY
+        tier2 pure op(_LOAD_CONST_INLINE_BORROW_WITH_NULL, (ptr/4 -- value, null)) {
             value = ptr;
             null = NULL;
         }
 
-        op(_CHECK_GLOBALS, (dict/4 -- )) {
-            TIER_TWO_ONLY
+        tier2 op(_CHECK_GLOBALS, (dict/4 -- )) {
             DEOPT_IF(GLOBALS() != dict);
         }
 
-        op(_CHECK_BUILTINS, (dict/4 -- )) {
-            TIER_TWO_ONLY
+        tier2 op(_CHECK_BUILTINS, (dict/4 -- )) {
             DEOPT_IF(BUILTINS() != dict);
         }
 
@@ -4113,8 +4069,7 @@ dummy_func(
         /* Only used for handling cold side exits, should never appear in
          * a normal trace or as part of an instruction.
          */
-        op(_COLD_EXIT, (--)) {
-            TIER_TWO_ONLY
+        tier2 op(_COLD_EXIT, (--)) {
            _PyExecutorObject *previous = (_PyExecutorObject *)tstate->previous_executor;
            _PyExitData *exit = &previous->exits[oparg];
            exit->temperature++;
@@ -4147,8 +4102,7 @@ dummy_func(
             GOTO_TIER_TWO(executor);
         }
 
-        op(_START_EXECUTOR, (executor/4 --)) {
-            TIER_TWO_ONLY
+        tier2 op(_START_EXECUTOR, (executor/4 --)) {
            Py_DECREF(tstate->previous_executor);
            tstate->previous_executor = NULL;
            #ifndef _Py_JIT
@@ -4156,14 +4110,12 @@ dummy_func(
             #endif
         }
 
-        op(_FATAL_ERROR, (--)) {
-            TIER_TWO_ONLY
+        tier2 op(_FATAL_ERROR, (--)) {
             assert(0);
             Py_FatalError("Fatal error uop executed.");
         }
 
-        op(_CHECK_VALIDITY_AND_SET_IP, (instr_ptr/4 --)) {
-            TIER_TWO_ONLY
+        tier2 op(_CHECK_VALIDITY_AND_SET_IP, (instr_ptr/4 --)) {
             DEOPT_IF(!current_executor->vm_data.valid);
             frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr;
         }

@@ -372,12 +372,6 @@ static inline void _Py_LeaveRecursiveCallPy(PyThreadState *tstate) {
     tstate->py_recursion_remaining++;
 }
 
-/* Marker to specify tier 1 only instructions */
-#define TIER_ONE_ONLY
-
-/* Marker to specify tier 2 only instructions */
-#define TIER_TWO_ONLY
-
 /* Implementation of "macros" that modify the instruction pointer,
  * stack pointer, or frame pointer.
  * These need to treated differently by tier 1 and 2.

@@ -3716,7 +3716,6 @@
 
         case _SET_IP: {
             PyObject *instr_ptr = (PyObject *)CURRENT_OPERAND();
-            TIER_TWO_ONLY
             frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr;
             break;
         }
@@ -3733,13 +3732,11 @@
         }
 
         case _EXIT_TRACE: {
-            TIER_TWO_ONLY
             if (1) goto side_exit;
             break;
         }
 
         case _CHECK_VALIDITY: {
-            TIER_TWO_ONLY
             if (!current_executor->vm_data.valid) goto deoptimize;
             break;
         }
@@ -3747,7 +3744,6 @@
         case _LOAD_CONST_INLINE: {
             PyObject *value;
             PyObject *ptr = (PyObject *)CURRENT_OPERAND();
-            TIER_TWO_ONLY
             value = Py_NewRef(ptr);
             stack_pointer[0] = value;
             stack_pointer += 1;
@@ -3757,7 +3753,6 @@
         case _LOAD_CONST_INLINE_BORROW: {
             PyObject *value;
             PyObject *ptr = (PyObject *)CURRENT_OPERAND();
-            TIER_TWO_ONLY
             value = ptr;
             stack_pointer[0] = value;
             stack_pointer += 1;
@@ -3768,7 +3763,6 @@
             PyObject *value;
             PyObject *null;
             PyObject *ptr = (PyObject *)CURRENT_OPERAND();
-            TIER_TWO_ONLY
             value = Py_NewRef(ptr);
             null = NULL;
             stack_pointer[0] = value;
@@ -3781,7 +3775,6 @@
             PyObject *value;
             PyObject *null;
             PyObject *ptr = (PyObject *)CURRENT_OPERAND();
-            TIER_TWO_ONLY
             value = ptr;
             null = NULL;
             stack_pointer[0] = value;
@@ -3792,14 +3785,12 @@
 
         case _CHECK_GLOBALS: {
             PyObject *dict = (PyObject *)CURRENT_OPERAND();
-            TIER_TWO_ONLY
             if (GLOBALS() != dict) goto deoptimize;
             break;
         }
 
         case _CHECK_BUILTINS: {
             PyObject *dict = (PyObject *)CURRENT_OPERAND();
-            TIER_TWO_ONLY
             if (BUILTINS() != dict) goto deoptimize;
             break;
         }
@@ -3815,7 +3806,6 @@
 
         case _COLD_EXIT: {
             oparg = CURRENT_OPARG();
-            TIER_TWO_ONLY
             _PyExecutorObject *previous = (_PyExecutorObject *)tstate->previous_executor;
             _PyExitData *exit = &previous->exits[oparg];
             exit->temperature++;
@@ -3851,7 +3841,6 @@
 
         case _START_EXECUTOR: {
             PyObject *executor = (PyObject *)CURRENT_OPERAND();
-            TIER_TWO_ONLY
             Py_DECREF(tstate->previous_executor);
             tstate->previous_executor = NULL;
             #ifndef _Py_JIT
@@ -3861,7 +3850,6 @@
         }
 
         case _FATAL_ERROR: {
-            TIER_TWO_ONLY
             assert(0);
             Py_FatalError("Fatal error uop executed.");
             break;
@@ -3869,7 +3857,6 @@
 
         case _CHECK_VALIDITY_AND_SET_IP: {
             PyObject *instr_ptr = (PyObject *)CURRENT_OPERAND();
-            TIER_TWO_ONLY
             if (!current_executor->vm_data.valid) goto deoptimize;
             frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr;
             break;

@@ -112,7 +112,6 @@
             lhs = stack_pointer[-2];
             {
                 uint16_t counter = read_u16(&this_instr[1].cache);
-                TIER_ONE_ONLY
                 #if ENABLE_SPECIALIZATION
                 if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                     next_instr = this_instr;
@@ -242,7 +241,6 @@
             /* Skip 1 cache entry */
             // _BINARY_OP_INPLACE_ADD_UNICODE
             {
-                TIER_ONE_ONLY
                 assert(next_instr->op.code == STORE_FAST);
                 PyObject **target_local = &GETLOCAL(next_instr->op.arg);
                 DEOPT_IF(*target_local != left, BINARY_OP);
@@ -429,7 +427,6 @@
             container = stack_pointer[-2];
             {
                 uint16_t counter = read_u16(&this_instr[1].cache);
-                TIER_ONE_ONLY
                 #if ENABLE_SPECIALIZATION
                 if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                     next_instr = this_instr;
@@ -739,7 +736,6 @@
             frame->instr_ptr = next_instr;
             next_instr += 1;
             INSTRUCTION_STATS(CACHE);
-            TIER_ONE_ONLY
             assert(0 && "Executing a cache.");
             Py_FatalError("Executing a cache.");
             DISPATCH();
@@ -761,7 +757,6 @@
             callable = stack_pointer[-2 - oparg];
             {
                 uint16_t counter = read_u16(&this_instr[1].cache);
-                TIER_ONE_ONLY
                 #if ENABLE_SPECIALIZATION
                 if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                     next_instr = this_instr;
@@ -1477,7 +1472,6 @@
             args = &stack_pointer[-oparg];
             self = stack_pointer[-1 - oparg];
             callable = stack_pointer[-2 - oparg];
-            TIER_ONE_ONLY
             assert(oparg == 1);
             PyInterpreterState *interp = tstate->interp;
             DEOPT_IF(callable != interp->callable_cache.list_append, CALL);
@@ -1953,7 +1947,6 @@
             exc_value = stack_pointer[-1];
             last_sent_val = stack_pointer[-2];
             sub_iter = stack_pointer[-3];
-            TIER_ONE_ONLY
             assert(throwflag);
             assert(exc_value && PyExceptionInstance_Check(exc_value));
             if (PyErr_GivenExceptionMatches(exc_value, PyExc_StopIteration)) {
@@ -1988,7 +1981,6 @@
             left = stack_pointer[-2];
             {
                 uint16_t counter = read_u16(&this_instr[1].cache);
-                TIER_ONE_ONLY
                 #if ENABLE_SPECIALIZATION
                 if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                     next_instr = this_instr;
@@ -2338,7 +2330,6 @@
             PyObject *awaitable;
             exc = stack_pointer[-1];
             awaitable = stack_pointer[-2];
-            TIER_ONE_ONLY
             assert(exc && PyExceptionInstance_Check(exc));
             if (PyErr_GivenExceptionMatches(exc, PyExc_StopAsyncIteration)) {
                 Py_DECREF(awaitable);
@@ -2383,7 +2374,6 @@
             frame->instr_ptr = next_instr;
             next_instr += 1;
             INSTRUCTION_STATS(ENTER_EXECUTOR);
-            TIER_ONE_ONLY
             CHECK_EVAL_BREAKER();
             PyCodeObject *code = _PyFrame_GetCode(frame);
             _PyExecutorObject *executor = code->co_executors->executors[oparg & 255];
@@ -2418,7 +2408,6 @@
             frame->instr_ptr = next_instr;
             next_instr += 1;
             INSTRUCTION_STATS(EXTENDED_ARG);
-            TIER_ONE_ONLY
             assert(oparg);
             opcode = next_instr->op.code;
             oparg = oparg << 8 | next_instr->op.arg;
@@ -2477,7 +2466,6 @@
             iter = stack_pointer[-1];
             {
                 uint16_t counter = read_u16(&this_instr[1].cache);
-                TIER_ONE_ONLY
                 #if ENABLE_SPECIALIZATION
                 if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                     next_instr = this_instr;
@@ -2867,7 +2855,6 @@
             PyObject *from;
             PyObject *res;
             from = stack_pointer[-1];
-            TIER_ONE_ONLY
             PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
             res = import_from(tstate, from, name);
             if (res == NULL) goto error;
@@ -2885,7 +2872,6 @@
             PyObject *res;
             fromlist = stack_pointer[-1];
             level = stack_pointer[-2];
-            TIER_ONE_ONLY
             PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
             res = import_name(tstate, frame, name, fromlist, level);
             Py_DECREF(level);
@@ -2945,7 +2931,6 @@
             PyObject *receiver;
             value = stack_pointer[-1];
             receiver = stack_pointer[-2];
-            TIER_ONE_ONLY
             /* Need to create a fake StopIteration error here,
              * to conform to PEP 380 */
             if (PyGen_Check(receiver)) {
@@ -2968,7 +2953,6 @@
             PyObject *receiver;
             value = stack_pointer[-1];
             receiver = stack_pointer[-2];
-            TIER_ONE_ONLY
             if (PyGen_Check(receiver) || PyCoro_CheckExact(receiver)) {
                 PyErr_SetObject(PyExc_StopIteration, value);
                 if (monitor_stop_iteration(tstate, frame, this_instr)) {
@@ -3248,7 +3232,6 @@
             INSTRUCTION_STATS(INTERPRETER_EXIT);
             PyObject *retval;
             retval = stack_pointer[-1];
-            TIER_ONE_ONLY
             assert(frame == &entry_frame);
             assert(_PyFrame_IsIncomplete(frame));
             /* Restore previous frame and return. */
@@ -3281,7 +3264,6 @@
             next_instr += 2;
             INSTRUCTION_STATS(JUMP_BACKWARD);
             /* Skip 1 cache entry */
-            TIER_ONE_ONLY
             CHECK_EVAL_BREAKER();
             assert(oparg <= INSTR_OFFSET());
             JUMPBY(-oparg);
@@ -3331,7 +3313,6 @@
             frame->instr_ptr = next_instr;
             next_instr += 1;
             INSTRUCTION_STATS(JUMP_BACKWARD_NO_INTERRUPT);
-            TIER_ONE_ONLY
             /* This bytecode is used in the `yield from` or `await` loop.
              * If there is an interrupt, we want it handled in the innermost
              * generator or coroutine, so we deliberately do not check it here.
@@ -3345,7 +3326,6 @@
             frame->instr_ptr = next_instr;
             next_instr += 1;
             INSTRUCTION_STATS(JUMP_FORWARD);
-            TIER_ONE_ONLY
             JUMPBY(oparg);
             DISPATCH();
         }
@@ -3414,7 +3394,6 @@
             owner = stack_pointer[-1];
             {
                 uint16_t counter = read_u16(&this_instr[1].cache);
-                TIER_ONE_ONLY
                 #if ENABLE_SPECIALIZATION
                 if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                     PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1);
@@ -4109,7 +4088,6 @@
             // _SPECIALIZE_LOAD_GLOBAL
             {
                 uint16_t counter = read_u16(&this_instr[1].cache);
-                TIER_ONE_ONLY
                 #if ENABLE_SPECIALIZATION
                 if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                     PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1);
@@ -4311,7 +4289,6 @@
             global_super = stack_pointer[-3];
             {
                 uint16_t counter = read_u16(&this_instr[1].cache);
-                TIER_ONE_ONLY
                 #if ENABLE_SPECIALIZATION
                 int load_method = oparg & 1;
                 if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
@@ -4326,7 +4303,6 @@
             // _LOAD_SUPER_ATTR
             self = stack_pointer[-1];
             {
-                TIER_ONE_ONLY
                 if (opcode == INSTRUMENTED_LOAD_SUPER_ATTR) {
                     PyObject *arg = oparg & 2 ? class : &_PyInstrumentation_MISSING;
                     int err = _Py_call_instrumentation_2args(
@@ -4737,7 +4713,6 @@
             INSTRUCTION_STATS(RAISE_VARARGS);
             PyObject **args;
             args = &stack_pointer[-oparg];
-            TIER_ONE_ONLY
             PyObject *cause = NULL, *exc = NULL;
             switch (oparg) {
             case 2:
@@ -4769,7 +4744,6 @@
             PyObject **values;
             exc = stack_pointer[-1];
             values = &stack_pointer[-1 - oparg];
-            TIER_ONE_ONLY
             assert(oparg >= 0 && oparg <= 2);
             if (oparg) {
                 PyObject *lasti = values[0];
@@ -4794,7 +4768,6 @@
             frame->instr_ptr = next_instr;
             next_instr += 1;
             INSTRUCTION_STATS(RESERVED);
-            TIER_ONE_ONLY
             assert(0 && "Executing RESERVED instruction.");
             Py_FatalError("Executing RESERVED instruction.");
             DISPATCH();
@@ -4806,7 +4779,6 @@
             INSTRUCTION_STATS(RESUME);
             PREDICTED(RESUME);
             _Py_CODEUNIT *this_instr = next_instr - 1;
-            TIER_ONE_ONLY
             assert(frame == tstate->current_frame);
             uintptr_t global_version =
                 _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker) &
@@ -4884,7 +4856,6 @@
             frame->instr_ptr = next_instr;
             next_instr += 1;
             INSTRUCTION_STATS(RETURN_GENERATOR);
-            TIER_ONE_ONLY
             assert(PyFunction_Check(frame->f_funcobj));
             PyFunctionObject *func = (PyFunctionObject *)frame->f_funcobj;
             PyGenObject *gen = (PyGenObject *)_Py_MakeCoro(func);
@@ -4951,7 +4922,6 @@
             receiver = stack_pointer[-2];
             {
                 uint16_t counter = read_u16(&this_instr[1].cache);
-                TIER_ONE_ONLY
                 #if ENABLE_SPECIALIZATION
                 if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                     next_instr = this_instr;
@@ -5138,7 +5108,6 @@
             owner = stack_pointer[-1];
             {
                 uint16_t counter = read_u16(&this_instr[1].cache);
-                TIER_ONE_ONLY
                 #if ENABLE_SPECIALIZATION
                 if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                     PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
@@ -5430,7 +5399,6 @@
             container = stack_pointer[-2];
             {
                 uint16_t counter = read_u16(&this_instr[1].cache);
-                TIER_ONE_ONLY
                 #if ENABLE_SPECIALIZATION
                 if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                     next_instr = this_instr;
@@ -5532,7 +5500,6 @@
             value = stack_pointer[-1];
             {
                 uint16_t counter = read_u16(&this_instr[1].cache);
-                TIER_ONE_ONLY
                 #if ENABLE_SPECIALIZATION
                 if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                     next_instr = this_instr;
@@ -5741,7 +5708,6 @@
             seq = stack_pointer[-1];
             {
                 uint16_t counter = read_u16(&this_instr[1].cache);
-                TIER_ONE_ONLY
                 #if ENABLE_SPECIALIZATION
                 if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
                     next_instr = this_instr;
@@ -5876,7 +5842,6 @@
             INSTRUCTION_STATS(YIELD_VALUE);
             PyObject *retval;
             retval = stack_pointer[-1];
-            TIER_ONE_ONLY
             // NOTE: It's important that YIELD_VALUE never raises an exception!
             // The compiler treats any exception raised here as a failed close()
             // or throw() call.

@@ -17,7 +17,6 @@ class Properties:
     needs_this: bool
     always_exits: bool
     stores_sp: bool
-    tier_one_only: bool
     uses_co_consts: bool
     uses_co_names: bool
     uses_locals: bool
@@ -25,6 +24,7 @@ class Properties:
     side_exit: bool
     pure: bool
     passthrough: bool
+    tier: int | None = None
     oparg_and_1: bool = False
     const_oparg: int = -1
 
@@ -46,7 +46,6 @@ class Properties:
             needs_this=any(p.needs_this for p in properties),
             always_exits=any(p.always_exits for p in properties),
             stores_sp=any(p.stores_sp for p in properties),
-            tier_one_only=any(p.tier_one_only for p in properties),
             uses_co_consts=any(p.uses_co_consts for p in properties),
             uses_co_names=any(p.uses_co_names for p in properties),
             uses_locals=any(p.uses_locals for p in properties),
@@ -68,7 +67,6 @@ SKIP_PROPERTIES = Properties(
     needs_this=False,
     always_exits=False,
     stores_sp=False,
-    tier_one_only=False,
    uses_co_consts=False,
    uses_co_names=False,
    uses_locals=False,
@@ -312,6 +310,15 @@ def variable_used(node: parser.InstDef, name: str) -> bool:
         token.kind == "IDENTIFIER" and token.text == name for token in node.tokens
     )
 
+def tier_variable(node: parser.InstDef) -> int | None:
+    """Determine whether a tier variable is used in a node."""
+    for token in node.tokens:
+        if token.kind == "ANNOTATION":
+            if token.text == "specializing":
+                return 1
+            if re.fullmatch(r"tier\d", token.text):
+                return int(token.text[-1])
+    return None
 
 def is_infallible(op: parser.InstDef) -> bool:
     return not (
@@ -498,7 +505,6 @@ def compute_properties(op: parser.InstDef) -> Properties:
         needs_this=variable_used(op, "this_instr"),
         always_exits=always_exits(op),
         stores_sp=variable_used(op, "SYNC_SP"),
-        tier_one_only=variable_used(op, "TIER_ONE_ONLY"),
        uses_co_consts=variable_used(op, "FRAME_CO_CONSTS"),
        uses_co_names=variable_used(op, "FRAME_CO_NAMES"),
        uses_locals=(variable_used(op, "GETLOCAL") or variable_used(op, "SETLOCAL"))
@@ -506,6 +512,7 @@ def compute_properties(op: parser.InstDef) -> Properties:
         has_free=has_free,
         pure="pure" in op.annotations,
         passthrough=passthrough,
+        tier=tier_variable(op),
     )
 
 
@@ -168,6 +168,7 @@ list of annotations and their meanings are as follows:
 * `override`. For external use by other interpreter definitions to override the current
   instruction definition.
 * `pure`. This instruction has no side effects.
+* 'tierN'. This instruction only used by tier N interpreter.
 
 ### Special functions/macros
 
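As a concrete instance of the annotation documented above, one of the tier-2-only micro-ops from the interpreter DSL hunks earlier in this diff now reads:

tier2 op(_CHECK_VALIDITY, (--)) {
    DEOPT_IF(!current_executor->vm_data.valid);
}

The code generator maps a `tierN` (or `specializing`) annotation to the Properties.tier field through the tier_variable() helper added in the hunks above.
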
@@ -224,6 +224,8 @@ annotations = {
     "pure",
     "split",
     "replicate",
+    "tier1",
+    "tier2",
 }
 
 __all__ = []

@@ -284,7 +284,7 @@ def is_viable_expansion(inst: Instruction) -> bool:
             continue
         if "replaced" in part.annotations:
             continue
-        if part.properties.tier_one_only or not part.is_viable():
+        if part.properties.tier == 1 or not part.is_viable():
             return False
     return True
 
@@ -176,7 +176,7 @@ def generate_abstract_interpreter(
         if uop.name in abstract.uops:
             override = abstract.uops[uop.name]
             validate_uop(override, uop)
-        if uop.properties.tier_one_only:
+        if uop.properties.tier == 1:
             continue
         if uop.replicates:
             continue
@@ -194,7 +194,7 @@ def generate_tier2(
     out = CWriter(outfile, 2, lines)
     out.emit("\n")
     for name, uop in analysis.uops.items():
-        if uop.properties.tier_one_only:
+        if uop.properties.tier == 1:
             continue
         if uop.properties.oparg_and_1:
             out.emit(f"/* {uop.name} is split on (oparg & 1) */\n\n")
@@ -43,7 +43,7 @@ def generate_uop_ids(
     for name, uop in sorted(uops):
         if name in PRE_DEFINED:
             continue
-        if uop.properties.tier_one_only:
+        if uop.properties.tier == 1:
             continue
         if uop.implicitly_created and not distinct_namespace and not uop.replicated:
             out.emit(f"#define {name} {name[1:]}\n")
@@ -29,7 +29,7 @@ def generate_names_and_flags(analysis: Analysis, out: CWriter) -> None:
     out.emit("#ifdef NEED_OPCODE_METADATA\n")
     out.emit("const uint16_t _PyUop_Flags[MAX_UOP_ID+1] = {\n")
     for uop in analysis.uops.values():
-        if uop.is_viable() and not uop.properties.tier_one_only:
+        if uop.is_viable() and uop.properties.tier != 1:
             out.emit(f"[{uop.name}] = {cflags(uop.properties)},\n")
 
     out.emit("};\n\n")
@@ -41,7 +41,7 @@ def generate_names_and_flags(analysis: Analysis, out: CWriter) -> None:
     out.emit("};\n\n")
     out.emit("const char *const _PyOpcode_uop_name[MAX_UOP_ID+1] = {\n")
     for uop in sorted(analysis.uops.values(), key=lambda t: t.name):
-        if uop.is_viable() and not uop.properties.tier_one_only:
+        if uop.is_viable() and uop.properties.tier != 1:
             out.emit(f'[{uop.name}] = "{uop.name}",\n')
     out.emit("};\n")
     out.emit("#endif // NEED_OPCODE_METADATA\n\n")