// Macros and other things needed by ceval.c, and bytecodes.c

/* Computed GOTOs, or
       the-optimization-commonly-but-improperly-known-as-"threaded code"
   using gcc's labels-as-values extension
   (http://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html).

   The traditional bytecode evaluation loop uses a "switch" statement, which
   decent compilers will optimize as a single indirect branch instruction
   combined with a lookup table of jump addresses. However, since the
   indirect jump instruction is shared by all opcodes, the CPU will have a
   hard time making the right prediction for where to jump next (actually,
   it will always be wrong except in the uncommon case of a sequence of
   several identical opcodes).

   "Threaded code", in contrast, uses an explicit jump table and an explicit
   indirect jump instruction at the end of each opcode. Since the jump
   instruction is at a different address for each opcode, the CPU will make a
   separate prediction for each of these instructions, which is equivalent to
   predicting the second opcode of each opcode pair. These predictions have
   a much better chance of turning out valid, especially in small bytecode loops.

   A mispredicted branch on a modern CPU flushes the whole pipeline and
   can cost several CPU cycles (depending on the pipeline depth),
   and potentially many more instructions (depending on the pipeline width).
   A correctly predicted branch, however, is nearly free.

   At the time of this writing, the "threaded code" version is up to 15-20%
   faster than the normal "switch" version, depending on the compiler and the
   CPU architecture.

   NOTE: care must be taken that the compiler doesn't try to "optimize" the
   indirect jumps by sharing them between all opcodes. Such optimizations
   can be disabled on gcc by using the -fno-gcse flag (or possibly
   -fno-crossjumping).
*/
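
/* A minimal sketch (never compiled, and not CPython's real dispatch -- the
   opcodes and table below are hypothetical) of what labels-as-values
   dispatch looks like in practice: */
#if 0
static int
threaded_code_demo(const unsigned char *code)
{
    /* One label address per opcode, analogous to opcode_targets[]. */
    static void *demo_targets[] = { &&demo_INCR, &&demo_DECR, &&demo_HALT };
    const unsigned char *ip = code;
    int acc = 0;
    goto *demo_targets[*ip];       /* initial dispatch */
demo_INCR:
    acc++;
    goto *demo_targets[*++ip];     /* each opcode ends with its own indirect jump */
demo_DECR:
    acc--;
    goto *demo_targets[*++ip];
demo_HALT:
    return acc;
}
#endif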

/* Use macros rather than inline functions, to make it as clear as possible
 * to the C compiler that the tracing check is a simple test then branch.
 * We want to be sure that the compiler knows this before it generates
 * the CFG.
 */

#ifdef WITH_DTRACE
#define OR_DTRACE_LINE | (PyDTrace_LINE_ENABLED() ? 255 : 0)
#else
#define OR_DTRACE_LINE
#endif

#ifdef HAVE_COMPUTED_GOTOS
#ifndef USE_COMPUTED_GOTOS
#define USE_COMPUTED_GOTOS 1
#endif
#else
#if defined(USE_COMPUTED_GOTOS) && USE_COMPUTED_GOTOS
#error "Computed gotos are not supported on this compiler."
#endif
#undef USE_COMPUTED_GOTOS
#define USE_COMPUTED_GOTOS 0
#endif

#ifdef Py_STATS
#define INSTRUCTION_STATS(op) \
    do { \
        OPCODE_EXE_INC(op); \
        if (_Py_stats) _Py_stats->opcode_stats[lastopcode].pair_count[op]++; \
        lastopcode = op; \
    } while (0)
#else
#define INSTRUCTION_STATS(op) ((void)0)
#endif

#if USE_COMPUTED_GOTOS
#  define TARGET(op) TARGET_##op:
#  define DISPATCH_GOTO() goto *opcode_targets[opcode]
#else
#  define TARGET(op) case op: TARGET_##op:
#  define DISPATCH_GOTO() goto dispatch_opcode
#endif

/* PRE_DISPATCH_GOTO() does lltrace if enabled. Normally a no-op */
#ifdef LLTRACE
#define PRE_DISPATCH_GOTO() if (lltrace >= 5) { \
    lltrace_instruction(frame, stack_pointer, next_instr, opcode, oparg); }
#else
#define PRE_DISPATCH_GOTO() ((void)0)
#endif

#if LLTRACE
#define LLTRACE_RESUME_FRAME() \
    do { \
        lltrace = maybe_lltrace_resume_frame(frame, &entry_frame, GLOBALS()); \
        if (lltrace < 0) { \
            goto exit_unwind; \
        } \
    } while (0)
#else
#define LLTRACE_RESUME_FRAME() ((void)0)
#endif

#ifdef Py_GIL_DISABLED
#define QSBR_QUIESCENT_STATE(tstate) _Py_qsbr_quiescent_state(((_PyThreadStateImpl *)tstate)->qsbr)
#else
#define QSBR_QUIESCENT_STATE(tstate)
#endif

/* Do interpreter dispatch accounting for tracing and instrumentation */
#define DISPATCH() \
    { \
        assert(frame->stackpointer == NULL); \
        NEXTOPARG(); \
        PRE_DISPATCH_GOTO(); \
        DISPATCH_GOTO(); \
    }

#define DISPATCH_SAME_OPARG() \
    { \
        opcode = next_instr->op.code; \
        PRE_DISPATCH_GOTO(); \
        DISPATCH_GOTO(); \
    }

#define DISPATCH_INLINED(NEW_FRAME) \
    do { \
        assert(tstate->interp->eval_frame == NULL); \
        _PyFrame_SetStackPointer(frame, stack_pointer); \
        assert((NEW_FRAME)->previous == frame); \
        frame = tstate->current_frame = (NEW_FRAME); \
        CALL_STAT_INC(inlined_py_calls); \
        goto start_frame; \
    } while (0)

// Use this instead of 'goto error' so Tier 2 can go to a different label
#define GOTO_ERROR(LABEL) goto LABEL
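
/* Rough shape of a generated Tier 1 instruction handler using the macros
   above (simplified and hypothetical -- the real handlers are generated from
   bytecodes.c and carry more bookkeeping): */
#if 0
        TARGET(EXAMPLE_OP) {
            frame->instr_ptr = next_instr;
            next_instr += 1;
            INSTRUCTION_STATS(EXAMPLE_OP);
            /* ... the instruction body manipulates stack_pointer ... */
            DISPATCH();
        }
#endif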

/* Tuple access macros */

#ifndef Py_DEBUG
#define GETITEM(v, i) PyTuple_GET_ITEM((v), (i))
#else
static inline PyObject *
GETITEM(PyObject *v, Py_ssize_t i) {
    assert(PyTuple_Check(v));
    assert(i >= 0);
    assert(i < PyTuple_GET_SIZE(v));
    return PyTuple_GET_ITEM(v, i);
}
#endif

/* Code access macros */

/* The integer overflow is checked by an assertion below. */
#define INSTR_OFFSET() ((int)(next_instr - _PyFrame_GetBytecode(frame)))
#define NEXTOPARG() do { \
        _Py_CODEUNIT word = {.cache = FT_ATOMIC_LOAD_UINT16_RELAXED(*(uint16_t*)next_instr)}; \
        opcode = word.op.code; \
        oparg = word.op.arg; \
    } while (0)

/* JUMPBY makes the generator identify the instruction as a jump. SKIP_OVER is
 * for advancing to the next instruction, taking into account cache entries
 * and skipped instructions.
 */
#define JUMPBY(x) (next_instr += (x))
#define SKIP_OVER(x) (next_instr += (x))

/* OpCode prediction macros
   Some opcodes tend to come in pairs, thus making it possible to
   predict the second opcode when the first is run. For example,
   COMPARE_OP is often followed by POP_JUMP_IF_FALSE or POP_JUMP_IF_TRUE.

   Verifying the prediction costs a single high-speed test of a register
   variable against a constant. If the pairing was good, then the
   processor's own internal branch prediction has a high likelihood of
   success, resulting in a nearly zero-overhead transition to the
   next opcode. A successful prediction saves a trip through the eval-loop
   including its unpredictable switch-case branch. Combined with the
   processor's internal branch prediction, a successful PREDICT has the
   effect of making the two opcodes run as if they were a single new opcode
   with the bodies combined.

   If collecting opcode statistics, your choices are to either keep the
   predictions turned on and interpret the results as if some opcodes
   had been combined, or turn off predictions so that the opcode frequency
   counters are updated for both opcodes.

   Opcode prediction is disabled with threaded code, since the latter allows
   the CPU to record separate branch prediction information for each
   opcode.
*/

#define PREDICT_ID(op) PRED_##op
#define PREDICTED(op) PREDICT_ID(op):
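
/* Relationship sketch (FOO is a hypothetical opcode name): PREDICTED(FOO)
   emits the label "PRED_FOO:" in FOO's generic handler, and
   GO_TO_INSTRUCTION(FOO), defined further down, expands to "goto PRED_FOO",
   which is how DEOPT_IF falls back from a specialized instruction to the
   generic one:

       PREDICTED(FOO)            // expands to:  PRED_FOO:
       ... generic FOO body ...
       GO_TO_INSTRUCTION(FOO);   // expands to:  goto PRED_FOO;
*/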

/* Stack manipulation macros */

/* The stack can grow at most MAXINT deep, as co_nlocals and
   co_stacksize are ints. */
#define STACK_LEVEL() ((int)(stack_pointer - _PyFrame_Stackbase(frame)))
#define STACK_SIZE() (_PyFrame_GetCode(frame)->co_stacksize)
#define EMPTY() (STACK_LEVEL() == 0)
#define TOP() (stack_pointer[-1])
#define SECOND() (stack_pointer[-2])
#define THIRD() (stack_pointer[-3])
#define FOURTH() (stack_pointer[-4])
#define PEEK(n) (stack_pointer[-(n)])
#define POKE(n, v) (stack_pointer[-(n)] = (v))
#define SET_TOP(v) (stack_pointer[-1] = (v))
#define SET_SECOND(v) (stack_pointer[-2] = (v))
#define BASIC_STACKADJ(n) (stack_pointer += n)
#define BASIC_PUSH(v) (*stack_pointer++ = (v))
#define BASIC_POP() (*--stack_pointer)

#ifdef Py_DEBUG
#define PUSH(v) do { \
        BASIC_PUSH(v); \
        assert(STACK_LEVEL() <= STACK_SIZE()); \
    } while (0)
#define POP() (assert(STACK_LEVEL() > 0), BASIC_POP())
#define STACK_GROW(n) do { \
        assert(n >= 0); \
        BASIC_STACKADJ(n); \
        assert(STACK_LEVEL() <= STACK_SIZE()); \
    } while (0)
#define STACK_SHRINK(n) do { \
        assert(n >= 0); \
        assert(STACK_LEVEL() >= n); \
        BASIC_STACKADJ(-(n)); \
    } while (0)
#else
#define PUSH(v) BASIC_PUSH(v)
#define POP() BASIC_POP()
#define STACK_GROW(n) BASIC_STACKADJ(n)
#define STACK_SHRINK(n) BASIC_STACKADJ(-(n))
#endif
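
/* Typical shape of a handwritten instruction body using these macros
   (hypothetical sketch; 'make_result' stands in for real work):

       _PyStackRef right = POP();          // pop the top of stack
       _PyStackRef left = TOP();           // peek without popping
       SET_TOP(make_result(left, right));  // overwrite the former 'left' slot
*/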

#define WITHIN_STACK_BOUNDS() \
    (frame == &entry_frame || (STACK_LEVEL() >= 0 && STACK_LEVEL() <= STACK_SIZE()))

/* Data access macros */
#define FRAME_CO_CONSTS (_PyFrame_GetCode(frame)->co_consts)
#define FRAME_CO_NAMES (_PyFrame_GetCode(frame)->co_names)

/* Local variable macros */

#define LOCALS_ARRAY (frame->localsplus)
#define GETLOCAL(i) (frame->localsplus[i])

/* The SETLOCAL() macro must not DECREF the local variable in-place and
   then store the new value; it must copy the old value to a temporary
   value, then store the new value, and then DECREF the temporary value.
   This is because it is possible that during the DECREF the frame is
   accessed by other code (e.g. a __del__ method or gc.collect()) and the
   variable would be pointing to already-freed memory. */
#define SETLOCAL(i, value) do { _PyStackRef tmp = GETLOCAL(i); \
                                GETLOCAL(i) = value; \
                                PyStackRef_XCLOSE(tmp); } while (0)
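
/* Sketch of the hazard described above (illustrative, not real code): the
   unsafe ordering releases the old value while the frame slot still points
   at it, so re-entrant code can observe a dangling local:

       // UNSAFE: the slot still holds 'old' while its reference is released
       PyStackRef_XCLOSE(GETLOCAL(i));
       GETLOCAL(i) = value;

       // SAFE (what SETLOCAL does): store first, release afterwards
       _PyStackRef tmp = GETLOCAL(i);
       GETLOCAL(i) = value;
       PyStackRef_XCLOSE(tmp);
*/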

#define GO_TO_INSTRUCTION(op) goto PREDICT_ID(op)

#ifdef Py_STATS
#define UPDATE_MISS_STATS(INSTNAME) \
    do { \
        STAT_INC(opcode, miss); \
        STAT_INC((INSTNAME), miss); \
        /* The counter is always the first cache entry: */ \
        if (ADAPTIVE_COUNTER_TRIGGERS(next_instr->cache)) { \
            STAT_INC((INSTNAME), deopt); \
        } \
    } while (0)
#else
#define UPDATE_MISS_STATS(INSTNAME) ((void)0)
#endif

#define DEOPT_IF(COND, INSTNAME) \
    if ((COND)) { \
        /* This is only a single jump on release builds! */ \
        UPDATE_MISS_STATS((INSTNAME)); \
        assert(_PyOpcode_Deopt[opcode] == (INSTNAME)); \
        GO_TO_INSTRUCTION(INSTNAME); \
    }
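
/* Typical use in a specialized instruction (hypothetical opcode pair; the
   real guards are generated from bytecodes.c): the specialized handler checks
   its assumption and, on failure, jumps back to the generic instruction via
   GO_TO_INSTRUCTION():

       TARGET(EXAMPLE_OP_SPECIALIZED) {
           ...
           DEOPT_IF(!assumption_still_holds(operand), EXAMPLE_OP);
           ... fast path ...
           DISPATCH();
       }
*/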

#define GLOBALS() frame->f_globals
#define BUILTINS() frame->f_builtins
#define LOCALS() frame->f_locals
#define CONSTS() _PyFrame_GetCode(frame)->co_consts
#define NAMES() _PyFrame_GetCode(frame)->co_names

#define DTRACE_FUNCTION_ENTRY() \
    if (PyDTrace_FUNCTION_ENTRY_ENABLED()) { \
        dtrace_function_entry(frame); \
    }

/* This takes a uint16_t instead of a _Py_BackoffCounter,
 * because it is used directly on the cache entry in generated code,
 * which is always an integral type. */
#define ADAPTIVE_COUNTER_TRIGGERS(COUNTER) \
    backoff_counter_triggers(forge_backoff_counter((COUNTER)))

#define ADVANCE_ADAPTIVE_COUNTER(COUNTER) \
    do { \
        (COUNTER) = advance_backoff_counter((COUNTER)); \
    } while (0);

#define PAUSE_ADAPTIVE_COUNTER(COUNTER) \
    do { \
        (COUNTER) = pause_backoff_counter((COUNTER)); \
    } while (0);
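
/* Sketch of how a specializing instruction drives these counters (simplified
   and hypothetical -- see the generated _SPECIALIZE_* code for the real
   thing; '_Py_Specialize_ExampleOp' is not a real function):

       uint16_t counter = read_u16(&this_instr[1].cache);
       if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
           next_instr = this_instr;
           _Py_Specialize_ExampleOp(...);      // may rewrite the instruction
           DISPATCH_SAME_OPARG();
       }
       ADVANCE_ADAPTIVE_COUNTER(this_instr[1].cache);
*/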

#ifdef ENABLE_SPECIALIZATION_FT
/* Multiple threads may execute these concurrently if thread-local bytecode is
 * disabled and they all execute the main copy of the bytecode. Specialization
 * is disabled in that case so the value is unused, but the RMW cycle should be
 * free of data races.
 */
#define RECORD_BRANCH_TAKEN(bitset, flag) \
    FT_ATOMIC_STORE_UINT16_RELAXED( \
        bitset, (FT_ATOMIC_LOAD_UINT16_RELAXED(bitset) << 1) | (flag))
#else
#define RECORD_BRANCH_TAKEN(bitset, flag)
#endif

#define UNBOUNDLOCAL_ERROR_MSG \
    "cannot access local variable '%s' where it is not associated with a value"
#define UNBOUNDFREE_ERROR_MSG \
    "cannot access free variable '%s' where it is not associated with a value" \
    " in enclosing scope"
#define NAME_ERROR_MSG "name '%.200s' is not defined"

// If a trace function sets a new f_lineno and
// *then* raises, we use the destination when searching
// for an exception handler, displaying the traceback, and so on
#define INSTRUMENTED_JUMP(src, dest, event) \
    do { \
        if (tstate->tracing) { \
            next_instr = dest; \
        } else { \
            _PyFrame_SetStackPointer(frame, stack_pointer); \
            next_instr = _Py_call_instrumentation_jump(tstate, event, frame, src, dest); \
            stack_pointer = _PyFrame_GetStackPointer(frame); \
            if (next_instr == NULL) { \
                next_instr = (dest)+1; \
                goto error; \
            } \
        } \
    } while (0);

static inline int _Py_EnterRecursivePy(PyThreadState *tstate) {
    return (tstate->py_recursion_remaining-- <= 0) &&
        _Py_CheckRecursiveCallPy(tstate);
}

static inline void _Py_LeaveRecursiveCallPy(PyThreadState *tstate) {
    tstate->py_recursion_remaining++;
}

/* Implementation of "macros" that modify the instruction pointer,
 * stack pointer, or frame pointer.
 * These need to be treated differently by tier 1 and 2.
 * The Tier 1 version is here; Tier 2 is inlined in ceval.c. */

#define LOAD_IP(OFFSET) do { \
        next_instr = frame->instr_ptr + (OFFSET); \
    } while (0)

/* There's no STORE_IP(), it's inlined by the code generator. */

#define LOAD_SP() \
    stack_pointer = _PyFrame_GetStackPointer(frame)

#define SAVE_SP() \
    _PyFrame_SetStackPointer(frame, stack_pointer)

/* Tier-switching macros. */

#ifdef _Py_JIT
#define GOTO_TIER_TWO(EXECUTOR) \
    do { \
        OPT_STAT_INC(traces_executed); \
        jit_func jitted = (EXECUTOR)->jit_code; \
        next_instr = jitted(frame, stack_pointer, tstate); \
        Py_DECREF(tstate->previous_executor); \
        tstate->previous_executor = NULL; \
        frame = tstate->current_frame; \
        if (next_instr == NULL) { \
            goto resume_with_error; \
        } \
        stack_pointer = _PyFrame_GetStackPointer(frame); \
        DISPATCH(); \
    } while (0)
#else
#define GOTO_TIER_TWO(EXECUTOR) \
    do { \
        OPT_STAT_INC(traces_executed); \
        next_uop = (EXECUTOR)->trace; \
        assert(next_uop->opcode == _START_EXECUTOR); \
        goto enter_tier_two; \
    } while (0)
#endif

#define GOTO_TIER_ONE(TARGET) \
    do { \
        Py_DECREF(tstate->previous_executor); \
        tstate->previous_executor = NULL; \
        next_instr = (TARGET); \
        DISPATCH(); \
    } while (0)

#define CURRENT_OPARG() (next_uop[-1].oparg)

#define CURRENT_OPERAND0() (next_uop[-1].operand0)
#define CURRENT_OPERAND1() (next_uop[-1].operand1)

#define JUMP_TO_JUMP_TARGET() goto jump_to_jump_target
#define JUMP_TO_ERROR() goto jump_to_error_target
#define GOTO_UNWIND() goto error_tier_two
#define EXIT_TO_TIER1() goto exit_to_tier1
#define EXIT_TO_TIER1_DYNAMIC() goto exit_to_tier1_dynamic;

/* Stackref macros */

/* How much scratch space to give stackref to PyObject* conversion. */
#define MAX_STACKREF_SCRATCH 10

#ifdef Py_GIL_DISABLED
#define STACKREFS_TO_PYOBJECTS(ARGS, ARG_COUNT, NAME) \
    /* +1 because vectorcall might use -1 to write self */ \
    PyObject *NAME##_temp[MAX_STACKREF_SCRATCH+1]; \
    PyObject **NAME = _PyObjectArray_FromStackRefArray(ARGS, ARG_COUNT, NAME##_temp + 1);
#else
#define STACKREFS_TO_PYOBJECTS(ARGS, ARG_COUNT, NAME) \
    PyObject **NAME = (PyObject **)ARGS; \
    assert(NAME != NULL);
#endif

#ifdef Py_GIL_DISABLED
#define STACKREFS_TO_PYOBJECTS_CLEANUP(NAME) \
    /* +1 because we +1 previously */ \
    _PyObjectArray_Free(NAME - 1, NAME##_temp);
#else
#define STACKREFS_TO_PYOBJECTS_CLEANUP(NAME) \
    (void)(NAME);
#endif

#ifdef Py_GIL_DISABLED
#define CONVERSION_FAILED(NAME) ((NAME) == NULL)
#else
#define CONVERSION_FAILED(NAME) (0)
#endif
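
/* Typical conversion pattern around a vectorcall (hypothetical sketch;
   'args' is an array of _PyStackRef, 'nargs' its length, and error handling
   is elided):

       STACKREFS_TO_PYOBJECTS(args, nargs, args_o);
       if (CONVERSION_FAILED(args_o)) {
           ... handle the memory error ...
       }
       PyObject *res_o = PyObject_Vectorcall(callable_o, args_o, nargs, NULL);
       STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
*/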