from dataclasses import dataclass
import lexer
import parser
import re
from typing import Optional


@dataclass
class Properties:
    escapes: bool
    error_with_pop: bool
    error_without_pop: bool
    deopts: bool
    oparg: bool
    jumps: bool
    eval_breaker: bool
    needs_this: bool
    always_exits: bool
    stores_sp: bool
    uses_co_consts: bool
    uses_co_names: bool
    uses_locals: bool
    has_free: bool
    side_exit: bool
    pure: bool
    tier: int | None = None
    oparg_and_1: bool = False
    const_oparg: int = -1
    needs_prev: bool = False

    def dump(self, indent: str) -> None:
        text = ", ".join([f"{key}: {value}" for (key, value) in self.__dict__.items()])
        print(indent, text, sep="")

    @staticmethod
    def from_list(properties: list["Properties"]) -> "Properties":
        return Properties(
            escapes=any(p.escapes for p in properties),
            error_with_pop=any(p.error_with_pop for p in properties),
            error_without_pop=any(p.error_without_pop for p in properties),
            deopts=any(p.deopts for p in properties),
            oparg=any(p.oparg for p in properties),
            jumps=any(p.jumps for p in properties),
            eval_breaker=any(p.eval_breaker for p in properties),
            needs_this=any(p.needs_this for p in properties),
            always_exits=any(p.always_exits for p in properties),
            stores_sp=any(p.stores_sp for p in properties),
            uses_co_consts=any(p.uses_co_consts for p in properties),
            uses_co_names=any(p.uses_co_names for p in properties),
            uses_locals=any(p.uses_locals for p in properties),
            has_free=any(p.has_free for p in properties),
            side_exit=any(p.side_exit for p in properties),
            pure=all(p.pure for p in properties),
            needs_prev=any(p.needs_prev for p in properties),
        )

    @property
    def infallible(self) -> bool:
        return not self.error_with_pop and not self.error_without_pop


SKIP_PROPERTIES = Properties(
    escapes=False,
    error_with_pop=False,
    error_without_pop=False,
    deopts=False,
    oparg=False,
    jumps=False,
    eval_breaker=False,
    needs_this=False,
    always_exits=False,
    stores_sp=False,
    uses_co_consts=False,
    uses_co_names=False,
    uses_locals=False,
    has_free=False,
    side_exit=False,
    pure=True,
)
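
# A minimal sketch of how combined properties behave (illustrative
# values, not taken from real instruction definitions): from_list()
# ORs every flag except `pure`, which it ANDs, so a macro escapes if
# any part escapes but is pure only if every part is pure.
#
#     from dataclasses import replace
#     a = replace(SKIP_PROPERTIES, escapes=True)  # pure stays True
#     b = replace(SKIP_PROPERTIES, pure=False)
#     combined = Properties.from_list([a, b])
#     assert combined.escapes and not combined.pure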


@dataclass
class Skip:
    "Unused cache entry"
    size: int

    @property
    def name(self) -> str:
        return f"unused/{self.size}"

    @property
    def properties(self) -> Properties:
        return SKIP_PROPERTIES


class Flush:
    @property
    def properties(self) -> Properties:
        return SKIP_PROPERTIES

    @property
    def name(self) -> str:
        return "flush"

    @property
    def size(self) -> int:
        return 0


@dataclass
class StackItem:
    name: str
    type: str | None
    condition: str | None
    size: str
    peek: bool = False
    used: bool = False

    def __str__(self) -> str:
        cond = f" if ({self.condition})" if self.condition else ""
        size = f"[{self.size}]" if self.size else ""
        type = "" if self.type is None else f"{self.type} "
        return f"{type}{self.name}{size}{cond} {self.peek}"

    def is_array(self) -> bool:
        return self.size != ""

    def get_size(self) -> str:
        return self.size if self.size else "1"


@dataclass
class StackEffect:
    inputs: list[StackItem]
    outputs: list[StackItem]

    def __str__(self) -> str:
        return f"({', '.join([str(i) for i in self.inputs])} -- {', '.join([str(i) for i in self.outputs])})"
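
# For orientation, a DSL effect such as "(left, right -- res)" maps
# onto these dataclasses roughly as follows (a hand-built sketch; the
# parser normally constructs them):
#
#     left = StackItem("left", None, None, "")
#     right = StackItem("right", None, None, "")
#     res = StackItem("res", None, None, "")
#     print(StackEffect([left, right], [res]))
#     # -> (left False, right False -- res False)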


@dataclass
class CacheEntry:
    name: str
    size: int

    def __str__(self) -> str:
        return f"{self.name}/{self.size}"


@dataclass
class Uop:
    name: str
    context: parser.Context | None
    annotations: list[str]
    stack: StackEffect
    caches: list[CacheEntry]
    deferred_refs: dict[lexer.Token, str | None]
    body: list[lexer.Token]
    properties: Properties
    _size: int = -1
    implicitly_created: bool = False
    replicated = 0
    replicates: "Uop | None" = None

    def dump(self, indent: str) -> None:
        print(
            indent, self.name, ", ".join(self.annotations) if self.annotations else ""
        )
        print(indent, self.stack, ", ".join([str(c) for c in self.caches]))
        self.properties.dump(" " + indent)

    @property
    def size(self) -> int:
        if self._size < 0:
            self._size = sum(c.size for c in self.caches)
        return self._size

    def why_not_viable(self) -> str | None:
        if self.name == "_SAVE_RETURN_OFFSET":
            return None  # Adjusts next_instr, but only in tier 1 code
        if "INSTRUMENTED" in self.name:
            return "is instrumented"
        if "replaced" in self.annotations:
            return "is replaced"
        if self.name in ("INTERPRETER_EXIT", "JUMP_BACKWARD"):
            return "has tier 1 control flow"
        if self.properties.needs_this:
            return "uses the 'this_instr' variable"
        if len([c for c in self.caches if c.name != "unused"]) > 1:
            return "has too many cache entries"
        if self.properties.error_with_pop and self.properties.error_without_pop:
            return "has both popping and not-popping errors"
        return None

    def is_viable(self) -> bool:
        return self.why_not_viable() is None

    def is_super(self) -> bool:
        for tkn in self.body:
            if tkn.kind == "IDENTIFIER" and tkn.text == "oparg1":
                return True
        return False


Part = Uop | Skip | Flush


@dataclass
class Instruction:
    where: lexer.Token
    name: str
    parts: list[Part]
    _properties: Properties | None
    is_target: bool = False
    family: Optional["Family"] = None
    opcode: int = -1

    @property
    def properties(self) -> Properties:
        if self._properties is None:
            self._properties = self._compute_properties()
        return self._properties

    def _compute_properties(self) -> Properties:
        return Properties.from_list([part.properties for part in self.parts])

    def dump(self, indent: str) -> None:
        print(indent, self.name, "=", ", ".join([part.name for part in self.parts]))
        self.properties.dump(" " + indent)

    @property
    def size(self) -> int:
        return 1 + sum(part.size for part in self.parts)

    def is_super(self) -> bool:
        if len(self.parts) != 1:
            return False
        uop = self.parts[0]
        if isinstance(uop, Uop):
            return uop.is_super()
        else:
            return False


@dataclass
class PseudoInstruction:
    name: str
    stack: StackEffect
    targets: list[Instruction]
    flags: list[str]
    opcode: int = -1

    def dump(self, indent: str) -> None:
        print(indent, self.name, "->", " or ".join([t.name for t in self.targets]))

    @property
    def properties(self) -> Properties:
        return Properties.from_list([i.properties for i in self.targets])


@dataclass
class Family:
    name: str
    size: str
    members: list[Instruction]

    def dump(self, indent: str) -> None:
        print(indent, self.name, "= ", ", ".join([m.name for m in self.members]))


@dataclass
class Analysis:
    instructions: dict[str, Instruction]
    uops: dict[str, Uop]
    families: dict[str, Family]
    pseudos: dict[str, PseudoInstruction]
    opmap: dict[str, int]
    have_arg: int
    min_instrumented: int


def analysis_error(message: str, tkn: lexer.Token) -> SyntaxError:
    # To do -- support file and line output
    # Construct a SyntaxError instance from message and token
    return lexer.make_syntax_error(message, tkn.filename, tkn.line, tkn.column, "")


def override_error(
    name: str,
    context: parser.Context | None,
    prev_context: parser.Context | None,
    token: lexer.Token,
) -> SyntaxError:
    return analysis_error(
        f"Duplicate definition of '{name}' @ {context}, "
        f"previous definition @ {prev_context}",
        token,
    )


def convert_stack_item(
    item: parser.StackEffect, replace_op_arg_1: str | None
) -> StackItem:
    cond = item.cond
    if replace_op_arg_1 and OPARG_AND_1.match(item.cond):
        cond = replace_op_arg_1
    return StackItem(item.name, item.type, cond, item.size)


def analyze_stack(
    op: parser.InstDef | parser.Pseudo, replace_op_arg_1: str | None = None
) -> StackEffect:
    inputs: list[StackItem] = [
        convert_stack_item(i, replace_op_arg_1)
        for i in op.inputs
        if isinstance(i, parser.StackEffect)
    ]
    outputs: list[StackItem] = [
        convert_stack_item(i, replace_op_arg_1) for i in op.outputs
    ]
    # Mark variables with matching names at the base of the stack as "peek"
    modified = False
    for input, output in zip(inputs, outputs):
        if input.name == output.name and not modified:
            input.peek = output.peek = True
        else:
            modified = True
    if isinstance(op, parser.InstDef):
        output_names = [out.name for out in outputs]
        for input in inputs:
            if (
                variable_used(op, input.name)
                or variable_used(op, "DECREF_INPUTS")
                or (not input.peek and input.name in output_names)
            ):
                input.used = True
        for output in outputs:
            if variable_used(op, output.name):
                output.used = True
    return StackEffect(inputs, outputs)
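
# A small worked example of the peek marking above (hypothetical
# effect, not a real instruction): for "(a, b -- a, x)" the leading
# "a"/"a" pair matches at the base of the stack, so both copies are
# flagged peek=True; once the names diverge ("b" vs "x"), every later
# pair is treated as popped-and-pushed, even if names match again.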


def analyze_caches(inputs: list[parser.InputEffect]) -> list[CacheEntry]:
    caches: list[parser.CacheEffect] = [
        i for i in inputs if isinstance(i, parser.CacheEffect)
    ]
    for cache in caches:
        if cache.name == "unused":
            raise analysis_error(
                "Unused cache entry in op. Move to enclosing macro.", cache.tokens[0]
            )
    return [CacheEntry(i.name, int(i.size)) for i in caches]


def analyze_deferred_refs(node: parser.InstDef) -> dict[lexer.Token, str | None]:
    """Look for PyStackRef_FromPyObjectNew() calls"""

    def find_assignment_target(idx: int) -> list[lexer.Token]:
        """Find the tokens that make up the left-hand side of an assignment"""
        offset = 1
        for tkn in reversed(node.block.tokens[: idx - 1]):
            if tkn.kind == "SEMI" or tkn.kind == "LBRACE" or tkn.kind == "RBRACE":
                return node.block.tokens[idx - offset : idx - 1]
            offset += 1
        return []

    refs: dict[lexer.Token, str | None] = {}
    for idx, tkn in enumerate(node.block.tokens):
        if tkn.kind != "IDENTIFIER" or tkn.text != "PyStackRef_FromPyObjectNew":
            continue

        if idx == 0 or node.block.tokens[idx - 1].kind != "EQUALS":
            raise analysis_error("Expected '=' before PyStackRef_FromPyObjectNew", tkn)

        lhs = find_assignment_target(idx)
        if len(lhs) == 0:
            raise analysis_error(
                "PyStackRef_FromPyObjectNew() must be assigned to an output", tkn
            )

        if lhs[0].kind == "TIMES" or any(
            t.kind == "ARROW" or t.kind == "LBRACKET" for t in lhs[1:]
        ):
            # Don't handle: *ptr = ..., ptr->field = ..., or ptr[field] = ...
            # Assume that they are visible to the GC.
            refs[tkn] = None
            continue

        if len(lhs) != 1 or lhs[0].kind != "IDENTIFIER":
            raise analysis_error(
                "PyStackRef_FromPyObjectNew() must be assigned to an output", tkn
            )

        name = lhs[0].text
        if not any(var.name == name for var in node.outputs):
            raise analysis_error(
                f"PyStackRef_FromPyObjectNew() must be assigned to an output, not '{name}'",
                tkn,
            )

        refs[tkn] = name

    return refs
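
# Shapes the scan above accepts or rejects (illustrative C snippets):
#
#     res = PyStackRef_FromPyObjectNew(obj);   // ok: refs[tkn] = "res"
#     *ptr = PyStackRef_FromPyObjectNew(obj);  // recorded as None (GC-visible)
#     PyStackRef_FromPyObjectNew(obj);         // error: '=' required
#     tmp = PyStackRef_FromPyObjectNew(obj);   // error unless "tmp" is an output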


def variable_used(node: parser.InstDef, name: str) -> bool:
    """Determine whether a variable with a given name is used in a node."""
    return any(
        token.kind == "IDENTIFIER" and token.text == name for token in node.block.tokens
    )


def oparg_used(node: parser.InstDef) -> bool:
    """Determine whether `oparg` is used in a node."""
    return any(
        token.kind == "IDENTIFIER" and token.text == "oparg" for token in node.tokens
    )


def tier_variable(node: parser.InstDef) -> int | None:
    """Determine which tier (if any) a node is restricted to by its annotations."""
    for token in node.tokens:
        if token.kind == "ANNOTATION":
            if token.text == "specializing":
                return 1
            if re.fullmatch(r"tier\d", token.text):
                return int(token.text[-1])
    return None
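
# Annotation -> tier, per the scan above: "specializing" implies tier 1
# and "tier1"/"tier2" map to 1/2. An unannotated op yields None, which
# (as far as this analyzer is concerned) leaves the op unrestricted.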


def has_error_with_pop(op: parser.InstDef) -> bool:
    return (
        variable_used(op, "ERROR_IF")
        or variable_used(op, "pop_1_error")
        or variable_used(op, "exception_unwind")
        or variable_used(op, "resume_with_error")
    )


def has_error_without_pop(op: parser.InstDef) -> bool:
    return (
        variable_used(op, "ERROR_NO_POP")
        or variable_used(op, "pop_1_error")
        or variable_used(op, "exception_unwind")
        or variable_used(op, "resume_with_error")
    )


NON_ESCAPING_FUNCTIONS = (
    "PyStackRef_FromPyObjectSteal",
    "PyStackRef_AsPyObjectBorrow",
    "PyStackRef_AsPyObjectSteal",
    "PyStackRef_CLOSE",
    "PyStackRef_DUP",
    "PyStackRef_CLEAR",
    "PyStackRef_IsNull",
    "PyStackRef_TYPE",
    "PyStackRef_False",
    "PyStackRef_True",
    "PyStackRef_None",
    "PyStackRef_Is",
    "PyStackRef_FromPyObjectNew",
    "PyStackRef_AsPyObjectNew",
    "PyStackRef_FromPyObjectImmortal",
    "Py_INCREF",
    "_PyManagedDictPointer_IsValues",
    "_PyObject_GetManagedDict",
    "_PyObject_ManagedDictPointer",
    "_PyObject_InlineValues",
    "_PyDictValues_AddToInsertionOrder",
    "Py_DECREF",
    "Py_XDECREF",
    "_Py_DECREF_SPECIALIZED",
    "DECREF_INPUTS_AND_REUSE_FLOAT",
    "PyUnicode_Append",
    "_PyLong_IsZero",
    "Py_SIZE",
    "Py_TYPE",
    "PyList_GET_ITEM",
    "PyList_SET_ITEM",
    "PyTuple_GET_ITEM",
    "PyList_GET_SIZE",
    "PyTuple_GET_SIZE",
    "Py_ARRAY_LENGTH",
    "Py_Unicode_GET_LENGTH",
    "PyUnicode_READ_CHAR",
    "_Py_SINGLETON",
    "PyUnicode_GET_LENGTH",
    "_PyLong_IsCompact",
    "_PyLong_IsNonNegativeCompact",
    "_PyLong_CompactValue",
    "_PyLong_DigitCount",
    "_Py_NewRef",
    "_Py_IsImmortal",
    "PyLong_FromLong",
    "_Py_STR",
    "_PyLong_Add",
    "_PyLong_Multiply",
    "_PyLong_Subtract",
    "Py_NewRef",
    "_PyList_ITEMS",
    "_PyTuple_ITEMS",
    "_PyList_AppendTakeRef",
    "_Py_atomic_load_uintptr_relaxed",
    "_PyFrame_GetCode",
    "_PyThreadState_HasStackSpace",
    "_PyUnicode_Equal",
    "_PyFrame_SetStackPointer",
    "_PyType_HasFeature",
    "PyUnicode_Concat",
    "PySlice_New",
    "_Py_LeaveRecursiveCallPy",
    "CALL_STAT_INC",
    "STAT_INC",
    "maybe_lltrace_resume_frame",
    "_PyUnicode_JoinArray",
    "_PyEval_FrameClearAndPop",
    "_PyFrame_StackPush",
    "PyCell_New",
    "PyFloat_AS_DOUBLE",
    "_PyFrame_PushUnchecked",
    "Py_FatalError",
    "STACKREFS_TO_PYOBJECTS",
    "STACKREFS_TO_PYOBJECTS_CLEANUP",
    "CONVERSION_FAILED",
    "_PyList_FromStackRefSteal",
    "_PyTuple_FromArraySteal",
    "_PyTuple_FromStackRefSteal",
)

ESCAPING_FUNCTIONS = (
    "import_name",
    "import_from",
)


def makes_escaping_api_call(instr: parser.InstDef) -> bool:
    if "CALL_INTRINSIC" in instr.name:
        return True
    if instr.name == "_BINARY_OP":
        return True
    tkns = iter(instr.tokens)
    for tkn in tkns:
        if tkn.kind != lexer.IDENTIFIER:
            continue
        try:
            next_tkn = next(tkns)
        except StopIteration:
            return False
        if next_tkn.kind != lexer.LPAREN:
            continue
        if tkn.text in ESCAPING_FUNCTIONS:
            return True
        if tkn.text == "tp_vectorcall":
            return True
        if not tkn.text.startswith("Py") and not tkn.text.startswith("_Py"):
            continue
        if tkn.text.endswith("Check"):
            continue
        if tkn.text.startswith("Py_Is"):
            continue
        if tkn.text.endswith("CheckExact"):
            continue
        if tkn.text in NON_ESCAPING_FUNCTIONS:
            continue
        return True
    return False
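
# How the heuristic above classifies a few calls (illustrative uses,
# not exhaustive):
#
#     import_name(...)         -> escapes (in ESCAPING_FUNCTIONS)
#     x->tp_vectorcall(...)    -> escapes (special-cased)
#     PyObject_GetAttr(...)    -> escapes (unlisted Py* call)
#     PyTuple_GET_ITEM(...)    -> safe (in NON_ESCAPING_FUNCTIONS)
#     PyLong_CheckExact(...)   -> safe (*Check / *CheckExact / Py_Is*)
#     local_helper(...)        -> safe (not a Py*/_Py* API)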


EXITS = {
    "DISPATCH",
    "GO_TO_INSTRUCTION",
    "Py_UNREACHABLE",
    "DISPATCH_INLINED",
    "DISPATCH_GOTO",
}


def always_exits(op: parser.InstDef) -> bool:
    depth = 0
    tkn_iter = iter(op.tokens)
    for tkn in tkn_iter:
        if tkn.kind == "LBRACE":
            depth += 1
        elif tkn.kind == "RBRACE":
            depth -= 1
        elif depth > 1:
            continue
        elif tkn.kind == "GOTO" or tkn.kind == "RETURN":
            return True
        elif tkn.kind == "KEYWORD":
            if tkn.text in EXITS:
                return True
        elif tkn.kind == "IDENTIFIER":
            if tkn.text in EXITS:
                return True
            if tkn.text == "DEOPT_IF" or tkn.text == "ERROR_IF":
                next(tkn_iter)  # '('
                t = next(tkn_iter)
                if t.text == "true":
                    return True
    return False
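
# Note on the scan above: the brace-depth tracking means only
# statements in the op's outermost block are considered, so a
# top-level "goto error;", "return ...;", a macro from EXITS such as
# DISPATCH(), or a DEOPT_IF(true, ...)/ERROR_IF(true, ...) makes the
# op count as always exiting, while the same exit nested inside an
# inner block does not.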


def stack_effect_only_peeks(instr: parser.InstDef) -> bool:
    stack_inputs = [s for s in instr.inputs if not isinstance(s, parser.CacheEffect)]
    if len(stack_inputs) != len(instr.outputs):
        return False
    if len(stack_inputs) == 0:
        return False
    if any(s.cond for s in stack_inputs) or any(s.cond for s in instr.outputs):
        return False
    return all(
        (s.name == other.name and s.type == other.type and s.size == other.size)
        for s, other in zip(stack_inputs, instr.outputs)
    )


OPARG_AND_1 = re.compile(r"\(*oparg *& *1")
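
# The pattern tolerates leading parentheses and flexible spacing
# around the "&":
#
#     assert OPARG_AND_1.match("oparg & 1")
#     assert OPARG_AND_1.match("(oparg & 1)")
#     assert OPARG_AND_1.match("((oparg&1) != 0)")
#     assert not OPARG_AND_1.match("oparg & 2")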


def effect_depends_on_oparg_1(op: parser.InstDef) -> bool:
    for effect in op.inputs:
        if isinstance(effect, parser.CacheEffect):
            continue
        if not effect.cond:
            continue
        if OPARG_AND_1.match(effect.cond):
            return True
    for effect in op.outputs:
        if not effect.cond:
            continue
        if OPARG_AND_1.match(effect.cond):
            return True
    return False


def compute_properties(op: parser.InstDef) -> Properties:
    has_free = (
        variable_used(op, "PyCell_New")
        or variable_used(op, "PyCell_GetRef")
        or variable_used(op, "PyCell_SetTakeRef")
        or variable_used(op, "PyCell_SwapTakeRef")
    )
    deopts_if = variable_used(op, "DEOPT_IF")
    exits_if = variable_used(op, "EXIT_IF")
    if deopts_if and exits_if:
        tkn = op.tokens[0]
        raise lexer.make_syntax_error(
            "Op cannot contain both EXIT_IF and DEOPT_IF",
            tkn.filename,
            tkn.line,
            tkn.column,
            op.name,
        )
    error_with_pop = has_error_with_pop(op)
    error_without_pop = has_error_without_pop(op)
    return Properties(
        escapes=makes_escaping_api_call(op),
        error_with_pop=error_with_pop,
        error_without_pop=error_without_pop,
        deopts=deopts_if,
        side_exit=exits_if,
        oparg=oparg_used(op),
        jumps=variable_used(op, "JUMPBY"),
        eval_breaker="CHECK_PERIODIC" in op.name,
        needs_this=variable_used(op, "this_instr"),
        always_exits=always_exits(op),
        stores_sp=variable_used(op, "SYNC_SP"),
        uses_co_consts=variable_used(op, "FRAME_CO_CONSTS"),
        uses_co_names=variable_used(op, "FRAME_CO_NAMES"),
        uses_locals=(variable_used(op, "GETLOCAL") or variable_used(op, "SETLOCAL"))
        and not has_free,
        has_free=has_free,
        pure="pure" in op.annotations,
        tier=tier_variable(op),
        needs_prev=variable_used(op, "prev_instr"),
    )


def make_uop(
    name: str,
    op: parser.InstDef,
    inputs: list[parser.InputEffect],
    uops: dict[str, Uop],
) -> Uop:
    result = Uop(
        name=name,
        context=op.context,
        annotations=op.annotations,
        stack=analyze_stack(op),
        caches=analyze_caches(inputs),
        deferred_refs=analyze_deferred_refs(op),
        body=op.block.tokens,
        properties=compute_properties(op),
    )
    if effect_depends_on_oparg_1(op) and "split" in op.annotations:
        result.properties.oparg_and_1 = True
        for bit in ("0", "1"):
            name_x = name + "_" + bit
            properties = compute_properties(op)
            if properties.oparg:
                # May not need oparg anymore
                properties.oparg = any(
                    token.text == "oparg" for token in op.block.tokens
                )
            rep = Uop(
                name=name_x,
                context=op.context,
                annotations=op.annotations,
                stack=analyze_stack(op, bit),
                caches=analyze_caches(inputs),
                deferred_refs=analyze_deferred_refs(op),
                body=op.block.tokens,
                properties=properties,
            )
            rep.replicates = result
            uops[name_x] = rep
    for anno in op.annotations:
        if anno.startswith("replicate"):
            result.replicated = int(anno[10:-1])
            break
    else:
        return result
    for oparg in range(result.replicated):
        name_x = name + "_" + str(oparg)
        properties = compute_properties(op)
        properties.oparg = False
        properties.const_oparg = oparg
        rep = Uop(
            name=name_x,
            context=op.context,
            annotations=op.annotations,
            stack=analyze_stack(op),
            caches=analyze_caches(inputs),
            deferred_refs=analyze_deferred_refs(op),
            body=op.block.tokens,
            properties=properties,
        )
        rep.replicates = result
        uops[name_x] = rep

    return result


def add_op(op: parser.InstDef, uops: dict[str, Uop]) -> None:
    assert op.kind == "op"
    if op.name in uops:
        if "override" not in op.annotations:
            raise override_error(
                op.name, op.context, uops[op.name].context, op.tokens[0]
            )
    uops[op.name] = make_uop(op.name, op, op.inputs, uops)


def add_instruction(
    where: lexer.Token,
    name: str,
    parts: list[Part],
    instructions: dict[str, Instruction],
) -> None:
    instructions[name] = Instruction(where, name, parts, None)


def desugar_inst(
    inst: parser.InstDef, instructions: dict[str, Instruction], uops: dict[str, Uop]
) -> None:
    assert inst.kind == "inst"
    name = inst.name
    op_inputs: list[parser.InputEffect] = []
    parts: list[Part] = []
    uop_index = -1
    # Move unused cache entries to the Instruction, removing them from the Uop.
    for input in inst.inputs:
        if isinstance(input, parser.CacheEffect) and input.name == "unused":
            parts.append(Skip(input.size))
        else:
            op_inputs.append(input)
            if uop_index < 0:
                uop_index = len(parts)
                # Placeholder for the uop.
                parts.append(Skip(0))
    uop = make_uop("_" + inst.name, inst, op_inputs, uops)
    uop.implicitly_created = True
    uops[inst.name] = uop
    if uop_index < 0:
        parts.append(uop)
    else:
        parts[uop_index] = uop
    add_instruction(inst.first_token, name, parts, instructions)
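
# A sketch of the desugaring (hypothetical instruction): for
# "inst(FOO, (unused/1, counter/1 -- ))" the leading unused cache
# entry becomes Skip(1) on the Instruction, while the implicitly
# created uop _FOO keeps the "counter" entry, so parts end up as
# [Skip(1), _FOO] and the instruction's size still counts every
# cache word.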


def add_macro(
    macro: parser.Macro, instructions: dict[str, Instruction], uops: dict[str, Uop]
) -> None:
    parts: list[Part] = []
    for part in macro.uops:
        match part:
            case parser.OpName():
                if part.name == "flush":
                    parts.append(Flush())
                else:
                    if part.name not in uops:
                        raise analysis_error(
                            f"No Uop named {part.name}", macro.tokens[0]
                        )
                    parts.append(uops[part.name])
            case parser.CacheEffect():
                parts.append(Skip(part.size))
            case _:
                assert False
    assert parts
    add_instruction(macro.first_token, macro.name, parts, instructions)


def add_family(
    pfamily: parser.Family,
    instructions: dict[str, Instruction],
    families: dict[str, Family],
) -> None:
    family = Family(
        pfamily.name,
        pfamily.size,
        [instructions[member_name] for member_name in pfamily.members],
    )
    for member in family.members:
        member.family = family
    # The head of the family is an implicit jump target for DEOPTs
    instructions[family.name].is_target = True
    families[family.name] = family


def add_pseudo(
    pseudo: parser.Pseudo,
    instructions: dict[str, Instruction],
    pseudos: dict[str, PseudoInstruction],
) -> None:
    pseudos[pseudo.name] = PseudoInstruction(
        pseudo.name,
        analyze_stack(pseudo),
        [instructions[target] for target in pseudo.targets],
        pseudo.flags,
    )


def assign_opcodes(
    instructions: dict[str, Instruction],
    families: dict[str, Family],
    pseudos: dict[str, PseudoInstruction],
) -> tuple[dict[str, int], int, int]:
    """Assigns opcodes, then returns the opmap,
    have_arg and min_instrumented values"""
    instmap: dict[str, int] = {}

    # 0 is reserved for cache entries. This helps debugging.
    instmap["CACHE"] = 0

    # 17 is reserved as it is the initial value for the specializing counter.
    # This helps catch cases where we attempt to execute a cache.
    instmap["RESERVED"] = 17

    # 149 is RESUME - it is hard coded as such in Tools/build/deepfreeze.py
    instmap["RESUME"] = 149

    # This is an historical oddity.
    instmap["BINARY_OP_INPLACE_ADD_UNICODE"] = 3

    instmap["INSTRUMENTED_LINE"] = 254
    instmap["ENTER_EXECUTOR"] = 255

    instrumented = [name for name in instructions if name.startswith("INSTRUMENTED")]

    specialized: set[str] = set()
    no_arg: list[str] = []
    has_arg: list[str] = []

    for family in families.values():
        specialized.update(inst.name for inst in family.members)

    for inst in instructions.values():
        name = inst.name
        if name in specialized:
            continue
        if name in instrumented:
            continue
        if inst.properties.oparg:
            has_arg.append(name)
        else:
            no_arg.append(name)

    # Specialized ops appear in their own section
    # Instrumented opcodes are at the end of the valid range
    min_internal = 150
    min_instrumented = 254 - (len(instrumented) - 1)
    assert min_internal + len(specialized) < min_instrumented

    next_opcode = 1

    def add_instruction(name: str) -> None:
        nonlocal next_opcode
        if name in instmap:
            return  # Pre-defined name
        while next_opcode in instmap.values():
            next_opcode += 1
        instmap[name] = next_opcode
        next_opcode += 1

    for name in sorted(no_arg):
        add_instruction(name)
    for name in sorted(has_arg):
        add_instruction(name)
    # For compatibility
    next_opcode = min_internal
    for name in sorted(specialized):
        add_instruction(name)
    next_opcode = min_instrumented
    for name in instrumented:
        add_instruction(name)

    for name in instructions:
        instructions[name].opcode = instmap[name]

    for op, name in enumerate(sorted(pseudos), 256):
        instmap[name] = op
        pseudos[name].opcode = op

    return instmap, len(no_arg), min_instrumented
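
# Resulting opcode number line, summarizing the assignment above
# (boundaries other than the hard-coded ones depend on the input):
#
#     0       CACHE (reserved for cache entries)
#     1..     plain instructions, no-arg then has-arg (3 and 17 are
#             pre-assigned above and skipped)
#     149     RESUME
#     150..   specialized instructions
#     ..254   instrumented instructions, ending at INSTRUMENTED_LINE=254
#     255     ENTER_EXECUTOR
#     256..   pseudo-instructions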


def analyze_forest(forest: list[parser.AstNode]) -> Analysis:
    instructions: dict[str, Instruction] = {}
    uops: dict[str, Uop] = {}
    families: dict[str, Family] = {}
    pseudos: dict[str, PseudoInstruction] = {}
    for node in forest:
        match node:
            case parser.InstDef(name):
                if node.kind == "inst":
                    desugar_inst(node, instructions, uops)
                else:
                    assert node.kind == "op"
                    add_op(node, uops)
            case parser.Macro():
                pass
            case parser.Family():
                pass
            case parser.Pseudo():
                pass
            case _:
                assert False
    for node in forest:
        if isinstance(node, parser.Macro):
            add_macro(node, instructions, uops)
    for node in forest:
        match node:
            case parser.Family():
                add_family(node, instructions, families)
            case parser.Pseudo():
                add_pseudo(node, instructions, pseudos)
            case _:
                pass
    for uop in uops.values():
        tkn_iter = iter(uop.body)
        for tkn in tkn_iter:
            if tkn.kind == "IDENTIFIER" and tkn.text == "GO_TO_INSTRUCTION":
                if next(tkn_iter).kind != "LPAREN":
                    continue
                target = next(tkn_iter)
                if target.kind != "IDENTIFIER":
                    continue
                if target.text in instructions:
                    instructions[target.text].is_target = True
    # Special case BINARY_OP_INPLACE_ADD_UNICODE
    # BINARY_OP_INPLACE_ADD_UNICODE is not a normal family member,
    # as it is the wrong size, but we need it to maintain an
    # historical optimization.
    if "BINARY_OP_INPLACE_ADD_UNICODE" in instructions:
        inst = instructions["BINARY_OP_INPLACE_ADD_UNICODE"]
        inst.family = families["BINARY_OP"]
        families["BINARY_OP"].members.append(inst)
    opmap, first_arg, min_instrumented = assign_opcodes(instructions, families, pseudos)
    return Analysis(
        instructions, uops, families, pseudos, opmap, first_arg, min_instrumented
    )


def analyze_files(filenames: list[str]) -> Analysis:
    return analyze_forest(parser.parse_files(filenames))


def dump_analysis(analysis: Analysis) -> None:
    print("Uops:")
    for u in analysis.uops.values():
        u.dump(" ")
    print("Instructions:")
    for i in analysis.instructions.values():
        i.dump(" ")
    print("Families:")
    for f in analysis.families.values():
        f.dump(" ")
    print("Pseudos:")
    for p in analysis.pseudos.values():
        p.dump(" ")


if __name__ == "__main__":
    import sys

    if len(sys.argv) < 2:
        print("No input")
    else:
        filenames = sys.argv[1:]
        dump_analysis(analyze_files(filenames))