#!/usr/bin/env python3
#
# Argument Clinic
# Copyright 2012-2013 by Larry Hastings.
# Licensed to the PSF under a contributor agreement.
#
from __future__ import annotations

import abc
import argparse
import ast
import builtins as bltns
import collections
import contextlib
import copy
import cpp
import dataclasses as dc
import enum
import functools
import hashlib
import inspect
import io
import itertools
import os
import pprint
import re
import shlex
import string
import sys
import textwrap

from collections.abc import (
    Callable,
    Iterable,
    Iterator,
    Sequence,
)
from types import FunctionType, NoneType
from typing import (
    TYPE_CHECKING,
    Any,
    Final,
    Literal,
    NamedTuple,
    NoReturn,
    Protocol,
    TypeVar,
    cast,
    overload,
)

# TODO:
#
# soon:
#
# * allow mixing any two of {positional-only, positional-or-keyword,
#   keyword-only}
#     * dict constructor uses positional-only and keyword-only
#     * max and min use positional only with an optional group
#       and keyword-only
#

version = '1'

NO_VARARG = "PY_SSIZE_T_MAX"
CLINIC_PREFIX = "__clinic_"
CLINIC_PREFIXED_ARGS = {
    "_keywords",
    "_parser",
    "args",
    "argsbuf",
    "fastargs",
    "kwargs",
    "kwnames",
    "nargs",
    "noptargs",
    "return_value",
}


class Sentinels(enum.Enum):
    unspecified = "unspecified"
    unknown = "unknown"

    def __repr__(self) -> str:
        return f"<{self.value.capitalize()}>"


unspecified: Final = Sentinels.unspecified
unknown: Final = Sentinels.unknown


# This one needs to be a distinct class, unlike the other two
class Null:
    def __repr__(self) -> str:
        return '<Null>'


NULL = Null()

sig_end_marker = '--'

Appender = Callable[[str], None]
Outputter = Callable[[], str]
TemplateDict = dict[str, str]

class _TextAccumulator(NamedTuple):
    text: list[str]
    append: Appender
    output: Outputter

def _text_accumulator() -> _TextAccumulator:
    text: list[str] = []
    def output() -> str:
        s = ''.join(text)
        text.clear()
        return s
    return _TextAccumulator(text, text.append, output)


class TextAccumulator(NamedTuple):
    append: Appender
    output: Outputter

def text_accumulator() -> TextAccumulator:
    """
    Creates a simple text accumulator / joiner.

    Returns a pair of callables:
        append, output
    "append" appends a string to the accumulator.
    "output" returns the contents of the accumulator
       joined together (''.join(accumulator)) and
       empties the accumulator.
    """
    text, append, output = _text_accumulator()
    return TextAccumulator(append, output)
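
# Illustrative usage of the pair returned by text_accumulator():
#     append, output = text_accumulator()
#     append("foo"); append("bar")
#     output()  # -> "foobar", and the accumulator is emptied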


@dc.dataclass
class ClinicError(Exception):
    message: str
    _: dc.KW_ONLY
    lineno: int | None = None
    filename: str | None = None

    def __post_init__(self) -> None:
        super().__init__(self.message)

    def report(self, *, warn_only: bool = False) -> str:
        msg = "Warning" if warn_only else "Error"
        if self.filename is not None:
            msg += f" in file {self.filename!r}"
        if self.lineno is not None:
            msg += f" on line {self.lineno}"
        msg += ":\n"
        msg += f"{self.message}\n"
        return msg
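
# Illustrative example of the report() format:
#     ClinicError("bad input", filename="spam.c", lineno=42).report()
# returns "Error in file 'spam.c' on line 42:\nbad input\n".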


@overload
def warn_or_fail(
    *args: object,
    fail: Literal[True],
    filename: str | None = None,
    line_number: int | None = None,
) -> NoReturn: ...

@overload
def warn_or_fail(
    *args: object,
    fail: Literal[False] = False,
    filename: str | None = None,
    line_number: int | None = None,
) -> None: ...

def warn_or_fail(
    *args: object,
    fail: bool = False,
    filename: str | None = None,
    line_number: int | None = None,
) -> None:
    joined = " ".join([str(a) for a in args])
    if clinic:
        if filename is None:
            filename = clinic.filename
        if getattr(clinic, 'block_parser', None) and (line_number is None):
            line_number = clinic.block_parser.line_number
    error = ClinicError(joined, filename=filename, lineno=line_number)
    if fail:
        raise error
    else:
        print(error.report(warn_only=True))


def warn(
    *args: object,
    filename: str | None = None,
    line_number: int | None = None,
) -> None:
    return warn_or_fail(*args, filename=filename, line_number=line_number, fail=False)

def fail(
    *args: object,
    filename: str | None = None,
    line_number: int | None = None,
) -> NoReturn:
    warn_or_fail(*args, filename=filename, line_number=line_number, fail=True)


def quoted_for_c_string(s: str) -> str:
    for old, new in (
        ('\\', '\\\\'), # must be first!
        ('"', '\\"'),
        ("'", "\\'"),
        ):
        s = s.replace(old, new)
    return s

def c_repr(s: str) -> str:
    return '"' + s + '"'
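
# For example, quoted_for_c_string('say "hi"') returns 'say \\"hi\\"',
# which c_repr() then wraps in double quotes for embedding in C source.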


is_legal_c_identifier = re.compile('^[A-Za-z_][A-Za-z0-9_]*$').match

def is_legal_py_identifier(s: str) -> bool:
    return all(is_legal_c_identifier(field) for field in s.split('.'))

# identifiers that are okay in Python but aren't a good idea in C.
# so if they're used Argument Clinic will add "_value" to the end
# of the name in C.
c_keywords = set("""
asm auto break case char const continue default do double
else enum extern float for goto if inline int long
register return short signed sizeof static struct switch
typedef typeof union unsigned void volatile while
""".strip().split())

def ensure_legal_c_identifier(s: str) -> str:
    # for now, just complain if what we're given isn't legal
    if not is_legal_c_identifier(s):
        fail("Illegal C identifier:", s)
    # but if we picked a C keyword, pick something else
    if s in c_keywords:
        return s + "_value"
    return s
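
# For example, ensure_legal_c_identifier("default") returns "default_value"
# (since "default" is a C keyword), while a name like "foo.bar" triggers
# fail() because it is not a legal C identifier.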

def rstrip_lines(s: str) -> str:
    text, add, output = _text_accumulator()
    for line in s.split('\n'):
        add(line.rstrip())
        add('\n')
    text.pop()
    return output()

def format_escape(s: str) -> str:
    # double up curly-braces, this string will be used
    # as part of a format_map() template later
    s = s.replace('{', '{{')
    s = s.replace('}', '}}')
    return s

def linear_format(s: str, **kwargs: str) -> str:
    """
    Perform str.format-like substitution, except:
      * The strings substituted must be on lines by
        themselves. (This line is the "source line".)
      * If the substitution text is empty, the source line
        is removed in the output.
      * If the field is not recognized, the original line
        is passed unmodified through to the output.
      * If the substitution text is not empty:
          * Each line of the substituted text is indented
            by the indent of the source line.
          * A newline will be added to the end.
    """

    add, output = text_accumulator()
    for line in s.split('\n'):
        indent, curly, trailing = line.partition('{')
        if not curly:
            add(line)
            add('\n')
            continue

        name, curly, trailing = trailing.partition('}')
        if not curly or name not in kwargs:
            add(line)
            add('\n')
            continue

        if trailing:
            fail("Text found after {" + name + "} block marker! It must be on a line by itself.")
        if indent.strip():
            fail("Non-whitespace characters found before {" + name + "} block marker! It must be on a line by itself.")

        value = kwargs[name]
        if not value:
            continue

        value = textwrap.indent(rstrip_lines(value), indent)
        add(value)
        add('\n')

    return output()[:-1]
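
# For example (illustrative):
#     linear_format("if (x) {\n    {stuff}\n}", stuff="a;\nb;")
# returns "if (x) {\n    a;\n    b;\n}": each substituted line picks up
# the four-space indent of the "{stuff}" source line.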

def indent_all_lines(s: str, prefix: str) -> str:
    """
    Returns 's', with 'prefix' prepended to all lines.

    If the last line is empty, prefix is not prepended
    to it. (If s is blank, returns s unchanged.)

    (textwrap.indent only adds to non-blank lines.)
    """
    split = s.split('\n')
    last = split.pop()
    final = []
    for line in split:
        final.append(prefix)
        final.append(line)
        final.append('\n')
    if last:
        final.append(prefix)
        final.append(last)
    return ''.join(final)

def suffix_all_lines(s: str, suffix: str) -> str:
    """
    Returns 's', with 'suffix' appended to all lines.

    If the last line is empty, suffix is not appended
    to it. (If s is blank, returns s unchanged.)
    """
    split = s.split('\n')
    last = split.pop()
    final = []
    for line in split:
        final.append(line)
        final.append(suffix)
        final.append('\n')
    if last:
        final.append(last)
        final.append(suffix)
    return ''.join(final)
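
# For example, indent_all_lines("a\n\nb", "# ") returns "# a\n# \n# b":
# unlike textwrap.indent(), the embedded blank line gets the prefix too.
# suffix_all_lines() is the mirror image, operating on line endings.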


def version_splitter(s: str) -> tuple[int, ...]:
    """Splits a version string into a tuple of integers.

    The following ASCII characters are allowed, and employ
    the following conversions:
        a -> -3
        b -> -2
        c -> -1
    (This permits Python-style version strings such as "1.4b3".)
    """
    version: list[int] = []
    accumulator: list[str] = []
    def flush() -> None:
        if not accumulator:
            fail(f'Unsupported version string: {s!r}')
        version.append(int(''.join(accumulator)))
        accumulator.clear()

    for c in s:
        if c.isdigit():
            accumulator.append(c)
        elif c == '.':
            flush()
        elif c in 'abc':
            flush()
            version.append('abc'.index(c) - 3)
        else:
            fail(f'Illegal character {c!r} in version string {s!r}')
    flush()
    return tuple(version)

def version_comparitor(version1: str, version2: str) -> Literal[-1, 0, 1]:
    iterator = itertools.zip_longest(
        version_splitter(version1), version_splitter(version2), fillvalue=0
    )
    for a, b in iterator:
        if a < b:
            return -1
        if a > b:
            return 1
    return 0
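
# For example, version_splitter("1.4b3") returns (1, 4, -2, 3), so
# version_comparitor("1.4b3", "1.4") returns -1: the beta sorts before
# the final release because the shorter tuple is padded with zeroes.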


class CRenderData:
    def __init__(self) -> None:

        # The C statements to declare variables.
        # Should be full lines with \n eol characters.
        self.declarations: list[str] = []

        # The C statements required to initialize the variables before the parse call.
        # Should be full lines with \n eol characters.
        self.initializers: list[str] = []

        # The C statements needed to dynamically modify the values
        # parsed by the parse call, before calling the impl.
        self.modifications: list[str] = []

        # The entries for the "keywords" array for PyArg_ParseTuple.
        # Should be individual strings representing the names.
        self.keywords: list[str] = []

        # The "format units" for PyArg_ParseTuple.
        # Should be individual strings that will get
        self.format_units: list[str] = []

        # The varargs arguments for PyArg_ParseTuple.
        self.parse_arguments: list[str] = []

        # The parameter declarations for the impl function.
        self.impl_parameters: list[str] = []

        # The arguments to the impl function at the time it's called.
        self.impl_arguments: list[str] = []

        # For return converters: the name of the variable that
        # should receive the value returned by the impl.
        self.return_value = "return_value"

        # For return converters: the code to convert the return
        # value from the parse function. This is also where
        # you should check the _return_value for errors, and
        # "goto exit" if there are any.
        self.return_conversion: list[str] = []
        self.converter_retval = "_return_value"

        # The C statements required to do some operations
        # after the end of parsing but before cleaning up.
        # These operations may be, for example, memory deallocations which
        # can only be done without any error happening during argument parsing.
        self.post_parsing: list[str] = []

        # The C statements required to clean up after the impl call.
        self.cleanup: list[str] = []


class FormatCounterFormatter(string.Formatter):
    """
    This counts how many instances of each formatter
    "replacement string" appear in the format string.

    e.g. after evaluating "string {a}, {b}, {c}, {a}"
         the counts dict would now look like
         {'a': 2, 'b': 1, 'c': 1}
    """
    def __init__(self) -> None:
        self.counts = collections.Counter[str]()

    def get_value(
        self, key: str, args: object, kwargs: object  # type: ignore[override]
    ) -> Literal['']:
        self.counts[key] += 1
        return ''
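
# Illustrative usage:
#     fcf = FormatCounterFormatter()
#     fcf.format("{a} {b} {a}")
#     fcf.counts  # -> Counter({'a': 2, 'b': 1})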

class Language(metaclass=abc.ABCMeta):

    start_line = ""
    body_prefix = ""
    stop_line = ""
    checksum_line = ""

    def __init__(self, filename: str) -> None:
        pass

    @abc.abstractmethod
    def render(
            self,
            clinic: Clinic | None,
            signatures: Iterable[Module | Class | Function]
    ) -> str:
        pass

    def parse_line(self, line: str) -> None:
        pass

    def validate(self) -> None:
        def assert_only_one(
                attr: str,
                *additional_fields: str
        ) -> None:
            """
            Ensures that the string found at getattr(self, attr)
            contains exactly one formatter replacement string for
            each valid field. The list of valid fields is
            ['dsl_name'] extended by additional_fields.

            e.g.
                self.fmt = "{dsl_name} {a} {b}"

                # this passes
                self.assert_only_one('fmt', 'a', 'b')

                # this fails, the format string has a {b} in it
                self.assert_only_one('fmt', 'a')

                # this fails, the format string doesn't have a {c} in it
                self.assert_only_one('fmt', 'a', 'b', 'c')

                # this fails, the format string has two {a}s in it,
                # it must contain exactly one
                self.fmt2 = '{dsl_name} {a} {a}'
                self.assert_only_one('fmt2', 'a')

            """
            fields = ['dsl_name']
            fields.extend(additional_fields)
            line: str = getattr(self, attr)
            fcf = FormatCounterFormatter()
            fcf.format(line)
            def local_fail(should_be_there_but_isnt: bool) -> None:
                if should_be_there_but_isnt:
                    fail("{} {} must contain {{{}}} exactly once!".format(
                        self.__class__.__name__, attr, name))
                else:
                    fail("{} {} must not contain {{{}}}!".format(
                        self.__class__.__name__, attr, name))

            for name, count in fcf.counts.items():
                if name in fields:
                    if count > 1:
                        local_fail(True)
                else:
                    local_fail(False)
            for name in fields:
                if fcf.counts.get(name) != 1:
                    local_fail(True)

        assert_only_one('start_line')
        assert_only_one('stop_line')

        field = "arguments" if "{arguments}" in self.checksum_line else "checksum"
        assert_only_one('checksum_line', field)



class PythonLanguage(Language):

    language = 'Python'
    start_line = "#/*[{dsl_name} input]"
    body_prefix = "#"
    stop_line = "#[{dsl_name} start generated code]*/"
    checksum_line = "#/*[{dsl_name} end generated code: {arguments}]*/"


ParamTuple = tuple["Parameter", ...]


def permute_left_option_groups(
        l: Sequence[Iterable[Parameter]]
) -> Iterator[ParamTuple]:
    """
    Given [(1,), (2,), (3,)], should yield:
       ()
       (3,)
       (2, 3)
       (1, 2, 3)
    """
    yield tuple()
    accumulator: list[Parameter] = []
    for group in reversed(l):
        accumulator = list(group) + accumulator
        yield tuple(accumulator)


def permute_right_option_groups(
        l: Sequence[Iterable[Parameter]]
) -> Iterator[ParamTuple]:
    """
    Given [(1,), (2,), (3,)], should yield:
       ()
       (1,)
       (1, 2)
       (1, 2, 3)
    """
    yield tuple()
    accumulator: list[Parameter] = []
    for group in l:
        accumulator.extend(group)
        yield tuple(accumulator)


def permute_optional_groups(
        left: Sequence[Iterable[Parameter]],
        required: Iterable[Parameter],
        right: Sequence[Iterable[Parameter]]
) -> tuple[ParamTuple, ...]:
    """
    Generator function that computes the set of acceptable
    argument lists for the provided iterables of
    argument groups. (Actually it generates a tuple of tuples.)

    Algorithm: prefer left options over right options.

    If required is empty, left must also be empty.
    """
    required = tuple(required)
    if not required:
        if left:
            raise ValueError("required is empty but left is not")

    accumulator: list[ParamTuple] = []
    counts = set()
    for r in permute_right_option_groups(right):
        for l in permute_left_option_groups(left):
            t = l + required + r
            if len(t) in counts:
                continue
            counts.add(len(t))
            accumulator.append(t)

    accumulator.sort(key=len)
    return tuple(accumulator)
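
# For example, with the integer stand-ins used in the docstrings above:
#     permute_optional_groups([(1,), (2,)], (3,), [(4,)])
# returns ((3,), (2, 3), (1, 2, 3), (1, 2, 3, 4)): for a single optional
# argument the left group (2,) is preferred over the right group (4,).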


def strip_leading_and_trailing_blank_lines(s: str) -> str:
    lines = s.rstrip().split('\n')
    while lines:
        line = lines[0]
        if line.strip():
            break
        del lines[0]
    return '\n'.join(lines)

@functools.lru_cache()
def normalize_snippet(
        s: str,
        *,
        indent: int = 0
) -> str:
    """
    Reformats s:
        * removes leading and trailing blank lines
        * ensures that it does not end with a newline
        * dedents so the first nonwhite character on any line is at column "indent"
    """
    s = strip_leading_and_trailing_blank_lines(s)
    s = textwrap.dedent(s)
    if indent:
        s = textwrap.indent(s, ' ' * indent)
    return s
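
# For example, normalize_snippet("\n        goto exit;\n    ") returns
# "goto exit;"; passing indent=4 would re-indent that result by four columns.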


def declare_parser(
        f: Function,
        *,
        hasformat: bool = False
) -> str:
    """
    Generates the code template for a static local PyArg_Parser variable,
    with an initializer. For core code (incl. builtin modules) the
    kwtuple field is also statically initialized. Otherwise
    it is initialized at runtime.
    """
    if hasformat:
        fname = ''
        format_ = '.format = "{format_units}:{name}",'
    else:
        fname = '.fname = "{name}",'
        format_ = ''

    num_keywords = len([
        p for p in f.parameters.values()
        if not p.is_positional_only() and not p.is_vararg()
    ])
    if num_keywords == 0:
        declarations = """
            #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
            # define KWTUPLE (PyObject *)&_Py_SINGLETON(tuple_empty)
            #else
            # define KWTUPLE NULL
            #endif
        """
    else:
        declarations = """
            #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)

            #define NUM_KEYWORDS %d
            static struct {{
                PyGC_Head _this_is_not_used;
                PyObject_VAR_HEAD
                PyObject *ob_item[NUM_KEYWORDS];
            }} _kwtuple = {{
                .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
                .ob_item = {{ {keywords_py} }},
            }};
            #undef NUM_KEYWORDS
            #define KWTUPLE (&_kwtuple.ob_base.ob_base)

            #else // !Py_BUILD_CORE
            # define KWTUPLE NULL
            #endif // !Py_BUILD_CORE
        """ % num_keywords

    declarations += """
            static const char * const _keywords[] = {{{keywords_c} NULL}};
            static _PyArg_Parser _parser = {{
                .keywords = _keywords,
                %s
                .kwtuple = KWTUPLE,
            }};
            #undef KWTUPLE
    """ % (format_ or fname)
    return normalize_snippet(declarations)


def wrap_declarations(
        text: str,
        length: int = 78
) -> str:
    """
    A simple-minded text wrapper for C function declarations.

    It views a declaration line as looking like this:
        xxxxxxxx(xxxxxxxxx,xxxxxxxxx)
    If called with length=30, it would wrap that line into
        xxxxxxxx(xxxxxxxxx,
                 xxxxxxxxx)
    (If the declaration has zero or one parameters, this
    function won't wrap it.)

    If this doesn't work properly, it's probably better to
    start from scratch with a more sophisticated algorithm,
    rather than try and improve/debug this dumb little function.
    """
    lines = []
    for line in text.split('\n'):
        prefix, _, after_l_paren = line.partition('(')
        if not after_l_paren:
            lines.append(line)
            continue
        in_paren, _, after_r_paren = after_l_paren.partition(')')
        if not _:
            lines.append(line)
            continue
        if ',' not in in_paren:
            lines.append(line)
            continue
        parameters = [x.strip() + ", " for x in in_paren.split(',')]
        prefix += "("
        if len(prefix) < length:
            spaces = " " * len(prefix)
        else:
            spaces = " " * 4

        while parameters:
            line = prefix
            first = True
            while parameters:
                if (not first and
                    (len(line) + len(parameters[0]) > length)):
                    break
                line += parameters.pop(0)
                first = False
            if not parameters:
                line = line.rstrip(", ") + ")" + after_r_paren
            lines.append(line.rstrip())
            prefix = spaces
    return "\n".join(lines)
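
# For example, wrap_declarations("void f(int a, int b, int c)", length=20)
# returns:
#     void f(int a,
#            int b,
#            int c)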


class CLanguage(Language):

    body_prefix = "#"
    language = 'C'
    start_line = "/*[{dsl_name} input]"
    body_prefix = ""
    stop_line = "[{dsl_name} start generated code]*/"
    checksum_line = "/*[{dsl_name} end generated code: {arguments}]*/"

    PARSER_PROTOTYPE_KEYWORD: Final[str] = normalize_snippet("""
        static PyObject *
        {c_basename}({self_type}{self_name}, PyObject *args, PyObject *kwargs)
        """)
    PARSER_PROTOTYPE_KEYWORD___INIT__: Final[str] = normalize_snippet("""
        static int
        {c_basename}({self_type}{self_name}, PyObject *args, PyObject *kwargs)
        """)
    PARSER_PROTOTYPE_VARARGS: Final[str] = normalize_snippet("""
        static PyObject *
        {c_basename}({self_type}{self_name}, PyObject *args)
        """)
    PARSER_PROTOTYPE_FASTCALL: Final[str] = normalize_snippet("""
        static PyObject *
        {c_basename}({self_type}{self_name}, PyObject *const *args, Py_ssize_t nargs)
        """)
    PARSER_PROTOTYPE_FASTCALL_KEYWORDS: Final[str] = normalize_snippet("""
        static PyObject *
        {c_basename}({self_type}{self_name}, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
        """)
    PARSER_PROTOTYPE_DEF_CLASS: Final[str] = normalize_snippet("""
        static PyObject *
        {c_basename}({self_type}{self_name}, PyTypeObject *{defining_class_name}, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
        """)
    PARSER_PROTOTYPE_NOARGS: Final[str] = normalize_snippet("""
        static PyObject *
        {c_basename}({self_type}{self_name}, PyObject *Py_UNUSED(ignored))
        """)
    METH_O_PROTOTYPE: Final[str] = normalize_snippet("""
        static PyObject *
        {c_basename}({impl_parameters})
        """)
    DOCSTRING_PROTOTYPE_VAR: Final[str] = normalize_snippet("""
        PyDoc_VAR({c_basename}__doc__);
        """)
    DOCSTRING_PROTOTYPE_STRVAR: Final[str] = normalize_snippet("""
        PyDoc_STRVAR({c_basename}__doc__,
        {docstring});
        """)
    IMPL_DEFINITION_PROTOTYPE: Final[str] = normalize_snippet("""
        static {impl_return_type}
        {c_basename}_impl({impl_parameters})
        """)
    METHODDEF_PROTOTYPE_DEFINE: Final[str] = normalize_snippet(r"""
        #define {methoddef_name} \
            {{"{name}", {methoddef_cast}{c_basename}{methoddef_cast_end}, {methoddef_flags}, {c_basename}__doc__}},
        """)
    METHODDEF_PROTOTYPE_IFNDEF: Final[str] = normalize_snippet("""
        #ifndef {methoddef_name}
            #define {methoddef_name}
        #endif /* !defined({methoddef_name}) */
        """)

    def __init__(self, filename: str) -> None:
        super().__init__(filename)
        self.cpp = cpp.Monitor(filename)
        self.cpp.fail = fail  # type: ignore[method-assign]

    def parse_line(self, line: str) -> None:
        self.cpp.writeline(line)

    def render(
            self,
            clinic: Clinic | None,
            signatures: Iterable[Module | Class | Function]
    ) -> str:
        function = None
        for o in signatures:
            if isinstance(o, Function):
                if function:
                    fail("You may specify at most one function per block.\nFound a block containing at least two:\n\t" + repr(function) + " and " + repr(o))
                function = o
        return self.render_function(clinic, function)

    def docstring_for_c_string(
            self,
            f: Function
    ) -> str:
        text, add, output = _text_accumulator()
        # turn docstring into a properly quoted C string
        for line in f.docstring.split('\n'):
            add('"')
            add(quoted_for_c_string(line))
            add('\\n"\n')

        if text[-2] == sig_end_marker:
            # If we only have a signature, add the blank line that the
            # __text_signature__ getter expects to be there.
            add('"\\n"')
        else:
            text.pop()
            add('"')
        return ''.join(text)

    def output_templates(
            self,
            f: Function,
            clinic: Clinic
    ) -> dict[str, str]:
        parameters = list(f.parameters.values())
        assert parameters
        first_param = parameters.pop(0)
        assert isinstance(first_param.converter, self_converter)
        requires_defining_class = False
        if parameters and isinstance(parameters[0].converter, defining_class_converter):
            requires_defining_class = True
            del parameters[0]
        converters = [p.converter for p in parameters]

        has_option_groups = parameters and (parameters[0].group or parameters[-1].group)
        default_return_converter = (not f.return_converter or
            f.return_converter.type == 'PyObject *')

        new_or_init = f.kind.new_or_init

        vararg: int | str = NO_VARARG
        pos_only = min_pos = max_pos = min_kw_only = pseudo_args = 0
        for i, p in enumerate(parameters, 1):
            if p.is_keyword_only():
                assert not p.is_positional_only()
                if not p.is_optional():
                    min_kw_only = i - max_pos
            elif p.is_vararg():
                if vararg != NO_VARARG:
                    fail("Too many var args")
                pseudo_args += 1
                vararg = i - 1
            else:
                if vararg == NO_VARARG:
                    max_pos = i
                if p.is_positional_only():
                    pos_only = i
                if not p.is_optional():
                    min_pos = i

        meth_o = (len(parameters) == 1 and
                  parameters[0].is_positional_only() and
                  not converters[0].is_optional() and
                  not requires_defining_class and
                  not new_or_init)

        # we have to set these things before we're done:
        #
        # docstring_prototype
        # docstring_definition
        # impl_prototype
        # methoddef_define
        # parser_prototype
        # parser_definition
        # impl_definition
        # cpp_if
        # cpp_endif
        # methoddef_ifndef

        return_value_declaration = "PyObject *return_value = NULL;"
        methoddef_define = self.METHODDEF_PROTOTYPE_DEFINE
        if new_or_init and not f.docstring:
            docstring_prototype = docstring_definition = ''
        else:
            docstring_prototype = self.DOCSTRING_PROTOTYPE_VAR
            docstring_definition = self.DOCSTRING_PROTOTYPE_STRVAR
        impl_definition = self.IMPL_DEFINITION_PROTOTYPE
        impl_prototype = parser_prototype = parser_definition = None

        # parser_body_fields remembers the fields passed in to the
        # previous call to parser_body. this is used for an awful hack.
        parser_body_fields: tuple[str, ...] = ()
        def parser_body(
                prototype: str,
                *fields: str,
                declarations: str = ''
        ) -> str:
            nonlocal parser_body_fields
            add, output = text_accumulator()
            add(prototype)
            parser_body_fields = fields

            preamble = normalize_snippet("""
                {{
                    {return_value_declaration}
                    {parser_declarations}
                    {declarations}
                    {initializers}
            """) + "\n"
            finale = normalize_snippet("""
                    {modifications}
                    {return_value} = {c_basename}_impl({impl_arguments});
                    {return_conversion}
                    {post_parsing}

                {exit_label}
                    {cleanup}
                    return return_value;
                }}
            """)
            for field in preamble, *fields, finale:
                add('\n')
                add(field)
            return linear_format(output(), parser_declarations=declarations)

        parsearg: str | None
        if not parameters:
            parser_code: list[str] | None
            if not requires_defining_class:
                # no parameters, METH_NOARGS
                flags = "METH_NOARGS"
                parser_prototype = self.PARSER_PROTOTYPE_NOARGS
                parser_code = []
            else:
                assert not new_or_init

                flags = "METH_METHOD|METH_FASTCALL|METH_KEYWORDS"
                parser_prototype = self.PARSER_PROTOTYPE_DEF_CLASS
                return_error = ('return NULL;' if default_return_converter
                                else 'goto exit;')
                parser_code = [normalize_snippet("""
                    if (nargs) {{
                        PyErr_SetString(PyExc_TypeError, "{name}() takes no arguments");
                        %s
                    }}
                    """ % return_error, indent=4)]

            if default_return_converter:
                parser_definition = '\n'.join([
                    parser_prototype,
                    '{{',
                    *parser_code,
                    '    return {c_basename}_impl({impl_arguments});',
                    '}}'])
            else:
                parser_definition = parser_body(parser_prototype, *parser_code)

        elif meth_o:
            flags = "METH_O"

            if (isinstance(converters[0], object_converter) and
                converters[0].format_unit == 'O'):
                meth_o_prototype = self.METH_O_PROTOTYPE

                if default_return_converter:
                    # maps perfectly to METH_O, doesn't need a return converter.
                    # so we skip making a parse function
                    # and call directly into the impl function.
                    impl_prototype = parser_prototype = parser_definition = ''
                    impl_definition = meth_o_prototype
                else:
                    # SLIGHT HACK
                    # use impl_parameters for the parser here!
                    parser_prototype = meth_o_prototype
                    parser_definition = parser_body(parser_prototype)

            else:
                argname = 'arg'
                if parameters[0].name == argname:
                    argname += '_'
                parser_prototype = normalize_snippet("""
                    static PyObject *
                    {c_basename}({self_type}{self_name}, PyObject *%s)
                    """ % argname)

                displayname = parameters[0].get_displayname(0)
                parsearg = converters[0].parse_arg(argname, displayname)
                if parsearg is None:
                    parsearg = """
                        if (!PyArg_Parse(%s, "{format_units}:{name}", {parse_arguments})) {{
                            goto exit;
                        }}
                        """ % argname
                parser_definition = parser_body(parser_prototype,
                                                normalize_snippet(parsearg, indent=4))

        elif has_option_groups:
            # positional parameters with option groups
            # (we have to generate lots of PyArg_ParseTuple calls
            #  in a big switch statement)

            flags = "METH_VARARGS"
            parser_prototype = self.PARSER_PROTOTYPE_VARARGS
            parser_definition = parser_body(parser_prototype, '    {option_group_parsing}')

        elif not requires_defining_class and pos_only == len(parameters) - pseudo_args:
            if not new_or_init:
                # positional-only, but no option groups
                # we only need one call to _PyArg_ParseStack

                flags = "METH_FASTCALL"
                parser_prototype = self.PARSER_PROTOTYPE_FASTCALL
                nargs = 'nargs'
                argname_fmt = 'args[%d]'
            else:
                # positional-only, but no option groups
                # we only need one call to PyArg_ParseTuple

                flags = "METH_VARARGS"
                parser_prototype = self.PARSER_PROTOTYPE_VARARGS
                nargs = 'PyTuple_GET_SIZE(args)'
                argname_fmt = 'PyTuple_GET_ITEM(args, %d)'


            left_args = f"{nargs} - {max_pos}"
            max_args = NO_VARARG if (vararg != NO_VARARG) else max_pos
            parser_code = [normalize_snippet("""
                if (!_PyArg_CheckPositional("{name}", %s, %d, %s)) {{
                    goto exit;
                }}
                """ % (nargs, min_pos, max_args), indent=4)]

            has_optional = False
            for i, p in enumerate(parameters):
                displayname = p.get_displayname(i+1)
                argname = argname_fmt % i

                if p.is_vararg():
                    if not new_or_init:
                        parser_code.append(normalize_snippet("""
                            %s = PyTuple_New(%s);
                            if (!%s) {{
                                goto exit;
                            }}
                            for (Py_ssize_t i = 0; i < %s; ++i) {{
                                PyTuple_SET_ITEM(%s, i, Py_NewRef(args[%d + i]));
                            }}
                            """ % (
                                p.converter.parser_name,
                                left_args,
                                p.converter.parser_name,
                                left_args,
                                p.converter.parser_name,
                                max_pos
                            ), indent=4))
                    else:
                        parser_code.append(normalize_snippet("""
                            %s = PyTuple_GetSlice(%d, -1);
                            """ % (
                                p.converter.parser_name,
                                max_pos
                            ), indent=4))
                    continue

                parsearg = p.converter.parse_arg(argname, displayname)
                if parsearg is None:
                    parser_code = None
                    break
                if has_optional or p.is_optional():
                    has_optional = True
                    parser_code.append(normalize_snippet("""
                        if (%s < %d) {{
                            goto skip_optional;
                        }}
                        """, indent=4) % (nargs, i + 1))
                parser_code.append(normalize_snippet(parsearg, indent=4))

            if parser_code is not None:
                if has_optional:
                    parser_code.append("skip_optional:")
            else:
                if not new_or_init:
                    parser_code = [normalize_snippet("""
                        if (!_PyArg_ParseStack(args, nargs, "{format_units}:{name}",
                            {parse_arguments})) {{
                            goto exit;
                        }}
                        """, indent=4)]
                else:
                    parser_code = [normalize_snippet("""
                        if (!PyArg_ParseTuple(args, "{format_units}:{name}",
                            {parse_arguments})) {{
                            goto exit;
                        }}
                        """, indent=4)]
            parser_definition = parser_body(parser_prototype, *parser_code)

        else:
            has_optional_kw = (max(pos_only, min_pos) + min_kw_only < len(converters) - int(vararg != NO_VARARG))
            if vararg == NO_VARARG:
                args_declaration = "_PyArg_UnpackKeywords", "%s, %s, %s" % (
                    min_pos,
                    max_pos,
                    min_kw_only
                )
                nargs = "nargs"
            else:
                args_declaration = "_PyArg_UnpackKeywordsWithVararg", "%s, %s, %s, %s" % (
                    min_pos,
                    max_pos,
                    min_kw_only,
                    vararg
                )
                nargs = f"Py_MIN(nargs, {max_pos})" if max_pos else "0"
            if not new_or_init:
                flags = "METH_FASTCALL|METH_KEYWORDS"
                parser_prototype = self.PARSER_PROTOTYPE_FASTCALL_KEYWORDS
                argname_fmt = 'args[%d]'
                declarations = declare_parser(f)
                declarations += "\nPyObject *argsbuf[%s];" % len(converters)
                if has_optional_kw:
                    declarations += "\nPy_ssize_t noptargs = %s + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - %d;" % (nargs, min_pos + min_kw_only)
                parser_code = [normalize_snippet("""
                    args = %s(args, nargs, NULL, kwnames, &_parser, %s, argsbuf);
                    if (!args) {{
                        goto exit;
                    }}
                    """ % args_declaration, indent=4)]
            else:
                # positional-or-keyword arguments
                flags = "METH_VARARGS|METH_KEYWORDS"
                parser_prototype = self.PARSER_PROTOTYPE_KEYWORD
                argname_fmt = 'fastargs[%d]'
                declarations = declare_parser(f)
                declarations += "\nPyObject *argsbuf[%s];" % len(converters)
                declarations += "\nPyObject * const *fastargs;"
                declarations += "\nPy_ssize_t nargs = PyTuple_GET_SIZE(args);"
                if has_optional_kw:
                    declarations += "\nPy_ssize_t noptargs = %s + (kwargs ? PyDict_GET_SIZE(kwargs) : 0) - %d;" % (nargs, min_pos + min_kw_only)
                parser_code = [normalize_snippet("""
                    fastargs = %s(_PyTuple_CAST(args)->ob_item, nargs, kwargs, NULL, &_parser, %s, argsbuf);
                    if (!fastargs) {{
                        goto exit;
                    }}
                    """ % args_declaration, indent=4)]

            if requires_defining_class:
                flags = 'METH_METHOD|' + flags
                parser_prototype = self.PARSER_PROTOTYPE_DEF_CLASS

            add_label: str | None = None
            for i, p in enumerate(parameters):
                if isinstance(p.converter, defining_class_converter):
                    raise ValueError("defining_class should be the first "
                                     "parameter (after self)")
                displayname = p.get_displayname(i+1)
                parsearg = p.converter.parse_arg(argname_fmt % i, displayname)
                if parsearg is None:
                    parser_code = None
                    break
                if add_label and (i == pos_only or i == max_pos):
                    parser_code.append("%s:" % add_label)
                    add_label = None
                if not p.is_optional():
                    parser_code.append(normalize_snippet(parsearg, indent=4))
                elif i < pos_only:
                    add_label = 'skip_optional_posonly'
                    parser_code.append(normalize_snippet("""
                        if (nargs < %d) {{
                            goto %s;
                        }}
                        """ % (i + 1, add_label), indent=4))
                    if has_optional_kw:
                        parser_code.append(normalize_snippet("""
                            noptargs--;
                            """, indent=4))
                    parser_code.append(normalize_snippet(parsearg, indent=4))
                else:
                    if i < max_pos:
                        label = 'skip_optional_pos'
                        first_opt = max(min_pos, pos_only)
                    else:
                        label = 'skip_optional_kwonly'
                        first_opt = max_pos + min_kw_only
                        if vararg != NO_VARARG:
                            first_opt += 1
                    if i == first_opt:
                        add_label = label
                        parser_code.append(normalize_snippet("""
                            if (!noptargs) {{
                                goto %s;
                            }}
                            """ % add_label, indent=4))
                    if i + 1 == len(parameters):
                        parser_code.append(normalize_snippet(parsearg, indent=4))
                    else:
                        add_label = label
                        parser_code.append(normalize_snippet("""
                            if (%s) {{
                            """ % (argname_fmt % i), indent=4))
                        parser_code.append(normalize_snippet(parsearg, indent=8))
                        parser_code.append(normalize_snippet("""
                                if (!--noptargs) {{
                                    goto %s;
                                }}
                            }}
                            """ % add_label, indent=4))

            if parser_code is not None:
                if add_label:
                    parser_code.append("%s:" % add_label)
            else:
                declarations = declare_parser(f, hasformat=True)
                if not new_or_init:
                    parser_code = [normalize_snippet("""
                        if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser{parse_arguments_comma}
                            {parse_arguments})) {{
                            goto exit;
                        }}
                        """, indent=4)]
                else:
                    parser_code = [normalize_snippet("""
                        if (!_PyArg_ParseTupleAndKeywordsFast(args, kwargs, &_parser,
                            {parse_arguments})) {{
                            goto exit;
                        }}
                        """, indent=4)]
                parser_definition = parser_body(parser_prototype, *parser_code,
                                                declarations=declarations)
2014-01-19 03:50:21 -04:00
|
|
|
if new_or_init:
|
|
|
|
methoddef_define = ''
|
|
|
|
|
2023-07-13 19:54:05 -03:00
|
|
|
if f.kind is METHOD_NEW:
|
2023-08-02 16:37:36 -03:00
|
|
|
parser_prototype = self.PARSER_PROTOTYPE_KEYWORD
|
2014-01-19 03:50:21 -04:00
|
|
|
else:
|
|
|
|
return_value_declaration = "int return_value = -1;"
|
2023-08-02 16:37:36 -03:00
|
|
|
parser_prototype = self.PARSER_PROTOTYPE_KEYWORD___INIT__
|
2014-01-19 03:50:21 -04:00
|
|
|
|
|
|
|
            fields = list(parser_body_fields)
            parses_positional = 'METH_NOARGS' not in flags
            parses_keywords = 'METH_KEYWORDS' in flags
            if parses_keywords:
                assert parses_positional

            if requires_defining_class:
                raise ValueError("Slot methods cannot access their defining class.")

            if not parses_keywords:
                declarations = '{base_type_ptr}'
                fields.insert(0, normalize_snippet("""
                    if ({self_type_check}!_PyArg_NoKeywords("{name}", kwargs)) {{
                        goto exit;
                    }}
                    """, indent=4))
                if not parses_positional:
                    fields.insert(0, normalize_snippet("""
                        if ({self_type_check}!_PyArg_NoPositional("{name}", args)) {{
                            goto exit;
                        }}
                        """, indent=4))

            parser_definition = parser_body(parser_prototype, *fields,
                                            declarations=declarations)

        if flags in ('METH_NOARGS', 'METH_O', 'METH_VARARGS'):
            methoddef_cast = "(PyCFunction)"
            methoddef_cast_end = ""
        else:
            methoddef_cast = "_PyCFunction_CAST("
            methoddef_cast_end = ")"

        if f.methoddef_flags:
            flags += '|' + f.methoddef_flags

        methoddef_define = methoddef_define.replace('{methoddef_flags}', flags)
        methoddef_define = methoddef_define.replace('{methoddef_cast}', methoddef_cast)
        methoddef_define = methoddef_define.replace('{methoddef_cast_end}', methoddef_cast_end)

        methoddef_ifndef = ''
        conditional = self.cpp.condition()
        if not conditional:
            cpp_if = cpp_endif = ''
        else:
            cpp_if = "#if " + conditional
            cpp_endif = "#endif /* " + conditional + " */"

            if methoddef_define and f.full_name not in clinic.ifndef_symbols:
                clinic.ifndef_symbols.add(f.full_name)
                methoddef_ifndef = self.METHODDEF_PROTOTYPE_IFNDEF

        # add ';' to the end of parser_prototype and impl_prototype
        # (they mustn't be None, but they could be an empty string.)
        assert parser_prototype is not None
        if parser_prototype:
            assert not parser_prototype.endswith(';')
            parser_prototype += ';'

        if impl_prototype is None:
            impl_prototype = impl_definition
        if impl_prototype:
            impl_prototype += ";"

        parser_definition = parser_definition.replace("{return_value_declaration}", return_value_declaration)

        d = {
            "docstring_prototype" : docstring_prototype,
            "docstring_definition" : docstring_definition,
            "impl_prototype" : impl_prototype,
            "methoddef_define" : methoddef_define,
            "parser_prototype" : parser_prototype,
            "parser_definition" : parser_definition,
            "impl_definition" : impl_definition,
            "cpp_if" : cpp_if,
            "cpp_endif" : cpp_endif,
            "methoddef_ifndef" : methoddef_ifndef,
        }

        # make sure we didn't forget to assign something,
        # and wrap each non-empty value in \n's
        d2 = {}
        for name, value in d.items():
            assert value is not None, "got a None value for template " + repr(name)
            if value:
                value = '\n' + value + '\n'
            d2[name] = value
        return d2

    @staticmethod
    def group_to_variable_name(group: int) -> str:
        adjective = "left_" if group < 0 else "right_"
        return "group_" + adjective + str(abs(group))

    def render_option_group_parsing(
            self,
            f: Function,
            template_dict: TemplateDict
    ) -> None:
        # positional only, grouped, optional arguments!
        # can be optional on the left or right.
        # here's an example:
        #
        # [ [ [ A1 A2 ] B1 B2 B3 ] C1 C2 ] D1 D2 D3 [ E1 E2 E3 [ F1 F2 F3 ] ]
        #
        # Here group D is required, and all other groups are optional.
        # (Group D's "group" is actually None.)
        # We can figure out which sets of arguments we have based on
        # how many arguments are in the tuple.
        #
        # Note that you need to count up on both sides. For example,
        # you could have groups C+D, or C+D+E, or C+D+E+F.
        #
        # What if the number of arguments leads us to an ambiguous result?
        # Clinic prefers groups on the left. So in the above example,
        # eight arguments would map to B+C+D, not C+D+E.
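        # Illustrative sketch (not literal output): for a hypothetical
        # signature "spam([a,] b, /)" where only "b" is required and both
        # converters happen to use the "O" format unit, the loop below would
        # emit roughly:
        #
        #     switch (PyTuple_GET_SIZE(args)) {
        #         case 1:
        #             if (!PyArg_ParseTuple(args, "O:spam", &b)) {
        #                 goto exit;
        #             }
        #             break;
        #         case 2:
        #             if (!PyArg_ParseTuple(args, "OO:spam", &a, &b)) {
        #                 goto exit;
        #             }
        #             group_left_1 = 1;
        #             break;
        #         default:
        #             PyErr_SetString(PyExc_TypeError,
        #                             "spam requires 1 to 2 arguments");
        #             goto exit;
        #     }
        #
        # The group_left_N/group_right_N flags tell the impl which optional
        # groups were actually supplied.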
add, output = text_accumulator()
|
|
|
|
parameters = list(f.parameters.values())
|
2014-01-24 10:17:25 -04:00
|
|
|
if isinstance(parameters[0].converter, self_converter):
|
|
|
|
del parameters[0]
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-20 18:33:33 -03:00
|
|
|
group: list[Parameter] | None = None
|
2013-10-19 04:09:25 -03:00
|
|
|
left = []
|
|
|
|
right = []
|
2023-07-20 18:33:33 -03:00
|
|
|
required: list[Parameter] = []
|
|
|
|
last: int | Literal[Sentinels.unspecified] = unspecified
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
for p in parameters:
|
|
|
|
group_id = p.group
|
|
|
|
if group_id != last:
|
|
|
|
last = group_id
|
|
|
|
group = []
|
|
|
|
if group_id < 0:
|
|
|
|
left.append(group)
|
|
|
|
elif group_id == 0:
|
|
|
|
group = required
|
|
|
|
else:
|
|
|
|
right.append(group)
|
2023-07-20 18:33:33 -03:00
|
|
|
assert group is not None
|
2013-10-19 04:09:25 -03:00
|
|
|
group.append(p)
|
|
|
|
|
|
|
|
count_min = sys.maxsize
|
|
|
|
count_max = -1
|
|
|
|
|
2016-06-09 10:02:15 -03:00
|
|
|
add("switch (PyTuple_GET_SIZE(args)) {\n")
|
2013-10-19 04:09:25 -03:00
|
|
|
for subset in permute_optional_groups(left, required, right):
|
|
|
|
count = len(subset)
|
|
|
|
count_min = min(count_min, count)
|
|
|
|
count_max = max(count_max, count)
|
|
|
|
|
2014-01-12 12:49:30 -04:00
|
|
|
if count == 0:
|
|
|
|
add(""" case 0:
|
|
|
|
break;
|
|
|
|
""")
|
|
|
|
continue
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
group_ids = {p.group for p in subset} # eliminate duplicates
|
2023-07-20 18:33:33 -03:00
|
|
|
d: dict[str, str | int] = {}
|
2013-10-19 04:09:25 -03:00
|
|
|
d['count'] = count
|
|
|
|
d['name'] = f.name
|
|
|
|
d['format_units'] = "".join(p.converter.format_unit for p in subset)
|
|
|
|
|
2023-07-20 18:33:33 -03:00
|
|
|
parse_arguments: list[str] = []
|
2013-10-19 04:09:25 -03:00
|
|
|
for p in subset:
|
|
|
|
p.converter.parse_argument(parse_arguments)
|
|
|
|
d['parse_arguments'] = ", ".join(parse_arguments)
|
|
|
|
|
|
|
|
group_ids.discard(0)
|
2023-07-20 18:33:33 -03:00
|
|
|
lines = "\n".join([
|
|
|
|
self.group_to_variable_name(g) + " = 1;"
|
|
|
|
for g in group_ids
|
|
|
|
])
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2020-01-06 06:46:04 -04:00
|
|
|
s = """\
|
2013-10-19 04:09:25 -03:00
|
|
|
case {count}:
|
2016-06-09 10:16:06 -03:00
|
|
|
if (!PyArg_ParseTuple(args, "{format_units}:{name}", {parse_arguments})) {{
|
2014-01-22 07:05:49 -04:00
|
|
|
goto exit;
|
2016-06-09 10:16:06 -03:00
|
|
|
}}
|
2013-10-19 04:09:25 -03:00
|
|
|
{group_booleans}
|
|
|
|
break;
|
2020-01-06 06:46:04 -04:00
|
|
|
"""
|
2013-10-19 04:09:25 -03:00
|
|
|
s = linear_format(s, group_booleans=lines)
|
|
|
|
s = s.format_map(d)
|
|
|
|
add(s)
|
|
|
|
|
|
|
|
add(" default:\n")
|
|
|
|
s = ' PyErr_SetString(PyExc_TypeError, "{} requires {} to {} arguments");\n'
|
|
|
|
add(s.format(f.full_name, count_min, count_max))
|
2014-01-22 07:05:49 -04:00
|
|
|
add(' goto exit;\n')
|
2016-06-09 10:02:15 -03:00
|
|
|
add("}")
|
|
|
|
template_dict['option_group_parsing'] = format_escape(output())
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-04 20:07:57 -03:00
|
|
|
def render_function(
|
|
|
|
self,
|
|
|
|
clinic: Clinic | None,
|
|
|
|
f: Function | None
|
|
|
|
) -> str:
|
|
|
|
if f is None or clinic is None:
|
2013-10-19 04:09:25 -03:00
|
|
|
return ""
|
|
|
|
|
|
|
|
add, output = text_accumulator()
|
|
|
|
data = CRenderData()
|
|
|
|
|
2014-02-01 02:03:12 -04:00
|
|
|
assert f.parameters, "We should always have a 'self' at this point!"
|
|
|
|
parameters = f.render_parameters
|
2013-10-19 04:09:25 -03:00
|
|
|
converters = [p.converter for p in parameters]
|
|
|
|
|
2023-07-29 15:47:42 -03:00
|
|
|
templates = self.output_templates(f, clinic)
|
2014-01-24 10:17:25 -04:00
|
|
|
|
|
|
|
f_self = parameters[0]
|
|
|
|
selfless = parameters[1:]
|
|
|
|
assert isinstance(f_self.converter, self_converter), "No self parameter in " + repr(f.full_name) + "!"
|
|
|
|
|
|
|
|
last_group = 0
|
|
|
|
first_optional = len(selfless)
|
2016-06-09 10:30:29 -03:00
|
|
|
positional = selfless and selfless[-1].is_positional_only()
|
2023-07-13 19:54:05 -03:00
|
|
|
new_or_init = f.kind.new_or_init
|
2014-01-24 10:17:25 -04:00
|
|
|
has_option_groups = False
|
|
|
|
|
|
|
|
# offset i by -1 because first_optional needs to ignore self
|
|
|
|
for i, p in enumerate(parameters, -1):
|
|
|
|
c = p.converter
|
|
|
|
|
|
|
|
if (i != -1) and (p.default is not unspecified):
|
|
|
|
first_optional = min(first_optional, i)
|
|
|
|
|
2021-07-16 12:43:02 -03:00
|
|
|
if p.is_vararg():
|
2023-05-20 17:16:49 -03:00
|
|
|
data.cleanup.append(f"Py_XDECREF({c.parser_name});")
|
2021-07-16 12:43:02 -03:00
|
|
|
|
2014-01-24 10:17:25 -04:00
|
|
|
# insert group variable
|
|
|
|
group = p.group
|
|
|
|
if last_group != group:
|
|
|
|
last_group = group
|
|
|
|
if group:
|
|
|
|
group_name = self.group_to_variable_name(group)
|
|
|
|
data.impl_arguments.append(group_name)
|
|
|
|
data.declarations.append("int " + group_name + " = 0;")
|
|
|
|
data.impl_parameters.append("int " + group_name)
|
|
|
|
has_option_groups = True
|
|
|
|
|
|
|
|
c.render(p, data)
|
|
|
|
|
|
|
|
if has_option_groups and (not positional):
|
|
|
|
fail("You cannot use optional groups ('[' and ']')\nunless all parameters are positional-only ('/').")
|
|
|
|
|
|
|
|
# HACK
|
|
|
|
# when we're METH_O, but have a custom return converter,
|
|
|
|
# we use "impl_parameters" for the parsing function
|
|
|
|
# because that works better. but that means we must
|
2014-10-19 12:04:38 -03:00
|
|
|
# suppress actually declaring the impl's parameters
|
2014-01-24 10:17:25 -04:00
|
|
|
# as variables in the parsing function. but since it's
|
|
|
|
# METH_O, we have exactly one anyway, so we know exactly
|
|
|
|
# where it is.
|
|
|
|
if ("METH_O" in templates['methoddef_define'] and
|
2015-04-03 18:12:11 -03:00
|
|
|
'{impl_parameters}' in templates['parser_prototype']):
|
2014-01-24 10:17:25 -04:00
|
|
|
data.declarations.pop(0)
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
full_name = f.full_name
|
2023-07-26 07:54:03 -03:00
|
|
|
template_dict = {'full_name': full_name}
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2014-01-24 10:17:25 -04:00
|
|
|
if new_or_init:
|
2023-07-04 20:07:57 -03:00
|
|
|
assert isinstance(f.cls, Class)
|
2014-01-24 10:17:25 -04:00
|
|
|
name = f.cls.name
|
|
|
|
else:
|
|
|
|
name = f.name
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
template_dict['name'] = name
|
|
|
|
|
2014-01-12 18:12:59 -04:00
|
|
|
if f.c_basename:
|
|
|
|
c_basename = f.c_basename
|
|
|
|
else:
|
|
|
|
fields = full_name.split(".")
|
|
|
|
if fields[-1] == '__new__':
|
|
|
|
fields.pop()
|
|
|
|
c_basename = "_".join(fields)
|
2014-01-24 10:17:25 -04:00
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
template_dict['c_basename'] = c_basename
|
|
|
|
|
2023-05-20 17:16:49 -03:00
|
|
|
template_dict['methoddef_name'] = c_basename.upper() + "_METHODDEF"
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
template_dict['docstring'] = self.docstring_for_c_string(f)
|
|
|
|
|
2014-01-26 00:43:29 -04:00
|
|
|
template_dict['self_name'] = template_dict['self_type'] = template_dict['self_type_check'] = ''
|
2020-05-13 19:31:31 -03:00
|
|
|
for converter in converters:
|
|
|
|
converter.set_template_dict(template_dict)
|
2013-11-23 18:54:00 -04:00
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
f.return_converter.render(f, data)
|
|
|
|
template_dict['impl_return_type'] = f.return_converter.type
|
|
|
|
|
2016-06-09 10:02:15 -03:00
|
|
|
template_dict['declarations'] = format_escape("\n".join(data.declarations))
|
2013-10-19 04:09:25 -03:00
|
|
|
template_dict['initializers'] = "\n\n".join(data.initializers)
|
2014-01-26 00:43:29 -04:00
|
|
|
template_dict['modifications'] = '\n\n'.join(data.modifications)
|
2022-08-11 18:25:49 -03:00
|
|
|
template_dict['keywords_c'] = ' '.join('"' + k + '",'
|
|
|
|
for k in data.keywords)
|
|
|
|
keywords = [k for k in data.keywords if k]
|
|
|
|
template_dict['keywords_py'] = ' '.join('&_Py_ID(' + k + '),'
|
|
|
|
for k in keywords)
|
2013-10-19 04:09:25 -03:00
|
|
|
template_dict['format_units'] = ''.join(data.format_units)
|
|
|
|
template_dict['parse_arguments'] = ', '.join(data.parse_arguments)
|
2020-05-07 10:39:59 -03:00
|
|
|
if data.parse_arguments:
|
|
|
|
template_dict['parse_arguments_comma'] = ',';
|
|
|
|
else:
|
|
|
|
template_dict['parse_arguments_comma'] = '';
|
2013-10-19 04:09:25 -03:00
|
|
|
template_dict['impl_parameters'] = ", ".join(data.impl_parameters)
|
|
|
|
template_dict['impl_arguments'] = ", ".join(data.impl_arguments)
|
2016-06-09 10:02:15 -03:00
|
|
|
template_dict['return_conversion'] = format_escape("".join(data.return_conversion).rstrip())
|
2022-11-24 10:01:26 -04:00
|
|
|
template_dict['post_parsing'] = format_escape("".join(data.post_parsing).rstrip())
|
2016-06-09 10:02:15 -03:00
|
|
|
template_dict['cleanup'] = format_escape("".join(data.cleanup))
|
2013-10-19 04:09:25 -03:00
|
|
|
template_dict['return_value'] = data.return_value
|
|
|
|
|
2014-01-24 10:17:25 -04:00
|
|
|
# used by unpack tuple code generator
|
|
|
|
unpack_min = first_optional
|
|
|
|
unpack_max = len(selfless)
|
|
|
|
template_dict['unpack_min'] = str(unpack_min)
|
|
|
|
template_dict['unpack_max'] = str(unpack_max)
|
2014-01-19 03:50:21 -04:00
|
|
|
|
2014-01-17 21:47:17 -04:00
|
|
|
if has_option_groups:
|
|
|
|
self.render_option_group_parsing(f, template_dict)
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2015-04-03 17:09:02 -03:00
|
|
|
# buffers, not destination
|
|
|
|
for name, destination in clinic.destination_buffers.items():
|
2014-01-17 21:47:17 -04:00
|
|
|
template = templates[name]
|
|
|
|
if has_option_groups:
|
|
|
|
template = linear_format(template,
|
|
|
|
option_group_parsing=template_dict['option_group_parsing'])
|
2013-10-19 04:09:25 -03:00
|
|
|
template = linear_format(template,
|
2014-01-17 21:47:17 -04:00
|
|
|
declarations=template_dict['declarations'],
|
|
|
|
return_conversion=template_dict['return_conversion'],
|
|
|
|
initializers=template_dict['initializers'],
|
2014-01-26 00:43:29 -04:00
|
|
|
modifications=template_dict['modifications'],
|
2022-11-24 10:01:26 -04:00
|
|
|
post_parsing=template_dict['post_parsing'],
|
2014-01-17 21:47:17 -04:00
|
|
|
cleanup=template_dict['cleanup'],
|
|
|
|
)
|
|
|
|
|
|
|
|
# Only generate the "exit:" label
|
|
|
|
# if we have any gotos
|
|
|
|
need_exit_label = "goto exit;" in template
|
|
|
|
template = linear_format(template,
|
|
|
|
exit_label="exit:" if need_exit_label else ''
|
|
|
|
)
|
|
|
|
|
|
|
|
s = template.format_map(template_dict)
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2015-04-14 19:07:59 -03:00
|
|
|
# mild hack:
|
|
|
|
# reflow long impl declarations
|
|
|
|
if name in {"impl_prototype", "impl_definition"}:
|
|
|
|
s = wrap_declarations(s)
|
|
|
|
|
2014-01-17 21:47:17 -04:00
|
|
|
if clinic.line_prefix:
|
|
|
|
s = indent_all_lines(s, clinic.line_prefix)
|
|
|
|
if clinic.line_suffix:
|
|
|
|
s = suffix_all_lines(s, clinic.line_suffix)
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2014-01-17 21:47:17 -04:00
|
|
|
destination.append(s)
|
|
|
|
|
|
|
|
return clinic.get_destination('block').dump()
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
|
2023-07-04 19:13:30 -03:00
|
|
|
def create_regex(
        before: str,
        after: str,
        word: bool = True,
        whole_line: bool = True
) -> re.Pattern[str]:
    """Create an re object for matching marker lines."""
    group_re = r"\w+" if word else ".+"
    pattern = r'{}({}){}'
    if whole_line:
        pattern = '^' + pattern + '$'
    pattern = pattern.format(re.escape(before), group_re, re.escape(after))
    return re.compile(pattern)
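
# Illustrative sketch with made-up markers (not Clinic's real ones):
# create_regex("#[", "]#") returns a pattern matching a whole line made of
# "#[", a single word, and "]#" -- so "#[python]#" matches with
# group(1) == "python".  Passing word=False lets the middle capture arbitrary
# text, and whole_line=False drops the ^/$ anchors (that variant is used with
# re.search() later in this file to spot markers mid-line).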
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
|
2023-07-11 18:21:14 -03:00
|
|
|
@dc.dataclass(slots=True, repr=False)
|
2013-10-19 04:09:25 -03:00
|
|
|
class Block:
|
|
|
|
r"""
|
|
|
|
Represents a single block of text embedded in
|
|
|
|
another file. If dsl_name is None, the block represents
|
|
|
|
verbatim text, raw original text from the file, in
|
|
|
|
which case "input" will be the only non-false member.
|
|
|
|
If dsl_name is not None, the block represents a Clinic
|
|
|
|
block.
|
|
|
|
|
|
|
|
input is always str, with embedded \n characters.
|
|
|
|
input represents the original text from the file;
|
|
|
|
if it's a Clinic block, it is the original text with
|
|
|
|
the body_prefix and redundant leading whitespace removed.
|
|
|
|
|
|
|
|
dsl_name is either str or None. If str, it's the text
|
|
|
|
found on the start line of the block between the square
|
|
|
|
brackets.
|
|
|
|
|
|
|
|
signatures is either list or None. If it's a list,
|
|
|
|
it may only contain clinic.Module, clinic.Class, and
|
|
|
|
clinic.Function objects. At the moment it should
|
|
|
|
contain at most one of each.
|
|
|
|
|
|
|
|
output is either str or None. If str, it's the output
|
|
|
|
from this block, with embedded '\n' characters.
|
|
|
|
|
|
|
|
indent is either str or None. It's the leading whitespace
|
|
|
|
that was found on every line of input. (If body_prefix is
|
|
|
|
not empty, this is the indent *after* removing the
|
|
|
|
body_prefix.)
|
|
|
|
|
|
|
|
preindent is either str or None. It's the whitespace that
|
|
|
|
was found in front of every line of input *before* the
|
|
|
|
"body_prefix" (see the Language object). If body_prefix
|
|
|
|
is empty, preindent must always be empty too.
|
|
|
|
|
|
|
|
To illustrate indent and preindent: Assume that '_'
|
|
|
|
represents whitespace. If the block processed was in a
|
|
|
|
Python file, and looked like this:
|
|
|
|
____#/*[python]
|
|
|
|
____#__for a in range(20):
|
|
|
|
____#____print(a)
|
|
|
|
____#[python]*/
|
|
|
|
"preindent" would be "____" and "indent" would be "__".
|
|
|
|
|
|
|
|
"""
|
2023-07-11 18:21:14 -03:00
|
|
|
input: str
|
|
|
|
dsl_name: str | None = None
|
|
|
|
signatures: list[Module | Class | Function] = dc.field(default_factory=list)
|
|
|
|
output: Any = None # TODO: Very dynamic; probably untypeable in its current form?
|
|
|
|
indent: str = ''
|
|
|
|
preindent: str = ''
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-11 18:21:14 -03:00
|
|
|
def __repr__(self) -> str:
|
2014-01-28 09:00:08 -04:00
|
|
|
dsl_name = self.dsl_name or "text"
|
2023-07-11 18:21:14 -03:00
|
|
|
def summarize(s: object) -> str:
|
2014-01-28 09:00:08 -04:00
|
|
|
s = repr(s)
|
|
|
|
if len(s) > 30:
|
|
|
|
return s[:26] + "..." + s[0]
|
|
|
|
return s
|
|
|
|
return "".join((
|
|
|
|
"<Block ", dsl_name, " input=", summarize(self.input), " output=", summarize(self.output), ">"))
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
class BlockParser:
|
|
|
|
"""
|
|
|
|
Block-oriented parser for Argument Clinic.
|
|
|
|
Iterator, yields Block objects.
|
|
|
|
"""
|
|
|
|
|
2023-07-15 07:11:32 -03:00
|
|
|
def __init__(
|
|
|
|
self,
|
|
|
|
input: str,
|
|
|
|
language: Language,
|
|
|
|
*,
|
|
|
|
verify: bool = True
|
|
|
|
) -> None:
|
2013-10-19 04:09:25 -03:00
|
|
|
"""
|
|
|
|
"input" should be a str object
|
|
|
|
with embedded \n characters.
|
|
|
|
|
|
|
|
"language" should be a Language object.
|
|
|
|
"""
|
|
|
|
language.validate()
|
|
|
|
|
|
|
|
self.input = collections.deque(reversed(input.splitlines(keepends=True)))
|
|
|
|
self.block_start_line_number = self.line_number = 0
|
|
|
|
|
|
|
|
self.language = language
|
|
|
|
before, _, after = language.start_line.partition('{dsl_name}')
|
|
|
|
assert _ == '{dsl_name}'
|
2014-02-09 02:15:29 -04:00
|
|
|
self.find_start_re = create_regex(before, after, whole_line=False)
|
2013-10-19 04:09:25 -03:00
|
|
|
self.start_re = create_regex(before, after)
|
|
|
|
self.verify = verify
|
2023-07-15 07:11:32 -03:00
|
|
|
self.last_checksum_re: re.Pattern[str] | None = None
|
|
|
|
self.last_dsl_name: str | None = None
|
|
|
|
self.dsl_name: str | None = None
|
2014-01-17 21:47:17 -04:00
|
|
|
self.first_block = True
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-15 07:11:32 -03:00
|
|
|
def __iter__(self) -> BlockParser:
|
2013-10-19 04:09:25 -03:00
|
|
|
return self
|
|
|
|
|
2023-07-15 07:11:32 -03:00
|
|
|
def __next__(self) -> Block:
|
2014-01-17 21:47:17 -04:00
|
|
|
while True:
|
|
|
|
if not self.input:
|
|
|
|
raise StopIteration
|
|
|
|
|
|
|
|
if self.dsl_name:
|
|
|
|
return_value = self.parse_clinic_block(self.dsl_name)
|
|
|
|
self.dsl_name = None
|
|
|
|
self.first_block = False
|
|
|
|
return return_value
|
|
|
|
block = self.parse_verbatim_block()
|
|
|
|
if self.first_block and not block.input:
|
|
|
|
continue
|
|
|
|
self.first_block = False
|
|
|
|
return block
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
|
2023-07-15 07:11:32 -03:00
|
|
|
def is_start_line(self, line: str) -> str | None:
|
2013-10-19 04:09:25 -03:00
|
|
|
match = self.start_re.match(line.lstrip())
|
|
|
|
return match.group(1) if match else None
|
|
|
|
|
2023-07-15 07:11:32 -03:00
|
|
|
def _line(self, lookahead: bool = False) -> str:
|
2013-10-19 04:09:25 -03:00
|
|
|
self.line_number += 1
|
2014-02-01 02:03:12 -04:00
|
|
|
line = self.input.pop()
|
2014-07-27 11:22:20 -03:00
|
|
|
if not lookahead:
|
|
|
|
self.language.parse_line(line)
|
2014-02-01 02:03:12 -04:00
|
|
|
return line
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-15 07:11:32 -03:00
|
|
|
def parse_verbatim_block(self) -> Block:
|
2013-10-19 04:09:25 -03:00
|
|
|
add, output = text_accumulator()
|
|
|
|
self.block_start_line_number = self.line_number
|
|
|
|
|
|
|
|
while self.input:
|
|
|
|
line = self._line()
|
|
|
|
dsl_name = self.is_start_line(line)
|
|
|
|
if dsl_name:
|
|
|
|
self.dsl_name = dsl_name
|
|
|
|
break
|
|
|
|
add(line)
|
|
|
|
|
|
|
|
return Block(output())
|
|
|
|
|
2023-07-15 07:11:32 -03:00
|
|
|
def parse_clinic_block(self, dsl_name: str) -> Block:
|
2013-10-19 04:09:25 -03:00
|
|
|
input_add, input_output = text_accumulator()
|
|
|
|
self.block_start_line_number = self.line_number + 1
|
2014-01-07 16:21:08 -04:00
|
|
|
stop_line = self.language.stop_line.format(dsl_name=dsl_name)
|
2013-10-19 04:09:25 -03:00
|
|
|
body_prefix = self.language.body_prefix.format(dsl_name=dsl_name)
|
|
|
|
|
2023-07-15 07:11:32 -03:00
|
|
|
def is_stop_line(line: str) -> bool:
|
2014-01-07 16:21:08 -04:00
|
|
|
# make sure to recognize stop line even if it
|
|
|
|
# doesn't end with EOL (it could be the very end of the file)
|
2022-05-10 04:23:42 -03:00
|
|
|
if line.startswith(stop_line):
|
2023-05-20 08:08:28 -03:00
|
|
|
remainder = line.removeprefix(stop_line)
|
2022-05-10 04:23:42 -03:00
|
|
|
if remainder and not remainder.isspace():
|
|
|
|
fail(f"Garbage after stop line: {remainder!r}")
|
|
|
|
return True
|
|
|
|
else:
|
|
|
|
# gh-92256: don't allow incorrectly formatted stop lines
|
|
|
|
if line.lstrip().startswith(stop_line):
|
|
|
|
fail(f"Whitespace is not allowed before the stop line: {line!r}")
|
2014-01-07 16:21:08 -04:00
|
|
|
return False
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
# consume body of program
|
|
|
|
while self.input:
|
|
|
|
line = self._line()
|
2014-01-07 16:21:08 -04:00
|
|
|
if is_stop_line(line) or self.is_start_line(line):
|
2013-10-19 04:09:25 -03:00
|
|
|
break
|
|
|
|
if body_prefix:
|
|
|
|
line = line.lstrip()
|
|
|
|
assert line.startswith(body_prefix)
|
2023-05-20 08:08:28 -03:00
|
|
|
line = line.removeprefix(body_prefix)
|
2013-10-19 04:09:25 -03:00
|
|
|
input_add(line)
|
|
|
|
|
|
|
|
# consume output and checksum line, if present.
|
|
|
|
if self.last_dsl_name == dsl_name:
|
|
|
|
checksum_re = self.last_checksum_re
|
|
|
|
else:
|
2014-01-28 09:00:08 -04:00
|
|
|
before, _, after = self.language.checksum_line.format(dsl_name=dsl_name, arguments='{arguments}').partition('{arguments}')
|
|
|
|
assert _ == '{arguments}'
|
|
|
|
checksum_re = create_regex(before, after, word=False)
|
2013-10-19 04:09:25 -03:00
|
|
|
self.last_dsl_name = dsl_name
|
|
|
|
self.last_checksum_re = checksum_re
|
2023-07-15 07:11:32 -03:00
|
|
|
assert checksum_re is not None
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
# scan forward for checksum line
|
|
|
|
output_add, output_output = text_accumulator()
|
2014-01-28 09:00:08 -04:00
|
|
|
arguments = None
|
2013-10-19 04:09:25 -03:00
|
|
|
while self.input:
|
2014-07-27 11:22:20 -03:00
|
|
|
line = self._line(lookahead=True)
|
2013-10-19 04:09:25 -03:00
|
|
|
match = checksum_re.match(line.lstrip())
|
2014-01-28 09:00:08 -04:00
|
|
|
arguments = match.group(1) if match else None
|
|
|
|
if arguments:
|
2013-10-19 04:09:25 -03:00
|
|
|
break
|
|
|
|
output_add(line)
|
|
|
|
if self.is_start_line(line):
|
|
|
|
break
|
|
|
|
|
2023-07-15 07:11:32 -03:00
|
|
|
output: str | None
|
2013-10-23 03:26:23 -03:00
|
|
|
output = output_output()
|
2014-01-28 09:00:08 -04:00
|
|
|
if arguments:
|
|
|
|
d = {}
|
|
|
|
for field in shlex.split(arguments):
|
|
|
|
name, equals, value = field.partition('=')
|
|
|
|
if not equals:
|
2023-05-20 17:16:49 -03:00
|
|
|
fail("Mangled Argument Clinic marker line:", repr(line))
|
2014-01-28 09:00:08 -04:00
|
|
|
d[name.strip()] = value.strip()
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
if self.verify:
|
2014-01-28 09:00:08 -04:00
|
|
|
if 'input' in d:
|
|
|
|
checksum = d['output']
|
|
|
|
else:
|
|
|
|
checksum = d['checksum']
|
|
|
|
|
|
|
|
computed = compute_checksum(output, len(checksum))
|
2013-10-19 04:09:25 -03:00
|
|
|
if checksum != computed:
|
2014-01-14 15:52:01 -04:00
|
|
|
fail("Checksum mismatch!\nExpected: {}\nComputed: {}\n"
|
|
|
|
"Suggested fix: remove all generated code including "
|
2014-01-17 21:47:17 -04:00
|
|
|
"the end marker,\n"
|
|
|
|
"or use the '-f' option."
|
2014-01-14 15:52:01 -04:00
|
|
|
.format(checksum, computed))
|
2013-10-19 04:09:25 -03:00
|
|
|
else:
|
|
|
|
# put back output
|
2014-01-06 15:10:08 -04:00
|
|
|
output_lines = output.splitlines(keepends=True)
|
|
|
|
self.line_number -= len(output_lines)
|
|
|
|
self.input.extend(reversed(output_lines))
|
2013-10-19 04:09:25 -03:00
|
|
|
output = None
|
|
|
|
|
|
|
|
return Block(input_output(), dsl_name, output=output)
|
|
|
|
|
|
|
|
|
2023-07-06 18:17:12 -03:00
|
|
|
@dc.dataclass(slots=True)
|
2013-10-19 04:09:25 -03:00
|
|
|
class BlockPrinter:
|
2023-07-06 18:17:12 -03:00
|
|
|
language: Language
|
|
|
|
f: io.StringIO = dc.field(default_factory=io.StringIO)
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-17 08:47:08 -03:00
|
|
|
def print_block(
|
|
|
|
self,
|
|
|
|
block: Block,
|
|
|
|
*,
|
|
|
|
core_includes: bool = False
|
|
|
|
) -> None:
|
2013-10-19 04:09:25 -03:00
|
|
|
input = block.input
|
|
|
|
output = block.output
|
|
|
|
dsl_name = block.dsl_name
|
|
|
|
write = self.f.write
|
|
|
|
|
2021-08-31 10:59:52 -03:00
|
|
|
assert not ((dsl_name is None) ^ (output is None)), "you must specify dsl_name and output together, dsl_name " + repr(dsl_name)
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
if not dsl_name:
|
|
|
|
write(input)
|
|
|
|
return
|
|
|
|
|
|
|
|
write(self.language.start_line.format(dsl_name=dsl_name))
|
|
|
|
write("\n")
|
|
|
|
|
|
|
|
body_prefix = self.language.body_prefix.format(dsl_name=dsl_name)
|
|
|
|
if not body_prefix:
|
|
|
|
write(input)
|
|
|
|
else:
|
|
|
|
for line in input.split('\n'):
|
|
|
|
write(body_prefix)
|
|
|
|
write(line)
|
|
|
|
write("\n")
|
|
|
|
|
|
|
|
write(self.language.stop_line.format(dsl_name=dsl_name))
|
|
|
|
write("\n")
|
|
|
|
|
2022-08-11 18:25:49 -03:00
|
|
|
output = ''
|
|
|
|
if core_includes:
|
|
|
|
output += textwrap.dedent("""
|
|
|
|
#if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
|
|
|
|
# include "pycore_gc.h" // PyGC_Head
|
|
|
|
# include "pycore_runtime.h" // _Py_ID()
|
|
|
|
#endif
|
|
|
|
|
|
|
|
""")
|
|
|
|
|
2014-01-28 09:00:08 -04:00
|
|
|
input = ''.join(block.input)
|
2022-08-11 18:25:49 -03:00
|
|
|
output += ''.join(block.output)
|
2013-10-19 04:09:25 -03:00
|
|
|
if output:
|
|
|
|
if not output.endswith('\n'):
|
2014-01-17 21:47:17 -04:00
|
|
|
output += '\n'
|
|
|
|
write(output)
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-20 17:16:49 -03:00
|
|
|
arguments = "output={output} input={input}".format(
|
|
|
|
output=compute_checksum(output, 16),
|
|
|
|
input=compute_checksum(input, 16)
|
|
|
|
)
|
2014-01-28 09:00:08 -04:00
|
|
|
write(self.language.checksum_line.format(dsl_name=dsl_name, arguments=arguments))
|
2013-10-19 04:09:25 -03:00
|
|
|
write("\n")
|
|
|
|
|
2023-07-17 08:47:08 -03:00
|
|
|
def write(self, text: str) -> None:
|
2014-01-17 21:47:17 -04:00
|
|
|
self.f.write(text)
|
|
|
|
|
|
|
|
|
2015-04-03 17:09:02 -03:00
|
|
|
class BufferSeries:
    """
    Behaves like a "defaultlist".
    When you ask for an index that doesn't exist yet,
    the object grows the list until that item exists.
    So o[n] will always work.

    Supports negative indices for actual items.
    e.g. o[-1] is an element immediately preceding o[0].
    """

    def __init__(self) -> None:
        self._start = 0
        self._array: list[_TextAccumulator] = []
        self._constructor = _text_accumulator

    def __getitem__(self, i: int) -> _TextAccumulator:
        i -= self._start
        if i < 0:
            self._start += i
            prefix = [self._constructor() for x in range(-i)]
            self._array = prefix + self._array
            i = 0
        while i >= len(self._array):
            self._array.append(self._constructor())
        return self._array[i]

    def clear(self) -> None:
        for ta in self._array:
            ta.text.clear()

    def dump(self) -> str:
        texts = [ta.output() for ta in self._array]
        return "".join(texts)
|
|
|
|
|
|
|
|
|
2023-07-12 18:33:47 -03:00
|
|
|
@dc.dataclass(slots=True, repr=False)
|
2014-01-17 21:47:17 -04:00
|
|
|
class Destination:
|
2023-07-12 18:33:47 -03:00
|
|
|
name: str
|
|
|
|
type: str
|
|
|
|
clinic: Clinic
|
|
|
|
buffers: BufferSeries = dc.field(init=False, default_factory=BufferSeries)
|
|
|
|
filename: str = dc.field(init=False) # set in __post_init__
|
|
|
|
|
|
|
|
args: dc.InitVar[tuple[str, ...]] = ()
|
2023-07-11 20:08:28 -03:00
|
|
|
|
2023-07-12 18:33:47 -03:00
|
|
|
def __post_init__(self, args: tuple[str, ...]) -> None:
|
2015-04-03 17:09:02 -03:00
|
|
|
valid_types = ('buffer', 'file', 'suppress')
|
2023-07-12 18:33:47 -03:00
|
|
|
if self.type not in valid_types:
|
2023-07-11 20:08:28 -03:00
|
|
|
fail(
|
2023-07-12 18:33:47 -03:00
|
|
|
f"Invalid destination type {self.type!r} for {self.name}, "
|
2023-07-11 20:08:28 -03:00
|
|
|
f"must be {', '.join(valid_types)}"
|
|
|
|
)
|
2023-07-12 18:33:47 -03:00
|
|
|
extra_arguments = 1 if self.type == "file" else 0
|
2014-01-17 21:47:17 -04:00
|
|
|
if len(args) < extra_arguments:
|
2023-07-12 18:33:47 -03:00
|
|
|
fail(f"Not enough arguments for destination {self.name} new {self.type}")
|
2014-01-17 21:47:17 -04:00
|
|
|
if len(args) > extra_arguments:
|
2023-07-12 18:33:47 -03:00
|
|
|
fail(f"Too many arguments for destination {self.name} new {self.type}")
|
|
|
|
if self.type =='file':
|
2014-01-17 21:47:17 -04:00
|
|
|
d = {}
|
2023-07-12 18:33:47 -03:00
|
|
|
filename = self.clinic.filename
|
2014-01-26 00:43:29 -04:00
|
|
|
d['path'] = filename
|
|
|
|
dirname, basename = os.path.split(filename)
|
|
|
|
if not dirname:
|
|
|
|
dirname = '.'
|
|
|
|
d['dirname'] = dirname
|
|
|
|
d['basename'] = basename
|
|
|
|
d['basename_root'], d['basename_extension'] = os.path.splitext(filename)
|
2014-01-17 21:47:17 -04:00
|
|
|
self.filename = args[0].format_map(d)
|
|
|
|
|
2023-07-12 18:33:47 -03:00
|
|
|
def __repr__(self) -> str:
|
2014-01-17 21:47:17 -04:00
|
|
|
if self.type == 'file':
|
|
|
|
file_repr = " " + repr(self.filename)
|
|
|
|
else:
|
|
|
|
file_repr = ''
|
|
|
|
return "".join(("<Destination ", self.name, " ", self.type, file_repr, ">"))
|
|
|
|
|
2023-07-12 18:33:47 -03:00
|
|
|
def clear(self) -> None:
|
2014-01-17 21:47:17 -04:00
|
|
|
if self.type != 'buffer':
|
|
|
|
fail("Can't clear destination" + self.name + " , it's not of type buffer")
|
2015-04-03 17:09:02 -03:00
|
|
|
self.buffers.clear()
|
2014-01-17 21:47:17 -04:00
|
|
|
|
2023-07-12 18:33:47 -03:00
|
|
|
def dump(self) -> str:
|
2015-04-03 17:09:02 -03:00
|
|
|
return self.buffers.dump()
|
2014-01-17 21:47:17 -04:00
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
# maps strings to Language objects.
# "languages" maps the name of the language ("C", "Python").
# "extensions" maps the file extension ("c", "py").
LangDict = dict[str, Callable[[str], Language]]

languages = { 'C': CLanguage, 'Python': PythonLanguage }
extensions: LangDict = { name: CLanguage for name in "c cc cpp cxx h hh hpp hxx".split() }
extensions['py'] = PythonLanguage


def write_file(filename: str, new_contents: str) -> None:
    try:
        with open(filename, 'r', encoding="utf-8") as fp:
            old_contents = fp.read()

        if old_contents == new_contents:
            # no change: avoid modifying the file modification time
            return
    except FileNotFoundError:
        pass
    # Atomic write using a temporary file and os.replace()
    filename_new = f"{filename}.new"
    with open(filename_new, "w", encoding="utf-8") as fp:
        fp.write(new_contents)
    try:
        os.replace(filename_new, filename)
    except:
        os.unlink(filename_new)
        raise
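
# Note: the write-to-"<filename>.new"-then-os.replace() dance above is what
# makes the update effectively atomic: readers see either the old file or the
# complete new one, never a half-written file.  Usage sketch (hypothetical
# names):
#
#     write_file("spam.c.h", generated_text)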
|
|
|
|
|
|
|
|
|
2023-05-21 17:49:34 -03:00
|
|
|
ClassDict = dict[str, "Class"]
|
|
|
|
DestinationDict = dict[str, Destination]
|
|
|
|
ModuleDict = dict[str, "Module"]
|
2023-07-16 21:04:10 -03:00
|
|
|
|
|
|
|
|
|
|
|
class Parser(Protocol):
|
|
|
|
def __init__(self, clinic: Clinic) -> None: ...
|
|
|
|
def parse(self, block: Block) -> None: ...
|
|
|
|
|
2023-05-21 17:49:34 -03:00
|
|
|
|
2014-02-01 02:03:12 -04:00
|
|
|
clinic = None
|
2013-10-19 04:09:25 -03:00
|
|
|
class Clinic:
|
2014-01-17 21:47:17 -04:00
|
|
|
|
|
|
|
presets_text = """
|
2014-02-01 02:03:12 -04:00
|
|
|
preset block
|
|
|
|
everything block
|
2015-04-03 17:09:02 -03:00
|
|
|
methoddef_ifndef buffer 1
|
2014-02-01 02:03:12 -04:00
|
|
|
docstring_prototype suppress
|
|
|
|
parser_prototype suppress
|
|
|
|
cpp_if suppress
|
|
|
|
cpp_endif suppress
|
|
|
|
|
2014-01-17 21:47:17 -04:00
|
|
|
preset original
|
|
|
|
everything block
|
2015-04-03 17:09:02 -03:00
|
|
|
methoddef_ifndef buffer 1
|
2014-01-17 21:47:17 -04:00
|
|
|
docstring_prototype suppress
|
|
|
|
parser_prototype suppress
|
2014-02-01 02:03:12 -04:00
|
|
|
cpp_if suppress
|
|
|
|
cpp_endif suppress
|
2014-01-17 21:47:17 -04:00
|
|
|
|
|
|
|
preset file
|
|
|
|
everything file
|
2015-04-03 17:09:02 -03:00
|
|
|
methoddef_ifndef file 1
|
2014-01-17 21:47:17 -04:00
|
|
|
docstring_prototype suppress
|
|
|
|
parser_prototype suppress
|
|
|
|
impl_definition block
|
|
|
|
|
|
|
|
preset buffer
|
|
|
|
everything buffer
|
2015-04-03 17:09:02 -03:00
|
|
|
methoddef_ifndef buffer 1
|
|
|
|
impl_definition block
|
2014-01-17 21:47:17 -04:00
|
|
|
docstring_prototype suppress
|
|
|
|
impl_prototype suppress
|
|
|
|
parser_prototype suppress
|
|
|
|
|
|
|
|
preset partial-buffer
|
|
|
|
everything buffer
|
2015-04-03 17:09:02 -03:00
|
|
|
methoddef_ifndef buffer 1
|
2014-01-17 21:47:17 -04:00
|
|
|
docstring_prototype block
|
|
|
|
impl_prototype suppress
|
|
|
|
methoddef_define block
|
|
|
|
parser_prototype block
|
|
|
|
impl_definition block
|
|
|
|
|
|
|
|
"""
|
|
|
|
|
2023-05-21 17:49:34 -03:00
|
|
|
def __init__(
|
|
|
|
self,
|
|
|
|
language: CLanguage,
|
|
|
|
printer: BlockPrinter | None = None,
|
|
|
|
*,
|
2023-07-29 15:46:52 -03:00
|
|
|
filename: str,
|
2023-05-21 17:49:34 -03:00
|
|
|
verify: bool = True,
|
|
|
|
) -> None:
|
2013-10-19 04:09:25 -03:00
|
|
|
# maps strings to Parser objects.
|
|
|
|
# (instantiated from the "parsers" global.)
|
2023-07-16 21:04:10 -03:00
|
|
|
self.parsers: dict[str, Parser] = {}
|
2023-05-21 17:49:34 -03:00
|
|
|
self.language: CLanguage = language
|
2014-01-17 21:47:17 -04:00
|
|
|
if printer:
|
|
|
|
fail("Custom printers are broken right now")
|
2013-10-19 04:09:25 -03:00
|
|
|
self.printer = printer or BlockPrinter(language)
|
|
|
|
self.verify = verify
|
|
|
|
self.filename = filename
|
2023-05-21 17:49:34 -03:00
|
|
|
self.modules: ModuleDict = {}
|
|
|
|
self.classes: ClassDict = {}
|
|
|
|
self.functions: list[Function] = []
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2014-01-17 21:47:17 -04:00
|
|
|
self.line_prefix = self.line_suffix = ''
|
|
|
|
|
2023-05-21 17:49:34 -03:00
|
|
|
self.destinations: DestinationDict = {}
|
2014-01-17 21:47:17 -04:00
|
|
|
self.add_destination("block", "buffer")
|
|
|
|
self.add_destination("suppress", "suppress")
|
|
|
|
self.add_destination("buffer", "buffer")
|
|
|
|
if filename:
|
2014-01-26 00:43:29 -04:00
|
|
|
self.add_destination("file", "file", "{dirname}/clinic/{basename}.h")
|
2014-01-17 21:47:17 -04:00
|
|
|
|
2015-04-03 17:09:02 -03:00
|
|
|
d = self.get_destination_buffer
|
2023-05-20 07:24:00 -03:00
|
|
|
self.destination_buffers = {
|
|
|
|
'cpp_if': d('file'),
|
|
|
|
'docstring_prototype': d('suppress'),
|
|
|
|
'docstring_definition': d('file'),
|
|
|
|
'methoddef_define': d('file'),
|
|
|
|
'impl_prototype': d('file'),
|
|
|
|
'parser_prototype': d('suppress'),
|
|
|
|
'parser_definition': d('file'),
|
|
|
|
'cpp_endif': d('file'),
|
|
|
|
'methoddef_ifndef': d('file', 1),
|
|
|
|
'impl_definition': d('block'),
|
|
|
|
}
|
2014-01-17 21:47:17 -04:00
|
|
|
|
2023-07-26 17:11:15 -03:00
|
|
|
DestBufferType = dict[str, _TextAccumulator]
|
2023-05-21 17:49:34 -03:00
|
|
|
DestBufferList = list[DestBufferType]
|
2014-01-17 21:47:17 -04:00
|
|
|
|
2023-05-21 17:49:34 -03:00
|
|
|
self.destination_buffers_stack: DestBufferList = []
|
|
|
|
self.ifndef_symbols: set[str] = set()
|
|
|
|
|
|
|
|
self.presets: dict[str, dict[Any, Any]] = {}
|
2014-01-17 21:47:17 -04:00
|
|
|
preset = None
|
|
|
|
for line in self.presets_text.strip().split('\n'):
|
|
|
|
line = line.strip()
|
|
|
|
if not line:
|
|
|
|
continue
|
2015-04-03 17:09:02 -03:00
|
|
|
name, value, *options = line.split()
|
2014-01-17 21:47:17 -04:00
|
|
|
if name == 'preset':
|
2023-05-20 07:24:00 -03:00
|
|
|
self.presets[value] = preset = {}
|
2014-01-17 21:47:17 -04:00
|
|
|
continue
|
|
|
|
|
2015-04-03 17:09:02 -03:00
|
|
|
if len(options):
|
|
|
|
index = int(options[0])
|
|
|
|
else:
|
|
|
|
index = 0
|
|
|
|
buffer = self.get_destination_buffer(value, index)
|
2014-01-17 21:47:17 -04:00
|
|
|
|
|
|
|
if name == 'everything':
|
2015-04-03 17:09:02 -03:00
|
|
|
for name in self.destination_buffers:
|
|
|
|
preset[name] = buffer
|
2014-01-17 21:47:17 -04:00
|
|
|
continue
|
|
|
|
|
2015-04-03 17:09:02 -03:00
|
|
|
assert name in self.destination_buffers
|
|
|
|
preset[name] = buffer
|
2014-01-17 21:47:17 -04:00
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
global clinic
|
|
|
|
clinic = self
|
|
|
|
|
2023-05-21 17:49:34 -03:00
|
|
|
def add_destination(
|
|
|
|
self,
|
|
|
|
name: str,
|
|
|
|
type: str,
|
2023-07-12 18:33:47 -03:00
|
|
|
*args: str
|
2023-05-21 17:49:34 -03:00
|
|
|
) -> None:
|
2015-04-03 17:09:02 -03:00
|
|
|
if name in self.destinations:
|
|
|
|
fail("Destination already exists: " + repr(name))
|
2023-07-12 18:33:47 -03:00
|
|
|
self.destinations[name] = Destination(name, type, self, args)
|
2015-04-03 17:09:02 -03:00
|
|
|
|
2023-05-21 17:49:34 -03:00
|
|
|
def get_destination(self, name: str) -> Destination:
|
2014-01-17 21:47:17 -04:00
|
|
|
d = self.destinations.get(name)
|
|
|
|
if not d:
|
|
|
|
fail("Destination does not exist: " + repr(name))
|
|
|
|
return d
|
|
|
|
|
2023-05-21 17:49:34 -03:00
|
|
|
def get_destination_buffer(
|
|
|
|
self,
|
|
|
|
name: str,
|
|
|
|
item: int = 0
|
2023-07-26 17:11:15 -03:00
|
|
|
) -> _TextAccumulator:
|
2015-04-03 17:09:02 -03:00
|
|
|
d = self.get_destination(name)
|
|
|
|
return d.buffers[item]
|
2014-01-17 21:47:17 -04:00
|
|
|
|
2023-07-16 21:04:10 -03:00
|
|
|
def parse(self, input: str) -> str:
|
2013-10-19 04:09:25 -03:00
|
|
|
printer = self.printer
|
|
|
|
self.block_parser = BlockParser(input, self.language, verify=self.verify)
|
|
|
|
for block in self.block_parser:
|
|
|
|
dsl_name = block.dsl_name
|
|
|
|
if dsl_name:
|
|
|
|
if dsl_name not in self.parsers:
|
2023-05-20 17:16:49 -03:00
|
|
|
assert dsl_name in parsers, f"No parser to handle {dsl_name!r} block."
|
2013-10-19 04:09:25 -03:00
|
|
|
self.parsers[dsl_name] = parsers[dsl_name](self)
|
|
|
|
parser = self.parsers[dsl_name]
|
2023-08-02 21:00:06 -03:00
|
|
|
parser.parse(block)
|
2013-10-19 04:09:25 -03:00
|
|
|
printer.print_block(block)
|
2014-01-17 21:47:17 -04:00
|
|
|
|
2015-04-03 17:09:02 -03:00
|
|
|
# these are destinations not buffers
|
2014-01-17 21:47:17 -04:00
|
|
|
for name, destination in self.destinations.items():
|
|
|
|
if destination.type == 'suppress':
|
|
|
|
continue
|
2015-04-03 17:09:02 -03:00
|
|
|
output = destination.dump()
|
2014-01-17 21:47:17 -04:00
|
|
|
|
|
|
|
if output:
|
|
|
|
block = Block("", dsl_name="clinic", output=output)
|
|
|
|
|
|
|
|
if destination.type == 'buffer':
|
|
|
|
block.input = "dump " + name + "\n"
|
|
|
|
warn("Destination buffer " + repr(name) + " not empty at end of file, emptying.")
|
|
|
|
printer.write("\n")
|
|
|
|
printer.print_block(block)
|
|
|
|
continue
|
|
|
|
|
|
|
|
if destination.type == 'file':
|
|
|
|
try:
|
2014-01-26 00:43:29 -04:00
|
|
|
dirname = os.path.dirname(destination.filename)
|
|
|
|
try:
|
|
|
|
os.makedirs(dirname)
|
|
|
|
except FileExistsError:
|
|
|
|
if not os.path.isdir(dirname):
|
|
|
|
fail("Can't write to destination {}, "
|
|
|
|
"can't make directory {}!".format(
|
|
|
|
destination.filename, dirname))
|
2014-01-28 09:00:08 -04:00
|
|
|
if self.verify:
|
2023-05-20 17:16:49 -03:00
|
|
|
with open(destination.filename) as f:
|
2014-01-28 09:00:08 -04:00
|
|
|
parser_2 = BlockParser(f.read(), language=self.language)
|
|
|
|
blocks = list(parser_2)
|
|
|
|
if (len(blocks) != 1) or (blocks[0].input != 'preserve\n'):
|
|
|
|
fail("Modified destination file " + repr(destination.filename) + ", not overwriting!")
|
2014-01-17 21:47:17 -04:00
|
|
|
except FileNotFoundError:
|
|
|
|
pass
|
|
|
|
|
|
|
|
block.input = 'preserve\n'
|
|
|
|
printer_2 = BlockPrinter(self.language)
|
2022-08-11 18:25:49 -03:00
|
|
|
printer_2.print_block(block, core_includes=True)
|
2023-07-05 08:23:22 -03:00
|
|
|
write_file(destination.filename, printer_2.f.getvalue())
|
2014-01-17 21:47:17 -04:00
|
|
|
continue
|
2023-05-03 21:11:29 -03:00
|
|
|
|
2023-07-05 08:23:22 -03:00
|
|
|
return printer.f.getvalue()
|
2014-01-17 21:47:17 -04:00
|
|
|
|
2023-07-28 17:27:21 -03:00
|
|
|
def _module_and_class(
|
|
|
|
self, fields: Iterable[str]
|
|
|
|
) -> tuple[Module | Clinic, Class | None]:
|
2013-10-19 04:09:25 -03:00
|
|
|
"""
|
|
|
|
fields should be an iterable of field names.
|
|
|
|
returns a tuple of (module, class).
|
|
|
|
the module object could actually be self (a clinic object).
|
|
|
|
this function is only ever used to find the parent of where
|
|
|
|
a new class/module should go.
|
|
|
|
"""
|
2023-07-28 17:27:21 -03:00
|
|
|
parent: Clinic | Module | Class
|
|
|
|
child: Module | Class | None
|
|
|
|
module: Clinic | Module
|
|
|
|
cls: Class | None = None
|
|
|
|
so_far: list[str] = []
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
parent = module = self
|
|
|
|
|
|
|
|
for field in fields:
|
|
|
|
so_far.append(field)
|
2023-07-28 17:27:21 -03:00
|
|
|
if not isinstance(parent, Class):
|
2013-10-19 04:09:25 -03:00
|
|
|
child = parent.modules.get(field)
|
|
|
|
if child:
|
2013-11-18 13:32:13 -04:00
|
|
|
parent = module = child
|
2013-10-19 04:09:25 -03:00
|
|
|
continue
|
|
|
|
if not hasattr(parent, 'classes'):
|
|
|
|
return module, cls
|
|
|
|
child = parent.classes.get(field)
|
|
|
|
if not child:
|
|
|
|
fail('Parent class or module ' + '.'.join(so_far) + " does not exist.")
|
|
|
|
cls = parent = child
|
|
|
|
|
|
|
|
return module, cls
|
|
|
|
|
|
|
|
|
2023-05-18 19:57:26 -03:00
|
|
|
def parse_file(
|
|
|
|
filename: str,
|
|
|
|
*,
|
|
|
|
verify: bool = True,
|
|
|
|
output: str | None = None
|
|
|
|
) -> None:
|
2020-11-18 10:36:27 -04:00
|
|
|
if not output:
|
|
|
|
output = filename
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
extension = os.path.splitext(filename)[1][1:]
|
|
|
|
if not extension:
|
|
|
|
fail("Can't extract file type for file " + repr(filename))
|
|
|
|
|
|
|
|
try:
|
2014-02-01 02:03:12 -04:00
|
|
|
language = extensions[extension](filename)
|
2013-10-19 04:09:25 -03:00
|
|
|
except KeyError:
|
|
|
|
fail("Can't identify file type for file " + repr(filename))
|
|
|
|
|
2023-05-20 17:16:49 -03:00
|
|
|
with open(filename, encoding="utf-8") as f:
|
2013-11-23 18:58:45 -04:00
|
|
|
raw = f.read()
|
|
|
|
|
2014-02-09 02:15:29 -04:00
|
|
|
# exit quickly if there are no clinic markers in the file
|
|
|
|
find_start_re = BlockParser("", language).find_start_re
|
|
|
|
if not find_start_re.search(raw):
|
|
|
|
return
|
|
|
|
|
2023-05-21 17:49:34 -03:00
|
|
|
assert isinstance(language, CLanguage)
|
2020-11-18 10:36:27 -04:00
|
|
|
clinic = Clinic(language, verify=verify, filename=filename)
|
2023-07-05 08:23:22 -03:00
|
|
|
cooked = clinic.parse(raw)
|
|
|
|
|
|
|
|
write_file(output, cooked)
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
|
2023-05-18 19:57:26 -03:00
|
|
|
def compute_checksum(
        input: str | None,
        length: int | None = None
) -> str:
    input = input or ''
    s = hashlib.sha1(input.encode('utf-8')).hexdigest()
    if length:
        s = s[:length]
    return s
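
# Illustrative values (standard hashlib.sha1 behaviour): compute_checksum("", 16)
# and compute_checksum(None, 16) both hash the empty string and return
# "da39a3ee5e6b4b0d", the first 16 hex digits of SHA-1 of b"".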
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
|
|
|
|
class PythonParser:
    def __init__(self, clinic: Clinic) -> None:
        pass

    def parse(self, block: Block) -> None:
        with contextlib.redirect_stdout(io.StringIO()) as s:
            exec(block.input)
        block.output = s.getvalue()
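
# Sketch of what this enables (marker layout as illustrated in the Block
# docstring above): a block such as
#
#     #/*[python]
#     #print("// generated by an embedded python block")
#     #[python]*/
#
# is exec()'d, and whatever it prints to stdout becomes the block's output.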
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
|
2023-07-06 18:17:12 -03:00
|
|
|
@dc.dataclass(repr=False)
|
2013-10-19 04:09:25 -03:00
|
|
|
class Module:
|
2023-07-06 18:17:12 -03:00
|
|
|
name: str
|
2023-07-28 17:27:21 -03:00
|
|
|
module: Module | Clinic
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-06 18:17:12 -03:00
|
|
|
def __post_init__(self) -> None:
|
|
|
|
self.parent = self.module
|
2023-05-20 07:24:00 -03:00
|
|
|
self.modules: ModuleDict = {}
|
|
|
|
self.classes: ClassDict = {}
|
2023-05-18 19:57:26 -03:00
|
|
|
self.functions: list[Function] = []
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-18 19:57:26 -03:00
|
|
|
def __repr__(self) -> str:
|
2013-11-18 13:32:13 -04:00
|
|
|
return "<clinic.Module " + repr(self.name) + " at " + str(id(self)) + ">"
|
|
|
|
|
2023-05-18 19:57:26 -03:00
|
|
|
|
2023-07-06 18:17:12 -03:00
|
|
|
@dc.dataclass(repr=False)
|
2013-10-19 04:09:25 -03:00
|
|
|
class Class:
|
2023-07-06 18:17:12 -03:00
|
|
|
name: str
|
2023-07-28 17:27:21 -03:00
|
|
|
module: Module | Clinic
|
2023-07-26 07:54:03 -03:00
|
|
|
cls: Class | None
|
|
|
|
typedef: str
|
|
|
|
type_object: str
|
2023-07-06 18:17:12 -03:00
|
|
|
|
|
|
|
def __post_init__(self) -> None:
|
|
|
|
self.parent = self.cls or self.module
|
2023-05-20 07:24:00 -03:00
|
|
|
self.classes: ClassDict = {}
|
2023-05-18 19:57:26 -03:00
|
|
|
self.functions: list[Function] = []
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-18 19:57:26 -03:00
|
|
|
def __repr__(self) -> str:
|
2013-11-18 13:32:13 -04:00
|
|
|
return "<clinic.Class " + repr(self.name) + " at " + str(id(self)) + ">"
|
|
|
|
|
2023-05-18 19:57:26 -03:00
|
|
|
|
|
|
|
unsupported_special_methods: set[str] = set("""
|
2014-01-12 18:12:59 -04:00
|
|
|
|
|
|
|
__abs__
|
|
|
|
__add__
|
|
|
|
__and__
|
|
|
|
__call__
|
|
|
|
__delitem__
|
|
|
|
__divmod__
|
|
|
|
__eq__
|
|
|
|
__float__
|
|
|
|
__floordiv__
|
|
|
|
__ge__
|
|
|
|
__getattr__
|
|
|
|
__getattribute__
|
|
|
|
__getitem__
|
|
|
|
__gt__
|
|
|
|
__hash__
|
|
|
|
__iadd__
|
|
|
|
__iand__
|
|
|
|
__ifloordiv__
|
|
|
|
__ilshift__
|
2015-03-12 17:01:30 -03:00
|
|
|
__imatmul__
|
2014-01-12 18:12:59 -04:00
|
|
|
__imod__
|
|
|
|
__imul__
|
|
|
|
__index__
|
|
|
|
__int__
|
|
|
|
__invert__
|
|
|
|
__ior__
|
|
|
|
__ipow__
|
|
|
|
__irshift__
|
|
|
|
__isub__
|
|
|
|
__iter__
|
|
|
|
__itruediv__
|
|
|
|
__ixor__
|
|
|
|
__le__
|
|
|
|
__len__
|
|
|
|
__lshift__
|
|
|
|
__lt__
|
2015-03-12 17:01:30 -03:00
|
|
|
__matmul__
|
2014-01-12 18:12:59 -04:00
|
|
|
__mod__
|
|
|
|
__mul__
|
|
|
|
__neg__
|
|
|
|
__next__
|
|
|
|
__or__
|
|
|
|
__pos__
|
|
|
|
__pow__
|
|
|
|
__radd__
|
|
|
|
__rand__
|
|
|
|
__rdivmod__
|
|
|
|
__repr__
|
|
|
|
__rfloordiv__
|
|
|
|
__rlshift__
|
2015-03-12 17:01:30 -03:00
|
|
|
__rmatmul__
|
2014-01-12 18:12:59 -04:00
|
|
|
__rmod__
|
|
|
|
__rmul__
|
|
|
|
__ror__
|
|
|
|
__rpow__
|
|
|
|
__rrshift__
|
|
|
|
__rshift__
|
|
|
|
__rsub__
|
|
|
|
__rtruediv__
|
|
|
|
__rxor__
|
|
|
|
__setattr__
|
|
|
|
__setitem__
|
|
|
|
__str__
|
|
|
|
__sub__
|
|
|
|
__truediv__
|
|
|
|
__xor__
|
2013-11-18 13:32:13 -04:00
|
|
|
|
2014-01-12 18:12:59 -04:00
|
|
|
""".strip().split())
|
|
|
|
|
|
|
|
|
2023-07-13 19:54:05 -03:00
|
|
|
class FunctionKind(enum.Enum):
    INVALID = enum.auto()
    CALLABLE = enum.auto()
    STATIC_METHOD = enum.auto()
    CLASS_METHOD = enum.auto()
    METHOD_INIT = enum.auto()
    METHOD_NEW = enum.auto()

    @functools.cached_property
    def new_or_init(self) -> bool:
        return self in {FunctionKind.METHOD_INIT, FunctionKind.METHOD_NEW}

    def __repr__(self) -> str:
        return f"<FunctionKind.{self.name}>"


INVALID: Final = FunctionKind.INVALID
CALLABLE: Final = FunctionKind.CALLABLE
STATIC_METHOD: Final = FunctionKind.STATIC_METHOD
CLASS_METHOD: Final = FunctionKind.CLASS_METHOD
METHOD_INIT: Final = FunctionKind.METHOD_INIT
METHOD_NEW: Final = FunctionKind.METHOD_NEW
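
# For instance, FunctionKind.METHOD_NEW.new_or_init is True while
# FunctionKind.CALLABLE.new_or_init is False; the module-level aliases above
# let the rest of the file refer to the kinds without the enum prefix.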
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-21 18:24:26 -03:00
|
|
|
ParamDict = dict[str, "Parameter"]
|
|
|
|
ReturnConverterType = Callable[..., "CReturnConverter"]
|
|
|
|
|
2023-07-07 10:10:07 -03:00
|
|
|
|
|
|
|
@dc.dataclass(repr=False)
|
2013-10-19 04:09:25 -03:00
|
|
|
class Function:
|
|
|
|
"""
|
|
|
|
Mutable duck type for inspect.Function.
|
|
|
|
|
|
|
|
docstring - a str containing
|
|
|
|
* embedded line breaks
|
|
|
|
* text outdented to the left margin
|
|
|
|
* no trailing whitespace.
|
|
|
|
It will always be true that
|
|
|
|
(not docstring) or ((not docstring[0].isspace()) and (docstring.rstrip() == docstring))
|
|
|
|
"""
|
2023-07-07 10:10:07 -03:00
|
|
|
parameters: ParamDict = dc.field(default_factory=dict)
|
|
|
|
_: dc.KW_ONLY
|
|
|
|
name: str
|
2023-07-28 17:27:21 -03:00
|
|
|
module: Module | Clinic
|
2023-07-26 07:54:03 -03:00
|
|
|
cls: Class | None
|
|
|
|
c_basename: str | None
|
|
|
|
full_name: str
|
2023-07-07 10:10:07 -03:00
|
|
|
return_converter: CReturnConverter
|
2023-07-26 07:54:03 -03:00
|
|
|
kind: FunctionKind
|
|
|
|
coexist: bool
|
2023-07-07 10:10:07 -03:00
|
|
|
return_annotation: object = inspect.Signature.empty
|
|
|
|
docstring: str = ''
|
|
|
|
# docstring_only means "don't generate a machine-readable
|
|
|
|
# signature, just a normal docstring". it's True for
|
|
|
|
# functions with optional groups because we can't represent
|
|
|
|
# those accurately with inspect.Signature in 3.4.
|
|
|
|
docstring_only: bool = False
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-07 10:10:07 -03:00
|
|
|
def __post_init__(self) -> None:
|
2023-07-28 17:27:21 -03:00
|
|
|
self.parent = self.cls or self.module
|
2023-07-03 11:03:31 -03:00
|
|
|
self.self_converter: self_converter | None = None
|
2023-07-07 10:10:07 -03:00
|
|
|
self.__render_parameters__: list[Parameter] | None = None
|
2014-02-01 02:03:12 -04:00
|
|
|
|
|
|
|
@property
|
2023-07-07 10:10:07 -03:00
|
|
|
def render_parameters(self) -> list[Parameter]:
|
2014-02-01 02:03:12 -04:00
|
|
|
if not self.__render_parameters__:
|
2023-07-07 10:10:07 -03:00
|
|
|
l: list[Parameter] = []
|
|
|
|
self.__render_parameters__ = l
|
2014-02-01 02:03:12 -04:00
|
|
|
for p in self.parameters.values():
|
|
|
|
p = p.copy()
|
|
|
|
p.converter.pre_render()
|
|
|
|
l.append(p)
|
|
|
|
return self.__render_parameters__
|
|
|
|
|
2013-11-23 18:54:00 -04:00
|
|
|
@property
|
2023-05-21 18:24:26 -03:00
|
|
|
def methoddef_flags(self) -> str | None:
|
2023-07-13 19:54:05 -03:00
|
|
|
if self.kind.new_or_init:
|
2014-01-12 18:12:59 -04:00
|
|
|
return None
|
2013-11-23 18:54:00 -04:00
|
|
|
flags = []
|
2023-07-13 19:54:05 -03:00
|
|
|
match self.kind:
|
|
|
|
case FunctionKind.CLASS_METHOD:
|
|
|
|
flags.append('METH_CLASS')
|
|
|
|
case FunctionKind.STATIC_METHOD:
|
|
|
|
flags.append('METH_STATIC')
|
|
|
|
case _ as kind:
|
|
|
|
assert kind is FunctionKind.CALLABLE, f"unknown kind: {kind!r}"
|
2013-11-23 18:54:00 -04:00
|
|
|
if self.coexist:
|
|
|
|
flags.append('METH_COEXIST')
|
|
|
|
return '|'.join(flags)
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-21 18:24:26 -03:00
|
|
|
def __repr__(self) -> str:
|
2013-10-19 04:09:25 -03:00
|
|
|
return '<clinic.Function ' + self.name + '>'
|
|
|
|
|
2023-07-07 10:10:07 -03:00
|
|
|
def copy(self, **overrides: Any) -> Function:
|
|
|
|
f = dc.replace(self, **overrides)
|
2023-05-20 07:24:00 -03:00
|
|
|
f.parameters = {
|
|
|
|
name: value.copy(function=f)
|
|
|
|
for name, value in f.parameters.items()
|
|
|
|
}
|
2014-02-01 02:03:12 -04:00
|
|
|
return f
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-07 10:10:07 -03:00
|
|
|
@dc.dataclass(repr=False, slots=True)
|
2013-10-19 04:09:25 -03:00
|
|
|
class Parameter:
|
|
|
|
"""
|
|
|
|
Mutable duck type of inspect.Parameter.
|
|
|
|
"""
|
2023-07-07 10:10:07 -03:00
|
|
|
name: str
|
|
|
|
kind: inspect._ParameterKind
|
|
|
|
_: dc.KW_ONLY
|
|
|
|
default: object = inspect.Parameter.empty
|
|
|
|
function: Function
|
|
|
|
converter: CConverter
|
|
|
|
annotation: object = inspect.Parameter.empty
|
|
|
|
docstring: str = ''
|
|
|
|
group: int = 0
|
|
|
|
right_bracket_count: int = dc.field(init=False, default=0)
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-21 18:24:26 -03:00
|
|
|
def __repr__(self) -> str:
|
2013-10-19 04:09:25 -03:00
|
|
|
return '<clinic.Parameter ' + self.name + '>'
|
|
|
|
|
2023-05-21 18:24:26 -03:00
|
|
|
def is_keyword_only(self) -> bool:
|
2013-10-19 04:09:25 -03:00
|
|
|
return self.kind == inspect.Parameter.KEYWORD_ONLY
|
|
|
|
|
2023-05-21 18:24:26 -03:00
|
|
|
def is_positional_only(self) -> bool:
|
2014-02-09 02:15:29 -04:00
|
|
|
return self.kind == inspect.Parameter.POSITIONAL_ONLY
|
|
|
|
|
2023-05-21 18:24:26 -03:00
|
|
|
def is_vararg(self) -> bool:
|
2021-07-16 12:43:02 -03:00
|
|
|
return self.kind == inspect.Parameter.VAR_POSITIONAL
|
|
|
|
|
2023-05-21 18:24:26 -03:00
|
|
|
def is_optional(self) -> bool:
|
2021-07-16 12:43:02 -03:00
|
|
|
return not self.is_vararg() and (self.default is not unspecified)
|
2019-03-14 05:32:22 -03:00
|
|
|
|
2023-07-07 10:10:07 -03:00
|
|
|
def copy(
|
|
|
|
self,
|
|
|
|
/,
|
|
|
|
*,
|
|
|
|
converter: CConverter | None = None,
|
|
|
|
function: Function | None = None,
|
|
|
|
**overrides: Any
|
|
|
|
) -> Parameter:
|
|
|
|
function = function or self.function
|
|
|
|
if not converter:
|
2014-02-01 02:03:12 -04:00
|
|
|
converter = copy.copy(self.converter)
|
2023-07-07 10:10:07 -03:00
|
|
|
converter.function = function
|
|
|
|
return dc.replace(self, **overrides, function=function, converter=converter)
|
2014-02-01 02:03:12 -04:00
|
|
|
|
2023-05-21 18:24:26 -03:00
|
|
|
def get_displayname(self, i: int) -> str:
|
2019-08-29 11:49:08 -03:00
|
|
|
if i == 0:
|
|
|
|
return '"argument"'
|
|
|
|
if not self.is_positional_only():
|
2023-05-20 17:16:49 -03:00
|
|
|
return f'"argument {self.name!r}"'
|
2019-08-29 11:49:08 -03:00
|
|
|
else:
|
2023-05-20 17:16:49 -03:00
|
|
|
return f'"argument {i}"'
|
2014-02-01 02:03:12 -04:00
|
|
|
|
|
|
|
|
2023-07-25 05:18:19 -03:00
|
|
|

CConverterClassT = TypeVar("CConverterClassT", bound=type["CConverter"])

def add_c_converter(
        f: CConverterClassT,
        name: str | None = None
) -> CConverterClassT:
    if not name:
        name = f.__name__
        if not name.endswith('_converter'):
            return f
        name = name.removesuffix('_converter')
    converters[name] = f
    return f

def add_default_legacy_c_converter(cls: CConverterClassT) -> CConverterClassT:
    # automatically add converter for default format unit
    # (but without stomping on the existing one if it's already
    # set, in case you subclass)
    if ((cls.format_unit not in ('O&', '')) and
        (cls.format_unit not in legacy_converters)):
        legacy_converters[cls.format_unit] = cls
    return cls

def add_legacy_c_converter(
        format_unit: str,
        **kwargs: Any
) -> Callable[[CConverterClassT], CConverterClassT]:
    """
    Adds a legacy converter.
    """
    def closure(f: CConverterClassT) -> CConverterClassT:
        added_f: Callable[..., CConverter]
        if not kwargs:
            added_f = f
        else:
            # mypy's special-casing for functools.partial
            # can't quite grapple with this code here
            added_f = functools.partial(f, **kwargs)  # type: ignore[arg-type]
        if format_unit:
            legacy_converters[format_unit] = added_f
        return f
    return closure
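
# Illustrative sketch (hypothetical converter class and format unit, not taken
# from this file): decorating a converter with
#
#     @add_legacy_c_converter('X', bitwise=True)
#     class example_converter(CConverter):
#         ...
#
# would store functools.partial(example_converter, bitwise=True) under the
# 'X' key of legacy_converters while returning the class itself unchanged.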

class CConverterAutoRegister(type):
    def __init__(
        cls, name: str, bases: tuple[type, ...], classdict: dict[str, Any]
    ) -> None:
        converter_cls = cast(type["CConverter"], cls)
        add_c_converter(converter_cls)
        add_default_legacy_c_converter(converter_cls)
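
# Example of the auto-registration performed by the metaclass above, using a
# hypothetical class name: merely defining
#
#     class widget_converter(CConverter):
#         type = 'int'
#
# registers the class as converters['widget'] (add_c_converter strips the
# '_converter' suffix), and add_default_legacy_c_converter additionally
# registers its format unit unless that unit is 'O&' or ''.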

class CConverter(metaclass=CConverterAutoRegister):
    """
    For the init function, self, name, function, and default
    must be keyword-or-positional parameters.  All other
    parameters must be keyword-only.
    """

    # The C name to use for this variable.
    name: str

    # The Python name to use for this variable.
    py_name: str

    # The C type to use for this variable.
    # 'type' should be a Python string specifying the type, e.g. "int".
    # If this is a pointer type, the type string should end with ' *'.
    type: str | None = None

    # The Python default value for this parameter, as a Python value.
    # Or the magic value "unspecified" if there is no default.
    # Or the magic value "unknown" if this value cannot be evaluated
    # at Argument-Clinic-preprocessing time (but is presumed to be valid
    # at runtime).
    default: object = unspecified

    # If not None, default must be isinstance() of this type.
    # (You can also specify a tuple of types.)
    default_type: bltns.type[Any] | tuple[bltns.type[Any], ...] | None = None

    # "default" converted into a C value, as a string.
    # Or None if there is no default.
    c_default: str | None = None

    # "default" converted into a Python value, as a string.
    # Or None if there is no default.
    py_default: str | None = None

    # The default value used to initialize the C variable when
    # there is no default, but not specifying a default may
    # result in an "uninitialized variable" warning.  This can
    # easily happen when using option groups--although
    # properly-written code won't actually use the variable,
    # the variable does get passed in to the _impl.  (Ah, if
    # only dataflow analysis could inline the static function!)
    #
    # This value is specified as a string.
    # Every non-abstract subclass should supply a valid value.
    c_ignored_default: str = 'NULL'

    # If true, wrap with Py_UNUSED.
    unused = False

    # The C converter *function* to be used, if any.
    # (If this is not None, format_unit must be 'O&'.)
    converter: str | None = None

    # Should Argument Clinic add a '&' before the name of
    # the variable when passing it into the _impl function?
    impl_by_reference = False

    # Should Argument Clinic add a '&' before the name of
    # the variable when passing it into PyArg_ParseTuple (AndKeywords)?
    parse_by_reference = True

    #############################################################
    #############################################################
    ## You shouldn't need to read anything below this point to ##
    ## write your own converter functions.                     ##
    #############################################################
    #############################################################

    # The "format unit" to specify for this variable when
    # parsing arguments using PyArg_ParseTuple (AndKeywords).
    # Custom converters should always use the default value of 'O&'.
    format_unit = 'O&'

    # What encoding do we want for this variable?  Only used
    # by format units starting with 'e'.
    encoding: str | None = None

    # Should this object be required to be a subclass of a specific type?
    # If not None, should be a string representing a pointer to a
    # PyTypeObject (e.g. "&PyUnicode_Type").
    # Only used by the 'O!' format unit (and the "object" converter).
    subclass_of: str | None = None

    # Do we want an adjacent '_length' variable for this variable?
    # Only used by format units ending with '#'.
    length = False

    # Should we show this parameter in the generated
    # __text_signature__? This is *almost* always True.
    # (It's only False for __new__, __init__, and METH_STATIC functions.)
    show_in_signature = True

    # Overrides the name used in a text signature.
    # The name used for a "self" parameter must be one of
    # self, type, or module; however users can set their own.
    # This lets the self_converter overrule the user-settable
    # name, *just* for the text signature.
    # Only set by self_converter.
    signature_name: str | None = None
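
    # Example of how a subclass typically uses the attributes above
    # (hypothetical C names, shown for illustration only): a converter backed
    # by a C "O&" converter function leaves format_unit at its default of
    # 'O&' and sets something like
    #
    #     converter = 'MyModule_ConvertThing'   # assumed C function name
    #     type = 'thing_t *'                    # assumed C type
    #     c_ignored_default = 'NULL'
    #
    # whereas converters for concrete format units override format_unit
    # directly (see the subclasses later in this file).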

    # keep in sync with self_converter.__init__!
    def __init__(self,
                 # Positional args:
                 name: str,
                 py_name: str,
                 function: Function,
                 default: object = unspecified,
                 *,  # Keyword only args:
                 c_default: str | None = None,
                 py_default: str | None = None,
                 annotation: str | Literal[Sentinels.unspecified] = unspecified,
                 unused: bool = False,
                 **kwargs: Any
    ) -> None:
        self.name = ensure_legal_c_identifier(name)
        self.py_name = py_name
        self.unused = unused

        if default is not unspecified:
            if (self.default_type
                and default is not unknown
                and not isinstance(default, self.default_type)
            ):
                if isinstance(self.default_type, type):
                    types_str = self.default_type.__name__
                else:
                    names = [cls.__name__ for cls in self.default_type]
                    types_str = ', '.join(names)
                fail("{}: default value {!r} for field {} is not of type {}".format(
                    self.__class__.__name__, default, name, types_str))
            self.default = default

        if c_default:
            self.c_default = c_default
        if py_default:
            self.py_default = py_default

        if annotation is not unspecified:
            fail("The 'annotation' parameter is not currently permitted.")

        # Make sure not to set self.function until after converter_init() has been called.
        # This prevents you from caching information
        # about the function in converter_init().
        # (That breaks if we get cloned.)
        self.converter_init(**kwargs)
        self.function = function

    # Add a custom __getattr__ method to improve the error message
    # if somebody tries to access self.function in converter_init().
    #
    # mypy will assume arbitrary access is okay for a class with a __getattr__ method,
    # and that's not what we want,
    # so put it inside an `if not TYPE_CHECKING` block
    if not TYPE_CHECKING:
        def __getattr__(self, attr):
            if attr == "function":
                fail(
                    f"{self.__class__.__name__!r} object has no attribute 'function'.\n"
                    f"Note: accessing self.function inside converter_init is disallowed!"
                )
            return super().__getattr__(attr)

    def converter_init(self) -> None:
        pass

    def is_optional(self) -> bool:
        return (self.default is not unspecified)

    def _render_self(self, parameter: Parameter, data: CRenderData) -> None:
        self.parameter = parameter
        name = self.parser_name

        # impl_arguments
        s = ("&" if self.impl_by_reference else "") + name
        data.impl_arguments.append(s)
        if self.length:
            data.impl_arguments.append(self.length_name())

        # impl_parameters
        data.impl_parameters.append(self.simple_declaration(by_reference=self.impl_by_reference))
        if self.length:
            data.impl_parameters.append("Py_ssize_t " + self.length_name())

    def _render_non_self(
            self,
            parameter: Parameter,
            data: CRenderData
    ) -> None:
        self.parameter = parameter
        name = self.name

        # declarations
        d = self.declaration(in_parser=True)
        data.declarations.append(d)

        # initializers
        initializers = self.initialize()
        if initializers:
            data.initializers.append('/* initializers for ' + name + ' */\n' + initializers.rstrip())

        # modifications
        modifications = self.modify()
        if modifications:
            data.modifications.append('/* modifications for ' + name + ' */\n' + modifications.rstrip())

        # keywords
        if parameter.is_vararg():
            pass
        elif parameter.is_positional_only():
            data.keywords.append('')
        else:
            data.keywords.append(parameter.name)

        # format_units
        if self.is_optional() and '|' not in data.format_units:
            data.format_units.append('|')
        if parameter.is_keyword_only() and '$' not in data.format_units:
            data.format_units.append('$')
        data.format_units.append(self.format_unit)

        # parse_arguments
        self.parse_argument(data.parse_arguments)

        # post_parsing
        if post_parsing := self.post_parsing():
            data.post_parsing.append('/* Post parse cleanup for ' + name + ' */\n' + post_parsing.rstrip() + '\n')

        # cleanup
        cleanup = self.cleanup()
        if cleanup:
            data.cleanup.append('/* Cleanup for ' + name + ' */\n' + cleanup.rstrip() + "\n")

    def render(self, parameter: Parameter, data: CRenderData) -> None:
        """
        parameter is a clinic.Parameter instance.
        data is a CRenderData instance.
        """
        self._render_self(parameter, data)
        self._render_non_self(parameter, data)

    def length_name(self) -> str:
        """Computes the name of the associated "length" variable."""
        assert self.length is not None
        return self.parser_name + "_length"

    # Why is this one broken out separately?
    # For "positional-only" function parsing,
    # which generates a bunch of PyArg_ParseTuple calls.
    def parse_argument(self, args: list[str]) -> None:
        assert not (self.converter and self.encoding)
        if self.format_unit == 'O&':
            assert self.converter
            args.append(self.converter)

        if self.encoding:
            args.append(c_repr(self.encoding))
        elif self.subclass_of:
            args.append(self.subclass_of)

        s = ("&" if self.parse_by_reference else "") + self.name
        args.append(s)

        if self.length:
            args.append("&" + self.length_name())
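
    # Example of what parse_argument() appends (hypothetical parameter named
    # "path" using the default 'O&' format unit with converter set to
    # 'path_converter'): the two strings "path_converter" and "&path" are
    # added to args, matching the calling convention of the "O&" unit in
    # PyArg_ParseTuple.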

    #
    # All the functions after here are intended as extension points.
    #

    def simple_declaration(
            self,
            by_reference: bool = False,
            *,
            in_parser: bool = False
    ) -> str:
        """
        Computes the basic declaration of the variable.
        Used in computing the prototype declaration and the
        variable declaration.
        """
        assert isinstance(self.type, str)
        prototype = [self.type]
        if by_reference or not self.type.endswith('*'):
            prototype.append(" ")
        if by_reference:
            prototype.append('*')
        if in_parser:
            name = self.parser_name
        else:
            name = self.name
            if self.unused:
                name = f"Py_UNUSED({name})"
        prototype.append(name)
        return "".join(prototype)

    def declaration(self, *, in_parser: bool = False) -> str:
        """
        The C statement to declare this variable.
        """
        declaration = [self.simple_declaration(in_parser=True)]
        default = self.c_default
        if not default and self.parameter.group:
            default = self.c_ignored_default
        if default:
            declaration.append(" = ")
            declaration.append(default)
        declaration.append(";")
        if self.length:
            declaration.append('\nPy_ssize_t ')
            declaration.append(self.length_name())
            declaration.append(';')
        return "".join(declaration)
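
    # Example (hypothetical converter state): with type='int', name='x' and
    # c_default='0', declaration() returns the C statement "int x = 0;"; if
    # self.length is true, a companion "Py_ssize_t x_length;" declaration is
    # appended on the following line.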

    def initialize(self) -> str:
        """
        The C statements required to set up this variable before parsing.
        Returns a string containing this code indented at column 0.
        If no initialization is necessary, returns an empty string.
        """
        return ""

    def modify(self) -> str:
        """
        The C statements required to modify this variable after parsing.
        Returns a string containing this code indented at column 0.
        If no modification is necessary, returns an empty string.
        """
        return ""

    def post_parsing(self) -> str:
        """
        The C statements required to do some operations after the end of parsing but before cleaning up.
        Return a string containing this code indented at column 0.
        If no operation is necessary, return an empty string.
        """
        return ""

    def cleanup(self) -> str:
        """
        The C statements required to clean up after this variable.
        Returns a string containing this code indented at column 0.
        If no cleanup is necessary, returns an empty string.
        """
        return ""

    def pre_render(self) -> None:
        """
        A second initialization function, like converter_init,
        called just before rendering.
        You are permitted to examine self.function here.
        """
        pass

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'O&':
            return """
                if (!{converter}({argname}, &{paramname})) {{{{
                    goto exit;
                }}}}
                """.format(argname=argname, paramname=self.parser_name,
                           converter=self.converter)
        if self.format_unit == 'O!':
            cast = '(%s)' % self.type if self.type != 'PyObject *' else ''
            if self.subclass_of in type_checks:
                typecheck, typename = type_checks[self.subclass_of]
                return """
                    if (!{typecheck}({argname})) {{{{
                        _PyArg_BadArgument("{{name}}", {displayname}, "{typename}", {argname});
                        goto exit;
                    }}}}
                    {paramname} = {cast}{argname};
                    """.format(argname=argname, paramname=self.parser_name,
                               displayname=displayname, typecheck=typecheck,
                               typename=typename, cast=cast)
            return """
                if (!PyObject_TypeCheck({argname}, {subclass_of})) {{{{
                    _PyArg_BadArgument("{{name}}", {displayname}, ({subclass_of})->tp_name, {argname});
                    goto exit;
                }}}}
                {paramname} = {cast}{argname};
                """.format(argname=argname, paramname=self.parser_name,
                           subclass_of=self.subclass_of, cast=cast,
                           displayname=displayname)
        if self.format_unit == 'O':
            cast = '(%s)' % self.type if self.type != 'PyObject *' else ''
            return """
                {paramname} = {cast}{argname};
                """.format(argname=argname, paramname=self.parser_name, cast=cast)
        return None
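
    # Note on the brace doubling in the snippets above: each snippet passes
    # through two substitution passes -- the .format() call here (filling in
    # {argname} and {paramname}) and a later templating pass when the full
    # parser is rendered.  Literal C braces are therefore written as {{{{ and
    # }}}}, and "{{name}}" deliberately survives this first pass as "{name}".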

    def set_template_dict(self, template_dict: TemplateDict) -> None:
        pass

    @property
    def parser_name(self) -> str:
        if self.name in CLINIC_PREFIXED_ARGS:  # bpo-39741
            return CLINIC_PREFIX + self.name
        else:
            return self.name
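
    # Example: a parameter literally named "args" or "kwargs" would collide
    # with locals emitted by the generated parser, so parser_name prepends
    # CLINIC_PREFIX for names in CLINIC_PREFIXED_ARGS (yielding e.g.
    # "__clinic_args"); every other name is returned unchanged.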


type_checks = {
    '&PyLong_Type': ('PyLong_Check', 'int'),
    '&PyTuple_Type': ('PyTuple_Check', 'tuple'),
    '&PyList_Type': ('PyList_Check', 'list'),
    '&PySet_Type': ('PySet_Check', 'set'),
    '&PyFrozenSet_Type': ('PyFrozenSet_Check', 'frozenset'),
    '&PyDict_Type': ('PyDict_Check', 'dict'),
    '&PyUnicode_Type': ('PyUnicode_Check', 'str'),
    '&PyBytes_Type': ('PyBytes_Check', 'bytes'),
    '&PyByteArray_Type': ('PyByteArray_Check', 'bytearray'),
}


ConverterType = Callable[..., CConverter]
ConverterDict = dict[str, ConverterType]

# maps strings to callables.
# these callables must be of the form:
#   def foo(name, default, *, ...)
# The callable may have any number of keyword-only parameters.
# The callable must return a CConverter object.
# The callable should not call builtins.print.
converters: ConverterDict = {}

# maps strings to callables.
# these callables follow the same rules as those for "converters" above.
# note however that they will never be called with keyword-only parameters.
legacy_converters: ConverterDict = {}

# maps strings to callables.
# these callables must be of the form:
#   def foo(*, ...)
# The callable may have any number of keyword-only parameters.
# The callable must return a CReturnConverter object.
# The callable should not call builtins.print.
ReturnConverterDict = dict[str, ReturnConverterType]
return_converters: ReturnConverterDict = {}

TypeSet = set[bltns.type[Any]]
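
# Example (hypothetical Clinic input, and an approximate description of the
# call): a parameter declared in a Clinic block as
#
#     x: bool(accept={int})
#
# is resolved roughly by looking up converters['bool'] and instantiating it
# with the parameter's name, Python name, owning function and default, plus
# the keyword arguments (here accept={int}), which are forwarded to
# converter_init().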

class bool_converter(CConverter):
    type = 'int'
    default_type = bool
    format_unit = 'p'
    c_ignored_default = '0'

    def converter_init(self, *, accept: TypeSet = {object}) -> None:
        if accept == {int}:
            self.format_unit = 'i'
        elif accept != {object}:
            fail("bool_converter: illegal 'accept' argument " + repr(accept))
        if self.default is not unspecified:
            self.default = bool(self.default)
            self.c_default = str(int(self.default))

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'i':
            return """
                {paramname} = _PyLong_AsInt({argname});
                if ({paramname} == -1 && PyErr_Occurred()) {{{{
                    goto exit;
                }}}}
                """.format(argname=argname, paramname=self.parser_name)
        elif self.format_unit == 'p':
            return """
                {paramname} = PyObject_IsTrue({argname});
                if ({paramname} < 0) {{{{
                    goto exit;
                }}}}
                """.format(argname=argname, paramname=self.parser_name)
        return super().parse_arg(argname, displayname)

class defining_class_converter(CConverter):
    """
    A special-case converter:
    this is the default converter used for the defining class.
    """
    type = 'PyTypeObject *'
    format_unit = ''
    show_in_signature = False

    def converter_init(self, *, type: str | None = None) -> None:
        self.specified_type = type

    def render(self, parameter: Parameter, data: CRenderData) -> None:
        self._render_self(parameter, data)

    def set_template_dict(self, template_dict: TemplateDict) -> None:
        template_dict['defining_class_name'] = self.name


class char_converter(CConverter):
    type = 'char'
    default_type = (bytes, bytearray)
    format_unit = 'c'
    c_ignored_default = "'\0'"

    def converter_init(self) -> None:
        if isinstance(self.default, self.default_type):
            if len(self.default) != 1:
                fail("char_converter: illegal default value " + repr(self.default))

            self.c_default = repr(bytes(self.default))[1:]
            if self.c_default == '"\'"':
                self.c_default = r"'\''"

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'c':
            return """
                if (PyBytes_Check({argname}) && PyBytes_GET_SIZE({argname}) == 1) {{{{
                    {paramname} = PyBytes_AS_STRING({argname})[0];
                }}}}
                else if (PyByteArray_Check({argname}) && PyByteArray_GET_SIZE({argname}) == 1) {{{{
                    {paramname} = PyByteArray_AS_STRING({argname})[0];
                }}}}
                else {{{{
                    _PyArg_BadArgument("{{name}}", {displayname}, "a byte string of length 1", {argname});
                    goto exit;
                }}}}
                """.format(argname=argname, paramname=self.parser_name,
                           displayname=displayname)
        return super().parse_arg(argname, displayname)

@add_legacy_c_converter('B', bitwise=True)
class unsigned_char_converter(CConverter):
    type = 'unsigned char'
    default_type = int
    format_unit = 'b'
    c_ignored_default = "'\0'"

    def converter_init(self, *, bitwise: bool = False) -> None:
        if bitwise:
            self.format_unit = 'B'

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'b':
            return """
                {{{{
                    long ival = PyLong_AsLong({argname});
                    if (ival == -1 && PyErr_Occurred()) {{{{
                        goto exit;
                    }}}}
                    else if (ival < 0) {{{{
                        PyErr_SetString(PyExc_OverflowError,
                                        "unsigned byte integer is less than minimum");
                        goto exit;
                    }}}}
                    else if (ival > UCHAR_MAX) {{{{
                        PyErr_SetString(PyExc_OverflowError,
                                        "unsigned byte integer is greater than maximum");
                        goto exit;
                    }}}}
                    else {{{{
                        {paramname} = (unsigned char) ival;
                    }}}}
                }}}}
                """.format(argname=argname, paramname=self.parser_name)
        elif self.format_unit == 'B':
            return """
                {{{{
                    unsigned long ival = PyLong_AsUnsignedLongMask({argname});
                    if (ival == (unsigned long)-1 && PyErr_Occurred()) {{{{
                        goto exit;
                    }}}}
                    else {{{{
                        {paramname} = (unsigned char) ival;
                    }}}}
                }}}}
                """.format(argname=argname, paramname=self.parser_name)
        return super().parse_arg(argname, displayname)

class byte_converter(unsigned_char_converter): pass

class short_converter(CConverter):
    type = 'short'
    default_type = int
    format_unit = 'h'
    c_ignored_default = "0"

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'h':
            return """
                {{{{
                    long ival = PyLong_AsLong({argname});
                    if (ival == -1 && PyErr_Occurred()) {{{{
                        goto exit;
                    }}}}
                    else if (ival < SHRT_MIN) {{{{
                        PyErr_SetString(PyExc_OverflowError,
                                        "signed short integer is less than minimum");
                        goto exit;
                    }}}}
                    else if (ival > SHRT_MAX) {{{{
                        PyErr_SetString(PyExc_OverflowError,
                                        "signed short integer is greater than maximum");
                        goto exit;
                    }}}}
                    else {{{{
                        {paramname} = (short) ival;
                    }}}}
                }}}}
                """.format(argname=argname, paramname=self.parser_name)
        return super().parse_arg(argname, displayname)

class unsigned_short_converter(CConverter):
    type = 'unsigned short'
    default_type = int
    c_ignored_default = "0"

    def converter_init(self, *, bitwise: bool = False) -> None:
        if bitwise:
            self.format_unit = 'H'
        else:
            self.converter = '_PyLong_UnsignedShort_Converter'

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'H':
            return """
                {paramname} = (unsigned short)PyLong_AsUnsignedLongMask({argname});
                if ({paramname} == (unsigned short)-1 && PyErr_Occurred()) {{{{
                    goto exit;
                }}}}
                """.format(argname=argname, paramname=self.parser_name)
        return super().parse_arg(argname, displayname)

@add_legacy_c_converter('C', accept={str})
class int_converter(CConverter):
    type = 'int'
    default_type = int
    format_unit = 'i'
    c_ignored_default = "0"

    def converter_init(
        self, *, accept: TypeSet = {int}, type: str | None = None
    ) -> None:
        if accept == {str}:
            self.format_unit = 'C'
        elif accept != {int}:
            fail("int_converter: illegal 'accept' argument " + repr(accept))
        if type is not None:
            self.type = type

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'i':
            return """
                {paramname} = _PyLong_AsInt({argname});
                if ({paramname} == -1 && PyErr_Occurred()) {{{{
                    goto exit;
                }}}}
                """.format(argname=argname, paramname=self.parser_name)
        elif self.format_unit == 'C':
            return """
                if (!PyUnicode_Check({argname})) {{{{
                    _PyArg_BadArgument("{{name}}", {displayname}, "a unicode character", {argname});
                    goto exit;
                }}}}
                if (PyUnicode_GET_LENGTH({argname}) != 1) {{{{
                    _PyArg_BadArgument("{{name}}", {displayname}, "a unicode character", {argname});
                    goto exit;
                }}}}
                {paramname} = PyUnicode_READ_CHAR({argname}, 0);
                """.format(argname=argname, paramname=self.parser_name,
                           displayname=displayname)
        return super().parse_arg(argname, displayname)

class unsigned_int_converter(CConverter):
    type = 'unsigned int'
    default_type = int
    c_ignored_default = "0"

    def converter_init(self, *, bitwise: bool = False) -> None:
        if bitwise:
            self.format_unit = 'I'
        else:
            self.converter = '_PyLong_UnsignedInt_Converter'

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'I':
            return """
                {paramname} = (unsigned int)PyLong_AsUnsignedLongMask({argname});
                if ({paramname} == (unsigned int)-1 && PyErr_Occurred()) {{{{
                    goto exit;
                }}}}
                """.format(argname=argname, paramname=self.parser_name)
        return super().parse_arg(argname, displayname)

class long_converter(CConverter):
    type = 'long'
    default_type = int
    format_unit = 'l'
    c_ignored_default = "0"

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'l':
            return """
                {paramname} = PyLong_AsLong({argname});
                if ({paramname} == -1 && PyErr_Occurred()) {{{{
                    goto exit;
                }}}}
                """.format(argname=argname, paramname=self.parser_name)
        return super().parse_arg(argname, displayname)

class unsigned_long_converter(CConverter):
    type = 'unsigned long'
    default_type = int
    c_ignored_default = "0"

    def converter_init(self, *, bitwise: bool = False) -> None:
        if bitwise:
            self.format_unit = 'k'
        else:
            self.converter = '_PyLong_UnsignedLong_Converter'

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'k':
            return """
                if (!PyLong_Check({argname})) {{{{
                    _PyArg_BadArgument("{{name}}", {displayname}, "int", {argname});
                    goto exit;
                }}}}
                {paramname} = PyLong_AsUnsignedLongMask({argname});
                """.format(argname=argname, paramname=self.parser_name,
                           displayname=displayname)
        return super().parse_arg(argname, displayname)

class long_long_converter(CConverter):
    type = 'long long'
    default_type = int
    format_unit = 'L'
    c_ignored_default = "0"

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'L':
            return """
                {paramname} = PyLong_AsLongLong({argname});
                if ({paramname} == -1 && PyErr_Occurred()) {{{{
                    goto exit;
                }}}}
                """.format(argname=argname, paramname=self.parser_name)
        return super().parse_arg(argname, displayname)

class unsigned_long_long_converter(CConverter):
    type = 'unsigned long long'
    default_type = int
    c_ignored_default = "0"

    def converter_init(self, *, bitwise: bool = False) -> None:
        if bitwise:
            self.format_unit = 'K'
        else:
            self.converter = '_PyLong_UnsignedLongLong_Converter'

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'K':
            return """
                if (!PyLong_Check({argname})) {{{{
                    _PyArg_BadArgument("{{name}}", {displayname}, "int", {argname});
                    goto exit;
                }}}}
                {paramname} = PyLong_AsUnsignedLongLongMask({argname});
                """.format(argname=argname, paramname=self.parser_name,
                           displayname=displayname)
        return super().parse_arg(argname, displayname)

class Py_ssize_t_converter(CConverter):
    type = 'Py_ssize_t'
    c_ignored_default = "0"

    def converter_init(self, *, accept: TypeSet = {int}) -> None:
        if accept == {int}:
            self.format_unit = 'n'
            self.default_type = int
        elif accept == {int, NoneType}:
            self.converter = '_Py_convert_optional_to_ssize_t'
        else:
            fail("Py_ssize_t_converter: illegal 'accept' argument " + repr(accept))

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'n':
            return """
                {{{{
                    Py_ssize_t ival = -1;
                    PyObject *iobj = _PyNumber_Index({argname});
                    if (iobj != NULL) {{{{
                        ival = PyLong_AsSsize_t(iobj);
                        Py_DECREF(iobj);
                    }}}}
                    if (ival == -1 && PyErr_Occurred()) {{{{
                        goto exit;
                    }}}}
                    {paramname} = ival;
                }}}}
                """.format(argname=argname, paramname=self.parser_name)
        return super().parse_arg(argname, displayname)

class slice_index_converter(CConverter):
    type = 'Py_ssize_t'

    def converter_init(self, *, accept: TypeSet = {int, NoneType}) -> None:
        if accept == {int}:
            self.converter = '_PyEval_SliceIndexNotNone'
        elif accept == {int, NoneType}:
            self.converter = '_PyEval_SliceIndex'
        else:
            fail("slice_index_converter: illegal 'accept' argument " + repr(accept))

class size_t_converter(CConverter):
    type = 'size_t'
    converter = '_PyLong_Size_t_Converter'
    c_ignored_default = "0"

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'n':
            return """
                {paramname} = PyNumber_AsSsize_t({argname}, PyExc_OverflowError);
                if ({paramname} == -1 && PyErr_Occurred()) {{{{
                    goto exit;
                }}}}
                """.format(argname=argname, paramname=self.parser_name)
        return super().parse_arg(argname, displayname)


class fildes_converter(CConverter):
    type = 'int'
    converter = '_PyLong_FileDescriptor_Converter'

    def _parse_arg(self, argname: str, displayname: str) -> str | None:
        return """
            {paramname} = PyObject_AsFileDescriptor({argname});
            if ({paramname} == -1) {{{{
                goto exit;
            }}}}
            """.format(argname=argname, paramname=self.name)

class float_converter(CConverter):
    type = 'float'
    default_type = float
    format_unit = 'f'
    c_ignored_default = "0.0"

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'f':
            return """
                if (PyFloat_CheckExact({argname})) {{{{
                    {paramname} = (float) (PyFloat_AS_DOUBLE({argname}));
                }}}}
                else
                {{{{
                    {paramname} = (float) PyFloat_AsDouble({argname});
                    if ({paramname} == -1.0 && PyErr_Occurred()) {{{{
                        goto exit;
                    }}}}
                }}}}
                """.format(argname=argname, paramname=self.parser_name)
        return super().parse_arg(argname, displayname)

class double_converter(CConverter):
    type = 'double'
    default_type = float
    format_unit = 'd'
    c_ignored_default = "0.0"

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'd':
            return """
                if (PyFloat_CheckExact({argname})) {{{{
                    {paramname} = PyFloat_AS_DOUBLE({argname});
                }}}}
                else
                {{{{
                    {paramname} = PyFloat_AsDouble({argname});
                    if ({paramname} == -1.0 && PyErr_Occurred()) {{{{
                        goto exit;
                    }}}}
                }}}}
                """.format(argname=argname, paramname=self.parser_name)
        return super().parse_arg(argname, displayname)


class Py_complex_converter(CConverter):
    type = 'Py_complex'
    default_type = complex
    format_unit = 'D'
    c_ignored_default = "{0.0, 0.0}"

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 'D':
            return """
                {paramname} = PyComplex_AsCComplex({argname});
                if (PyErr_Occurred()) {{{{
                    goto exit;
                }}}}
                """.format(argname=argname, paramname=self.parser_name)
        return super().parse_arg(argname, displayname)

class object_converter(CConverter):
    type = 'PyObject *'
    format_unit = 'O'

    def converter_init(
            self, *,
            converter: str | None = None,
            type: str | None = None,
            subclass_of: str | None = None
    ) -> None:
        if converter:
            if subclass_of:
                fail("object: Cannot pass in both 'converter' and 'subclass_of'")
            self.format_unit = 'O&'
            self.converter = converter
        elif subclass_of:
            self.format_unit = 'O!'
            self.subclass_of = subclass_of

        if type is not None:
            self.type = type


#
# We define three conventions for buffer types in the 'accept' argument:
#
#  buffer : any object supporting the buffer interface
#  rwbuffer: any object supporting the buffer interface, but must be writeable
#  robuffer: any object supporting the buffer interface, but must not be writeable
#

class buffer: pass
class rwbuffer: pass
class robuffer: pass


StrConverterKeyType = tuple[frozenset[type], bool, bool]

def str_converter_key(
    types: TypeSet, encoding: bool | str | None, zeroes: bool
) -> StrConverterKeyType:
    return (frozenset(types), bool(encoding), bool(zeroes))
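
# Example: str_converter_key({str}, None, False) evaluates to
# (frozenset({str}), False, False), while str_converter_key({str, NoneType},
# None, True) evaluates to (frozenset({str, NoneType}), False, True); the
# r() calls further below populate str_converter_argument_map with keys of
# this shape.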

str_converter_argument_map: dict[StrConverterKeyType, str] = {}

class str_converter(CConverter):
    type = 'const char *'
    default_type = (str, Null, NoneType)
    format_unit = 's'

    def converter_init(
            self,
            *,
            accept: TypeSet = {str},
            encoding: str | None = None,
            zeroes: bool = False
    ) -> None:

        key = str_converter_key(accept, encoding, zeroes)
        format_unit = str_converter_argument_map.get(key)
        if not format_unit:
            fail("str_converter: illegal combination of arguments", key)

        self.format_unit = format_unit
        self.length = bool(zeroes)
        if encoding:
            if self.default not in (Null, None, unspecified):
                fail("str_converter: Argument Clinic doesn't support default values for encoded strings")
            self.encoding = encoding
            self.type = 'char *'
            # sorry, clinic can't support preallocated buffers
            # for es# and et#
            self.c_default = "NULL"
        if NoneType in accept and self.c_default == "Py_None":
            self.c_default = "NULL"

    def post_parsing(self) -> str:
        if self.encoding:
            name = self.name
            return f"PyMem_FREE({name});\n"
        else:
            return ""

    def parse_arg(self, argname: str, displayname: str) -> str | None:
        if self.format_unit == 's':
            return """
                if (!PyUnicode_Check({argname})) {{{{
                    _PyArg_BadArgument("{{name}}", {displayname}, "str", {argname});
                    goto exit;
                }}}}
                Py_ssize_t {paramname}_length;
                {paramname} = PyUnicode_AsUTF8AndSize({argname}, &{paramname}_length);
                if ({paramname} == NULL) {{{{
                    goto exit;
                }}}}
                if (strlen({paramname}) != (size_t){paramname}_length) {{{{
                    PyErr_SetString(PyExc_ValueError, "embedded null character");
                    goto exit;
                }}}}
                """.format(argname=argname, paramname=self.parser_name,
                           displayname=displayname)
        if self.format_unit == 'z':
            return """
                if ({argname} == Py_None) {{{{
                    {paramname} = NULL;
                }}}}
                else if (PyUnicode_Check({argname})) {{{{
                    Py_ssize_t {paramname}_length;
                    {paramname} = PyUnicode_AsUTF8AndSize({argname}, &{paramname}_length);
                    if ({paramname} == NULL) {{{{
                        goto exit;
                    }}}}
                    if (strlen({paramname}) != (size_t){paramname}_length) {{{{
                        PyErr_SetString(PyExc_ValueError, "embedded null character");
                        goto exit;
                    }}}}
                }}}}
                else {{{{
                    _PyArg_BadArgument("{{name}}", {displayname}, "str or None", {argname});
                    goto exit;
                }}}}
                """.format(argname=argname, paramname=self.parser_name,
                           displayname=displayname)
        return super().parse_arg(argname, displayname)

#
# This is the fourth or fifth rewrite of registering all the
# string converter format units.  Previous approaches hid
# bugs--generally mismatches between the semantics of the format
# unit and the arguments necessary to represent those semantics
# properly.  Hopefully with this approach we'll get it 100% right.
#
# The r() function (short for "register") both registers the
# mapping from arguments to format unit *and* registers the
# legacy C converter for that format unit.
#
ConverterKeywordDict = dict[str, TypeSet | bool]

def r(format_unit: str,
      *,
      accept: TypeSet,
      encoding: bool = False,
      zeroes: bool = False
) -> None:
    if not encoding and format_unit != 's':
        # add the legacy c converters here too.
        #
        # note: add_legacy_c_converter can't work for
        #   es, es#, et, or et#
        #   because of their extra encoding argument
        #
        # also don't add the converter for 's' because
        # the metaclass for CConverter adds it for us.
        kwargs: ConverterKeywordDict = {}
        if accept != {str}:
            kwargs['accept'] = accept
        if zeroes:
            kwargs['zeroes'] = True
        added_f = functools.partial(str_converter, **kwargs)
        legacy_converters[format_unit] = added_f

    d = str_converter_argument_map
    key = str_converter_key(accept, encoding, zeroes)
    if key in d:
        sys.exit("Duplicate keys specified for str_converter_argument_map!")
    d[key] = format_unit

r('es', encoding=True, accept={str})
r('es#', encoding=True, zeroes=True, accept={str})
r('et', encoding=True, accept={bytes, bytearray, str})
r('et#', encoding=True, zeroes=True, accept={bytes, bytearray, str})
r('s', accept={str})
r('s#', zeroes=True, accept={robuffer, str})
r('y', accept={robuffer})
r('y#', zeroes=True, accept={robuffer})
r('z', accept={str, NoneType})
r('z#', zeroes=True, accept={robuffer, str, NoneType})
del r
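
# Net effect of the registrations above, as an example: legacy_converters['z#']
# is functools.partial(str_converter, accept={robuffer, str, NoneType},
# zeroes=True), and str_converter_argument_map maps
# (frozenset({robuffer, str, NoneType}), False, True) to 'z#'.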
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
|
|
|
|
class PyBytesObject_converter(CConverter):
|
|
|
|
type = 'PyBytesObject *'
|
|
|
|
format_unit = 'S'
|
2015-05-04 10:59:46 -03:00
|
|
|
# accept = {bytes}
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-26 07:51:24 -03:00
|
|
|
def parse_arg(self, argname: str, displayname: str) -> str | None:
|
2018-12-25 07:23:47 -04:00
|
|
|
if self.format_unit == 'S':
|
|
|
|
return """
|
|
|
|
if (!PyBytes_Check({argname})) {{{{
|
2019-08-29 11:49:08 -03:00
|
|
|
_PyArg_BadArgument("{{name}}", {displayname}, "bytes", {argname});
|
2018-12-25 07:23:47 -04:00
|
|
|
goto exit;
|
|
|
|
}}}}
|
|
|
|
{paramname} = ({type}){argname};
|
2021-07-16 12:43:02 -03:00
|
|
|
""".format(argname=argname, paramname=self.parser_name,
|
2019-08-29 11:49:08 -03:00
|
|
|
type=self.type, displayname=displayname)
|
|
|
|
return super().parse_arg(argname, displayname)
|
2018-12-25 07:23:47 -04:00
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
class PyByteArrayObject_converter(CConverter):
|
|
|
|
type = 'PyByteArrayObject *'
|
|
|
|
format_unit = 'Y'
|
2015-05-04 10:59:46 -03:00
|
|
|
# accept = {bytearray}
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-26 07:51:24 -03:00
|
|
|
def parse_arg(self, argname: str, displayname: str) -> str | None:
|
2018-12-25 07:23:47 -04:00
|
|
|
if self.format_unit == 'Y':
|
|
|
|
return """
|
|
|
|
if (!PyByteArray_Check({argname})) {{{{
|
2019-08-29 11:49:08 -03:00
|
|
|
_PyArg_BadArgument("{{name}}", {displayname}, "bytearray", {argname});
|
2018-12-25 07:23:47 -04:00
|
|
|
goto exit;
|
|
|
|
}}}}
|
|
|
|
{paramname} = ({type}){argname};
|
2021-07-16 12:43:02 -03:00
|
|
|
""".format(argname=argname, paramname=self.parser_name,
|
2019-08-29 11:49:08 -03:00
|
|
|
type=self.type, displayname=displayname)
|
|
|
|
return super().parse_arg(argname, displayname)
|
2018-12-25 07:23:47 -04:00
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
class unicode_converter(CConverter):
|
|
|
|
type = 'PyObject *'
|
2014-01-12 15:09:57 -04:00
|
|
|
default_type = (str, Null, NoneType)
|
2013-10-19 04:09:25 -03:00
|
|
|
format_unit = 'U'
|
|
|
|
|
2023-07-26 07:51:24 -03:00
|
|
|
def parse_arg(self, argname: str, displayname: str) -> str | None:
|
2018-12-25 07:23:47 -04:00
|
|
|
if self.format_unit == 'U':
|
|
|
|
return """
|
|
|
|
if (!PyUnicode_Check({argname})) {{{{
|
2019-08-29 11:49:08 -03:00
|
|
|
_PyArg_BadArgument("{{name}}", {displayname}, "str", {argname});
|
2018-12-25 07:23:47 -04:00
|
|
|
goto exit;
|
|
|
|
}}}}
|
|
|
|
{paramname} = {argname};
|
2021-07-16 12:43:02 -03:00
|
|
|
""".format(argname=argname, paramname=self.parser_name,
|
2019-08-29 11:49:08 -03:00
|
|
|
displayname=displayname)
|
|
|
|
return super().parse_arg(argname, displayname)
|
2018-12-25 07:23:47 -04:00
|
|
|
|
2020-06-30 03:03:15 -03:00
|
|
|
@add_legacy_c_converter('u')
|
2015-05-08 03:30:09 -03:00
|
|
|
@add_legacy_c_converter('u#', zeroes=True)
|
2015-05-04 10:59:46 -03:00
|
|
|
@add_legacy_c_converter('Z', accept={str, NoneType})
|
2015-05-08 03:30:09 -03:00
|
|
|
@add_legacy_c_converter('Z#', accept={str, NoneType}, zeroes=True)
|
2013-10-19 04:09:25 -03:00
|
|
|
class Py_UNICODE_converter(CConverter):
|
2023-05-31 14:52:33 -03:00
|
|
|
type = 'const wchar_t *'
|
2014-01-12 15:09:57 -04:00
|
|
|
default_type = (str, Null, NoneType)
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-17 18:05:22 -03:00
|
|
|
def converter_init(
|
|
|
|
self, *,
|
|
|
|
accept: TypeSet = {str},
|
|
|
|
zeroes: bool = False
|
|
|
|
) -> None:
|
2015-05-04 10:59:46 -03:00
|
|
|
format_unit = 'Z' if accept=={str, NoneType} else 'u'
|
2015-05-08 03:30:09 -03:00
|
|
|
if zeroes:
|
2013-11-23 18:54:00 -04:00
|
|
|
format_unit += '#'
|
|
|
|
self.length = True
|
2020-06-30 03:03:15 -03:00
|
|
|
self.format_unit = format_unit
|
|
|
|
else:
|
|
|
|
self.accept = accept
|
|
|
|
if accept == {str}:
|
|
|
|
self.converter = '_PyUnicode_WideCharString_Converter'
|
|
|
|
elif accept == {str, NoneType}:
|
|
|
|
self.converter = '_PyUnicode_WideCharString_Opt_Converter'
|
|
|
|
else:
|
|
|
|
fail("Py_UNICODE_converter: illegal 'accept' argument " + repr(accept))
|
2022-10-03 04:42:54 -03:00
|
|
|
self.c_default = "NULL"
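        # In clinic input this converter is reached either through the legacy
        # format units registered above ('u', 'u#', 'Z', 'Z#') or directly;
        # for example (illustrative), a parameter written as
        #     s: Py_UNICODE(accept={str, NoneType})
        # takes the _PyUnicode_WideCharString_Opt_Converter path.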
|
2020-06-30 03:03:15 -03:00
|
|
|
|
2023-07-25 19:33:03 -03:00
|
|
|
def cleanup(self) -> str:
|
|
|
|
if self.length:
|
|
|
|
return ""
|
|
|
|
else:
|
2020-06-30 03:03:15 -03:00
|
|
|
return """\
|
|
|
|
PyMem_Free((void *){name});
|
|
|
|
""".format(name=self.name)
|
|
|
|
|
2023-07-26 07:51:24 -03:00
|
|
|
def parse_arg(self, argname: str, argnum: str) -> str | None:
|
2020-06-30 03:03:15 -03:00
|
|
|
if not self.length:
|
|
|
|
if self.accept == {str}:
|
|
|
|
return """
|
|
|
|
if (!PyUnicode_Check({argname})) {{{{
|
|
|
|
_PyArg_BadArgument("{{name}}", {argnum}, "str", {argname});
|
|
|
|
goto exit;
|
|
|
|
}}}}
|
|
|
|
{paramname} = PyUnicode_AsWideCharString({argname}, NULL);
|
|
|
|
if ({paramname} == NULL) {{{{
|
|
|
|
goto exit;
|
|
|
|
}}}}
|
|
|
|
""".format(argname=argname, paramname=self.name, argnum=argnum)
|
|
|
|
elif self.accept == {str, NoneType}:
|
|
|
|
return """
|
|
|
|
if ({argname} == Py_None) {{{{
|
|
|
|
{paramname} = NULL;
|
|
|
|
}}}}
|
|
|
|
else if (PyUnicode_Check({argname})) {{{{
|
|
|
|
{paramname} = PyUnicode_AsWideCharString({argname}, NULL);
|
|
|
|
if ({paramname} == NULL) {{{{
|
|
|
|
goto exit;
|
|
|
|
}}}}
|
|
|
|
}}}}
|
|
|
|
else {{{{
|
|
|
|
_PyArg_BadArgument("{{name}}", {argnum}, "str or None", {argname});
|
|
|
|
goto exit;
|
|
|
|
}}}}
|
|
|
|
""".format(argname=argname, paramname=self.name, argnum=argnum)
|
|
|
|
return super().parse_arg(argname, argnum)
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2015-05-04 10:59:46 -03:00
|
|
|
@add_legacy_c_converter('s*', accept={str, buffer})
|
|
|
|
@add_legacy_c_converter('z*', accept={str, buffer, NoneType})
|
|
|
|
@add_legacy_c_converter('w*', accept={rwbuffer})
|
2013-10-19 04:09:25 -03:00
|
|
|
class Py_buffer_converter(CConverter):
|
|
|
|
type = 'Py_buffer'
|
|
|
|
format_unit = 'y*'
|
|
|
|
impl_by_reference = True
|
2014-01-12 15:09:57 -04:00
|
|
|
c_ignored_default = "{NULL, NULL}"
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-17 18:05:22 -03:00
|
|
|
def converter_init(self, *, accept: TypeSet = {buffer}) -> None:
|
2014-01-12 15:09:57 -04:00
|
|
|
if self.default not in (unspecified, None):
|
|
|
|
fail("The only legal default value for Py_buffer is None.")
|
2015-05-04 10:59:46 -03:00
|
|
|
|
2014-01-06 14:34:00 -04:00
|
|
|
self.c_default = self.c_ignored_default
|
2013-11-23 18:54:00 -04:00
|
|
|
|
2015-05-04 10:59:46 -03:00
|
|
|
if accept == {str, buffer, NoneType}:
|
|
|
|
format_unit = 'z*'
|
|
|
|
elif accept == {str, buffer}:
|
|
|
|
format_unit = 's*'
|
|
|
|
elif accept == {buffer}:
|
|
|
|
format_unit = 'y*'
|
|
|
|
elif accept == {rwbuffer}:
|
|
|
|
format_unit = 'w*'
|
2013-10-19 04:09:25 -03:00
|
|
|
else:
|
2013-11-23 18:54:00 -04:00
|
|
|
fail("Py_buffer_converter: illegal combination of arguments")
|
|
|
|
|
|
|
|
self.format_unit = format_unit
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-24 18:23:58 -03:00
|
|
|
def cleanup(self) -> str:
|
2018-12-25 07:23:47 -04:00
|
|
|
name = self.name
|
2016-06-09 10:16:06 -03:00
|
|
|
return "".join(["if (", name, ".obj) {\n PyBuffer_Release(&", name, ");\n}\n"])
|
2013-11-23 18:54:00 -04:00
|
|
|
|
2023-07-26 07:51:24 -03:00
|
|
|
def parse_arg(self, argname: str, displayname: str) -> str | None:
|
2018-12-25 07:23:47 -04:00
|
|
|
if self.format_unit == 'y*':
|
|
|
|
return """
|
|
|
|
if (PyObject_GetBuffer({argname}, &{paramname}, PyBUF_SIMPLE) != 0) {{{{
|
|
|
|
goto exit;
|
|
|
|
}}}}
|
|
|
|
if (!PyBuffer_IsContiguous(&{paramname}, 'C')) {{{{
|
2019-08-29 11:49:08 -03:00
|
|
|
_PyArg_BadArgument("{{name}}", {displayname}, "contiguous buffer", {argname});
|
2018-12-25 07:23:47 -04:00
|
|
|
goto exit;
|
|
|
|
}}}}
|
2021-07-16 12:43:02 -03:00
|
|
|
""".format(argname=argname, paramname=self.parser_name,
|
2019-08-29 11:49:08 -03:00
|
|
|
displayname=displayname)
|
2018-12-25 07:23:47 -04:00
|
|
|
elif self.format_unit == 's*':
|
|
|
|
return """
|
|
|
|
if (PyUnicode_Check({argname})) {{{{
|
|
|
|
Py_ssize_t len;
|
|
|
|
const char *ptr = PyUnicode_AsUTF8AndSize({argname}, &len);
|
|
|
|
if (ptr == NULL) {{{{
|
|
|
|
goto exit;
|
|
|
|
}}}}
|
|
|
|
PyBuffer_FillInfo(&{paramname}, {argname}, (void *)ptr, len, 1, 0);
|
|
|
|
}}}}
|
|
|
|
else {{{{ /* any bytes-like object */
|
|
|
|
if (PyObject_GetBuffer({argname}, &{paramname}, PyBUF_SIMPLE) != 0) {{{{
|
|
|
|
goto exit;
|
|
|
|
}}}}
|
|
|
|
if (!PyBuffer_IsContiguous(&{paramname}, 'C')) {{{{
|
2019-08-29 11:49:08 -03:00
|
|
|
_PyArg_BadArgument("{{name}}", {displayname}, "contiguous buffer", {argname});
|
2018-12-25 07:23:47 -04:00
|
|
|
goto exit;
|
|
|
|
}}}}
|
|
|
|
}}}}
|
2021-07-16 12:43:02 -03:00
|
|
|
""".format(argname=argname, paramname=self.parser_name,
|
2019-08-29 11:49:08 -03:00
|
|
|
displayname=displayname)
|
2018-12-25 07:23:47 -04:00
|
|
|
elif self.format_unit == 'w*':
|
|
|
|
return """
|
|
|
|
if (PyObject_GetBuffer({argname}, &{paramname}, PyBUF_WRITABLE) < 0) {{{{
|
|
|
|
PyErr_Clear();
|
2019-08-29 11:49:08 -03:00
|
|
|
_PyArg_BadArgument("{{name}}", {displayname}, "read-write bytes-like object", {argname});
|
2018-12-25 07:23:47 -04:00
|
|
|
goto exit;
|
|
|
|
}}}}
|
|
|
|
if (!PyBuffer_IsContiguous(&{paramname}, 'C')) {{{{
|
2019-08-29 11:49:08 -03:00
|
|
|
_PyArg_BadArgument("{{name}}", {displayname}, "contiguous buffer", {argname});
|
2018-12-25 07:23:47 -04:00
|
|
|
goto exit;
|
|
|
|
}}}}
|
2021-07-16 12:43:02 -03:00
|
|
|
""".format(argname=argname, paramname=self.parser_name,
|
2019-08-29 11:49:08 -03:00
|
|
|
displayname=displayname)
|
|
|
|
return super().parse_arg(argname, displayname)
|
2018-12-25 07:23:47 -04:00
|
|
|
|
2013-11-23 18:54:00 -04:00
|
|
|
|
2023-07-04 19:13:30 -03:00
|
|
|
def correct_name_for_self(
|
|
|
|
f: Function
|
|
|
|
) -> tuple[str, str]:
|
2014-01-24 10:17:25 -04:00
|
|
|
if f.kind in (CALLABLE, METHOD_INIT):
|
|
|
|
if f.cls:
|
|
|
|
return "PyObject *", "self"
|
2016-07-07 11:35:15 -03:00
|
|
|
return "PyObject *", "module"
|
2023-07-13 19:54:05 -03:00
|
|
|
if f.kind is STATIC_METHOD:
|
2014-01-24 10:17:25 -04:00
|
|
|
return "void *", "null"
|
|
|
|
if f.kind in (CLASS_METHOD, METHOD_NEW):
|
|
|
|
return "PyTypeObject *", "type"
|
|
|
|
raise RuntimeError("Unhandled type of function f: " + repr(f.kind))
|
|
|
|
|
2023-07-04 19:13:30 -03:00
|
|
|
def required_type_for_self_for_parser(
|
|
|
|
f: Function
|
|
|
|
) -> str | None:
|
2014-01-26 00:43:29 -04:00
|
|
|
type, _ = correct_name_for_self(f)
|
|
|
|
if f.kind in (METHOD_INIT, METHOD_NEW, STATIC_METHOD, CLASS_METHOD):
|
|
|
|
return type
|
|
|
|
return None
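
# For example (reading the two helpers above): a module-level function gets
# ("PyObject *", "module"), an ordinary method gets ("PyObject *", "self"),
# and a tp_new slot (METHOD_NEW) is parsed with a "PyTypeObject *" first
# parameter named "type".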
|
|
|
|
|
2014-01-24 10:17:25 -04:00
|
|
|
|
2013-11-23 18:54:00 -04:00
|
|
|
class self_converter(CConverter):
|
|
|
|
"""
|
|
|
|
A special-case converter:
|
|
|
|
this is the default converter used for "self".
|
|
|
|
"""
|
2023-07-25 05:49:07 -03:00
|
|
|
type: str | None = None
|
2014-01-24 10:17:25 -04:00
|
|
|
format_unit = ''
|
|
|
|
|
2023-07-22 19:30:42 -03:00
|
|
|
def converter_init(self, *, type: str | None = None) -> None:
|
2014-02-01 02:03:12 -04:00
|
|
|
self.specified_type = type
|
|
|
|
|
2023-07-25 05:49:07 -03:00
|
|
|
def pre_render(self) -> None:
|
2013-11-23 18:54:00 -04:00
|
|
|
f = self.function
|
2014-01-24 10:17:25 -04:00
|
|
|
default_type, default_name = correct_name_for_self(f)
|
|
|
|
self.signature_name = default_name
|
2014-02-01 02:03:12 -04:00
|
|
|
self.type = self.specified_type or self.type or default_type
|
2014-01-24 10:17:25 -04:00
|
|
|
|
|
|
|
kind = self.function.kind
|
|
|
|
|
2023-07-13 19:54:05 -03:00
|
|
|
if kind is STATIC_METHOD or kind.new_or_init:
|
2014-01-24 10:17:25 -04:00
|
|
|
self.show_in_signature = False
|
|
|
|
|
|
|
|
# tp_new (METHOD_NEW) functions are of type newfunc:
|
2022-02-24 12:51:59 -04:00
|
|
|
# typedef PyObject *(*newfunc)(PyTypeObject *, PyObject *, PyObject *);
|
2014-01-24 10:17:25 -04:00
|
|
|
#
|
|
|
|
# tp_init (METHOD_INIT) functions are of type initproc:
|
|
|
|
# typedef int (*initproc)(PyObject *, PyObject *, PyObject *);
|
|
|
|
#
|
|
|
|
# All other functions generated by Argument Clinic are stored in
|
|
|
|
# PyMethodDef structures, in the ml_meth slot, which is of type PyCFunction:
|
|
|
|
# typedef PyObject *(*PyCFunction)(PyObject *, PyObject *);
|
|
|
|
# However! We habitually cast these functions to PyCFunction,
|
|
|
|
# since functions that accept keyword arguments don't fit this signature
|
|
|
|
# but are stored there anyway. So strict type equality isn't important
|
|
|
|
# for these functions.
|
|
|
|
#
|
|
|
|
# So:
|
|
|
|
#
|
|
|
|
# * The name of the first parameter to the impl and the parsing function will always
|
|
|
|
# be self.name.
|
|
|
|
#
|
|
|
|
# * The type of the first parameter to the impl will always be of self.type.
|
|
|
|
#
|
|
|
|
# * If the function is neither tp_new (METHOD_NEW) nor tp_init (METHOD_INIT):
|
|
|
|
# * The type of the first parameter to the parsing function is also self.type.
|
|
|
|
# This means that if you step into the parsing function, your "self" parameter
|
|
|
|
# is of the correct type, which may make debugging more pleasant.
|
|
|
|
#
|
|
|
|
# * Else if the function is tp_new (METHOD_NEW):
|
|
|
|
# * The type of the first parameter to the parsing function is "PyTypeObject *",
|
|
|
|
# so the type signature of the function call is an exact match.
|
|
|
|
# * If self.type != "PyTypeObject *", we cast the first parameter to self.type
|
|
|
|
# in the impl call.
|
|
|
|
#
|
|
|
|
# * Else if the function is tp_init (METHOD_INIT):
|
|
|
|
# * The type of the first parameter to the parsing function is "PyObject *",
|
|
|
|
# so the type signature of the function call is an exact match.
|
|
|
|
# * If self.type != "PyObject *", we cast the first parameter to self.type
|
|
|
|
# in the impl call.
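        #
        # Illustrative example (the names here are hypothetical, not from
        # this file): for a tp_new method whose self converter declares
        # type="MyObject *", the parsing function receives
        # "PyTypeObject *type" and the impl call casts it, roughly:
        #
        #     my_new_impl((MyObject *)type, arg)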
|
|
|
|
|
|
|
|
@property
|
2023-07-25 05:49:07 -03:00
|
|
|
def parser_type(self) -> str:
|
|
|
|
assert self.type is not None
|
2014-01-26 00:43:29 -04:00
|
|
|
return required_type_for_self_for_parser(self.function) or self.type
|
2014-01-04 16:44:57 -04:00
|
|
|
|
2023-07-25 18:08:52 -03:00
|
|
|
def render(self, parameter: Parameter, data: CRenderData) -> None:
|
2014-01-24 10:17:25 -04:00
|
|
|
"""
|
|
|
|
parameter is a clinic.Parameter instance.
|
|
|
|
data is a CRenderData instance.
|
|
|
|
"""
|
2023-07-13 19:54:05 -03:00
|
|
|
if self.function.kind is STATIC_METHOD:
|
2014-01-24 10:17:25 -04:00
|
|
|
return
|
|
|
|
|
|
|
|
self._render_self(parameter, data)
|
|
|
|
|
|
|
|
if self.type != self.parser_type:
|
|
|
|
# insert cast to impl_argument[0], aka self.
|
|
|
|
# we know we're in the first slot in all the CRenderData lists,
|
|
|
|
# because we render parameters in order, and self is always first.
|
|
|
|
assert len(data.impl_arguments) == 1
|
|
|
|
assert data.impl_arguments[0] == self.name
|
2023-07-25 18:08:52 -03:00
|
|
|
assert self.type is not None
|
2014-01-24 10:17:25 -04:00
|
|
|
data.impl_arguments[0] = '(' + self.type + ")" + data.impl_arguments[0]
|
|
|
|
|
2023-07-22 19:30:42 -03:00
|
|
|
def set_template_dict(self, template_dict: TemplateDict) -> None:
|
2014-01-24 10:17:25 -04:00
|
|
|
template_dict['self_name'] = self.name
|
|
|
|
template_dict['self_type'] = self.parser_type
|
2014-01-26 02:01:12 -04:00
|
|
|
kind = self.function.kind
|
|
|
|
cls = self.function.cls
|
|
|
|
|
2023-07-13 19:54:05 -03:00
|
|
|
if kind.new_or_init and cls and cls.typedef:
|
|
|
|
if kind is METHOD_NEW:
|
2023-01-31 16:42:03 -04:00
|
|
|
type_check = (
|
|
|
|
'({0} == base_tp || {0}->tp_init == base_tp->tp_init)'
|
|
|
|
).format(self.name)
|
2014-01-26 02:01:12 -04:00
|
|
|
else:
|
2023-01-31 16:42:03 -04:00
|
|
|
type_check = ('(Py_IS_TYPE({0}, base_tp) ||\n '
|
|
|
|
' Py_TYPE({0})->tp_new == base_tp->tp_new)'
|
|
|
|
).format(self.name)
|
2020-02-14 03:50:19 -04:00
|
|
|
|
2023-05-20 17:16:49 -03:00
|
|
|
line = f'{type_check} &&\n '
|
2020-02-14 03:50:19 -04:00
|
|
|
template_dict['self_type_check'] = line
|
2013-11-23 18:54:00 -04:00
|
|
|
|
2023-07-22 19:30:42 -03:00
|
|
|
type_object = cls.type_object
|
2023-01-31 16:42:03 -04:00
|
|
|
type_ptr = f'PyTypeObject *base_tp = {type_object};'
|
|
|
|
template_dict['base_type_ptr'] = type_ptr
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
def add_c_return_converter(
|
|
|
|
f: ReturnConverterType,
|
|
|
|
name: str | None = None
|
|
|
|
) -> ReturnConverterType:
|
2013-10-19 04:09:25 -03:00
|
|
|
if not name:
|
|
|
|
name = f.__name__
|
|
|
|
if not name.endswith('_return_converter'):
|
|
|
|
return f
|
2023-05-20 08:08:28 -03:00
|
|
|
name = name.removesuffix('_return_converter')
|
2013-10-19 04:09:25 -03:00
|
|
|
return_converters[name] = f
|
|
|
|
return f
|
|
|
|
|
|
|
|
|
|
|
|
class CReturnConverterAutoRegister(type):
|
2023-05-21 17:23:14 -03:00
|
|
|
def __init__(
|
|
|
|
cls: ReturnConverterType,
|
|
|
|
name: str,
|
|
|
|
bases: tuple[type, ...],
|
|
|
|
classdict: dict[str, Any]
|
|
|
|
) -> None:
|
2013-10-19 04:09:25 -03:00
|
|
|
add_c_return_converter(cls)
|
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
class CReturnConverter(metaclass=CReturnConverterAutoRegister):
|
|
|
|
|
2014-01-04 16:44:57 -04:00
|
|
|
# The C type to use for this variable.
|
|
|
|
# 'type' should be a Python string specifying the type, e.g. "int".
|
|
|
|
# If this is a pointer type, the type string should end with ' *'.
|
2013-10-19 04:09:25 -03:00
|
|
|
type = 'PyObject *'
|
2014-01-04 16:44:57 -04:00
|
|
|
|
|
|
|
# The Python default value for this parameter, as a Python value.
|
|
|
|
# Or the magic value "unspecified" if there is no default.
|
2023-05-18 18:58:42 -03:00
|
|
|
default: object = None
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
def __init__(
|
|
|
|
self,
|
|
|
|
*,
|
|
|
|
py_default: str | None = None,
|
2023-07-24 18:23:58 -03:00
|
|
|
**kwargs: Any
|
2023-05-21 17:23:14 -03:00
|
|
|
) -> None:
|
2014-01-16 15:32:01 -04:00
|
|
|
self.py_default = py_default
|
2013-10-19 04:09:25 -03:00
|
|
|
try:
|
|
|
|
self.return_converter_init(**kwargs)
|
|
|
|
except TypeError as e:
|
|
|
|
s = ', '.join(name + '=' + repr(value) for name, value in kwargs.items())
|
|
|
|
sys.exit(self.__class__.__name__ + '(' + s + ')\n' + str(e))
|
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
def return_converter_init(self) -> None: ...
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
def declare(self, data: CRenderData) -> None:
|
|
|
|
line: list[str] = []
|
2013-10-19 04:09:25 -03:00
|
|
|
add = line.append
|
|
|
|
add(self.type)
|
|
|
|
if not self.type.endswith('*'):
|
|
|
|
add(' ')
|
2023-05-07 18:55:37 -03:00
|
|
|
add(data.converter_retval + ';')
|
2013-10-19 04:09:25 -03:00
|
|
|
data.declarations.append(''.join(line))
|
2023-05-07 18:55:37 -03:00
|
|
|
data.return_value = data.converter_retval
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
def err_occurred_if(
|
|
|
|
self,
|
|
|
|
expr: str,
|
|
|
|
data: CRenderData
|
|
|
|
) -> None:
|
2023-05-20 17:16:49 -03:00
|
|
|
line = f'if (({expr}) && PyErr_Occurred()) {{\n goto exit;\n}}\n'
|
|
|
|
data.return_conversion.append(line)
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
def err_occurred_if_null_pointer(
|
|
|
|
self,
|
|
|
|
variable: str,
|
|
|
|
data: CRenderData
|
|
|
|
) -> None:
|
2023-05-20 17:16:49 -03:00
|
|
|
line = f'if ({variable} == NULL) {{\n goto exit;\n}}\n'
|
|
|
|
data.return_conversion.append(line)
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
def render(
|
|
|
|
self,
|
|
|
|
function: Function,
|
|
|
|
data: CRenderData
|
|
|
|
) -> None: ...
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
add_c_return_converter(CReturnConverter, 'object')
|
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
|
2014-01-12 15:09:57 -04:00
|
|
|
class bool_return_converter(CReturnConverter):
|
2013-10-19 04:09:25 -03:00
|
|
|
type = 'int'
|
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
def render(
|
|
|
|
self,
|
|
|
|
function: Function,
|
|
|
|
data: CRenderData
|
|
|
|
) -> None:
|
2013-10-19 04:09:25 -03:00
|
|
|
self.declare(data)
|
2023-05-07 18:55:37 -03:00
|
|
|
self.err_occurred_if(f"{data.converter_retval} == -1", data)
|
|
|
|
data.return_conversion.append(
|
|
|
|
f'return_value = PyBool_FromLong((long){data.converter_retval});\n'
|
|
|
|
)
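        # Rough sketch of the C this emits, assuming the converter retval
        # slot is named "_return_value" (that name is not defined in this
        # excerpt):
        #
        #     if ((_return_value == -1) && PyErr_Occurred()) {
        #         goto exit;
        #     }
        #     return_value = PyBool_FromLong((long)_return_value);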
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
class long_return_converter(CReturnConverter):
|
|
|
|
type = 'long'
|
2014-01-12 15:09:57 -04:00
|
|
|
conversion_fn = 'PyLong_FromLong'
|
|
|
|
cast = ''
|
2014-08-05 06:55:21 -03:00
|
|
|
unsigned_cast = ''
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
def render(
|
|
|
|
self,
|
|
|
|
function: Function,
|
|
|
|
data: CRenderData
|
|
|
|
) -> None:
|
2013-10-19 04:09:25 -03:00
|
|
|
self.declare(data)
|
2023-05-07 18:55:37 -03:00
|
|
|
self.err_occurred_if(f"{data.converter_retval} == {self.unsigned_cast}-1", data)
|
2013-10-19 04:09:25 -03:00
|
|
|
data.return_conversion.append(
|
2023-05-07 18:55:37 -03:00
|
|
|
f'return_value = {self.conversion_fn}({self.cast}{data.converter_retval});\n'
|
|
|
|
)
|
2014-01-12 15:09:57 -04:00
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
|
2014-01-12 15:09:57 -04:00
|
|
|
class int_return_converter(long_return_converter):
|
|
|
|
type = 'int'
|
|
|
|
cast = '(long)'
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
|
2014-01-19 03:50:21 -04:00
|
|
|
class init_return_converter(long_return_converter):
|
|
|
|
"""
|
|
|
|
Special return converter for __init__ functions.
|
|
|
|
"""
|
|
|
|
type = 'int'
|
|
|
|
cast = '(long)'
|
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
def render(
|
|
|
|
self,
|
|
|
|
function: Function,
|
|
|
|
data: CRenderData
|
|
|
|
) -> None: ...
|
|
|
|
|
2014-01-19 03:50:21 -04:00
|
|
|
|
2014-01-12 15:09:57 -04:00
|
|
|
class unsigned_long_return_converter(long_return_converter):
|
|
|
|
type = 'unsigned long'
|
|
|
|
conversion_fn = 'PyLong_FromUnsignedLong'
|
2014-08-05 06:55:21 -03:00
|
|
|
unsigned_cast = '(unsigned long)'
|
2014-01-12 15:09:57 -04:00
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
|
2014-01-12 15:09:57 -04:00
|
|
|
class unsigned_int_return_converter(unsigned_long_return_converter):
|
|
|
|
type = 'unsigned int'
|
|
|
|
cast = '(unsigned long)'
|
2014-08-05 06:55:21 -03:00
|
|
|
unsigned_cast = '(unsigned int)'
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
|
2014-01-12 15:09:57 -04:00
|
|
|
class Py_ssize_t_return_converter(long_return_converter):
|
2013-10-19 04:09:25 -03:00
|
|
|
type = 'Py_ssize_t'
|
2014-01-12 15:09:57 -04:00
|
|
|
conversion_fn = 'PyLong_FromSsize_t'
|
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
|
2014-01-12 15:09:57 -04:00
|
|
|
class size_t_return_converter(long_return_converter):
|
|
|
|
type = 'size_t'
|
|
|
|
conversion_fn = 'PyLong_FromSize_t'
|
2014-08-05 06:55:21 -03:00
|
|
|
unsigned_cast = '(size_t)'
|
2014-01-12 15:09:57 -04:00
|
|
|
|
|
|
|
|
|
|
|
class double_return_converter(CReturnConverter):
|
|
|
|
type = 'double'
|
|
|
|
cast = ''
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
def render(
|
|
|
|
self,
|
|
|
|
function: Function,
|
|
|
|
data: CRenderData
|
|
|
|
) -> None:
|
2013-10-19 04:09:25 -03:00
|
|
|
self.declare(data)
|
2023-05-07 18:55:37 -03:00
|
|
|
self.err_occurred_if(f"{data.converter_retval} == -1.0", data)
|
2013-10-19 04:09:25 -03:00
|
|
|
data.return_conversion.append(
|
2023-05-07 18:55:37 -03:00
|
|
|
f'return_value = PyFloat_FromDouble({self.cast}{data.converter_retval});\n'
|
|
|
|
)
|
2014-01-12 15:09:57 -04:00
|
|
|
|
2023-05-21 17:23:14 -03:00
|
|
|
|
2014-01-12 15:09:57 -04:00
|
|
|
class float_return_converter(double_return_converter):
|
|
|
|
type = 'float'
|
|
|
|
cast = '(double)'
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
|
2023-07-04 19:13:30 -03:00
|
|
|
def eval_ast_expr(
|
|
|
|
node: ast.expr,
|
|
|
|
globals: dict[str, Any],
|
|
|
|
*,
|
|
|
|
filename: str = '-'
|
2023-07-29 09:39:21 -03:00
|
|
|
) -> Any:
|
2015-05-04 10:59:46 -03:00
|
|
|
"""
|
2023-07-29 09:39:21 -03:00
|
|
|
Takes an ast.Expr node. Compiles it into a function object,
|
|
|
|
then calls the function object with 0 arguments.
|
|
|
|
Returns the result of that function call.
|
2015-05-04 10:59:46 -03:00
|
|
|
|
|
|
|
globals represents the globals dict the expression
|
|
|
|
should see. (There's no equivalent for "locals" here.)
|
|
|
|
"""
|
|
|
|
|
|
|
|
if isinstance(node, ast.Expr):
|
|
|
|
node = node.value
|
|
|
|
|
2023-07-04 19:13:30 -03:00
|
|
|
expr = ast.Expression(node)
|
|
|
|
co = compile(expr, filename, 'eval')
|
2023-05-16 09:02:18 -03:00
|
|
|
fn = FunctionType(co, globals)
|
2015-05-04 10:59:46 -03:00
|
|
|
return fn()
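
# Usage sketch (illustrative, not part of the module): evaluating a default
# expression against a caller-supplied globals dict:
#
#     node = ast.parse("sys.maxsize", mode="eval").body
#     value = eval_ast_expr(node, {"sys": sys})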
|
|
|
|
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
class IndentStack:
|
2023-07-22 19:30:42 -03:00
|
|
|
def __init__(self) -> None:
|
|
|
|
self.indents: list[int] = []
|
|
|
|
self.margin: str | None = None
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-22 19:30:42 -03:00
|
|
|
def _ensure(self) -> None:
|
2013-10-19 04:09:25 -03:00
|
|
|
if not self.indents:
|
|
|
|
fail('IndentStack expected indents, but none are defined.')
|
|
|
|
|
2023-07-20 20:19:11 -03:00
|
|
|
def measure(self, line: str) -> int:
|
2013-10-19 04:09:25 -03:00
|
|
|
"""
|
|
|
|
Returns the length of the line's margin.
|
|
|
|
"""
|
|
|
|
if '\t' in line:
|
2014-02-09 02:15:29 -04:00
|
|
|
fail('Tab characters are illegal in the Argument Clinic DSL.')
|
2013-10-19 04:09:25 -03:00
|
|
|
stripped = line.lstrip()
|
|
|
|
if not len(stripped):
|
|
|
|
# we can't tell anything from an empty line
|
|
|
|
# so just pretend it's indented like our current indent
|
|
|
|
self._ensure()
|
|
|
|
return self.indents[-1]
|
|
|
|
return len(line) - len(stripped)
|
|
|
|
|
2023-07-20 20:19:11 -03:00
|
|
|
def infer(self, line: str) -> int:
|
2013-10-19 04:09:25 -03:00
|
|
|
"""
|
|
|
|
Infer what is now the current margin based on this line.
|
|
|
|
Returns:
|
|
|
|
1 if we have indented (or this is the first margin)
|
|
|
|
0 if the margin has not changed
|
|
|
|
-N if we have dedented N times
|
|
|
|
"""
|
|
|
|
indent = self.measure(line)
|
|
|
|
margin = ' ' * indent
|
|
|
|
if not self.indents:
|
|
|
|
self.indents.append(indent)
|
|
|
|
self.margin = margin
|
|
|
|
return 1
|
|
|
|
current = self.indents[-1]
|
|
|
|
if indent == current:
|
|
|
|
return 0
|
|
|
|
if indent > current:
|
|
|
|
self.indents.append(indent)
|
|
|
|
self.margin = margin
|
|
|
|
return 1
|
|
|
|
# indent < current
|
|
|
|
if indent not in self.indents:
|
|
|
|
fail("Illegal outdent.")
|
|
|
|
outdent_count = 0
|
|
|
|
while indent != current:
|
|
|
|
self.indents.pop()
|
|
|
|
current = self.indents[-1]
|
|
|
|
outdent_count -= 1
|
|
|
|
self.margin = margin
|
|
|
|
return outdent_count
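
    # Illustrative behaviour (not part of the class): successive calls
    # return 1 for a new margin, 0 for no change, and a negative count
    # when dedenting.
    #
    #     stack = IndentStack()
    #     stack.infer("foo:")      # -> 1  (first margin established)
    #     stack.infer("    bar")   # -> 1  (indented)
    #     stack.infer("    baz")   # -> 0  (same margin)
    #     stack.infer("qux")       # -> -1 (dedented once)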
|
|
|
|
|
|
|
|
@property
|
2023-07-20 20:19:11 -03:00
|
|
|
def depth(self) -> int:
|
2013-10-19 04:09:25 -03:00
|
|
|
"""
|
|
|
|
Returns how many margins are currently defined.
|
|
|
|
"""
|
|
|
|
return len(self.indents)
|
|
|
|
|
2023-07-20 20:19:11 -03:00
|
|
|
def dedent(self, line: str) -> str:
|
2013-10-19 04:09:25 -03:00
|
|
|
"""
|
|
|
|
Dedents a line by the currently defined margin.
|
|
|
|
"""
|
2023-08-03 21:17:17 -03:00
|
|
|
assert self.margin is not None, "Cannot call .dedent() before calling .infer()"
|
2013-10-19 04:09:25 -03:00
|
|
|
margin = self.margin
|
|
|
|
indent = self.indents[-1]
|
|
|
|
if not line.startswith(margin):
|
|
|
|
fail('Cannot dedent, line does not start with the previous margin:')
|
|
|
|
return line[indent:]
|
|
|
|
|
|
|
|
|
2023-08-01 17:42:39 -03:00
|
|
|
StateKeeper = Callable[[str], None]
|
2023-07-03 10:14:59 -03:00
|
|
|
ConverterArgs = dict[str, Any]
|
2023-05-20 18:55:02 -03:00
|
|
|
|
2023-07-03 18:16:21 -03:00
|
|
|
class ParamState(enum.IntEnum):
|
|
|
|
"""Parameter parsing state.
|
|
|
|
|
|
|
|
[ [ a, b, ] c, ] d, e, f=3, [ g, h, [ i ] ] <- line
|
|
|
|
01 2 3 4 5 6 <- state transitions
|
|
|
|
"""
|
|
|
|
# Before we've seen anything.
|
|
|
|
# Legal transitions: to LEFT_SQUARE_BEFORE or REQUIRED
|
|
|
|
START = 0
|
|
|
|
|
|
|
|
    # Left square brackets before required params.
|
|
|
|
LEFT_SQUARE_BEFORE = 1
|
|
|
|
|
|
|
|
# In a group, before required params.
|
|
|
|
GROUP_BEFORE = 2
|
|
|
|
|
|
|
|
# Required params, positional-or-keyword or positional-only (we
|
|
|
|
# don't know yet). Renumber left groups!
|
|
|
|
REQUIRED = 3
|
|
|
|
|
|
|
|
# Positional-or-keyword or positional-only params that now must have
|
|
|
|
# default values.
|
|
|
|
OPTIONAL = 4
|
|
|
|
|
|
|
|
# In a group, after required params.
|
|
|
|
GROUP_AFTER = 5
|
|
|
|
|
|
|
|
# Right square brackets after required params.
|
|
|
|
RIGHT_SQUARE_AFTER = 6
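
    # Reading the docstring example against these states: the leading "[ ["
    # is parsed in LEFT_SQUARE_BEFORE, "a, b, ] c, ]" in GROUP_BEFORE,
    # the required "d, e" in REQUIRED, "f=3" switches to OPTIONAL,
    # "[ g, h, [ i" is GROUP_AFTER, and the closing "] ]" ends in
    # RIGHT_SQUARE_AFTER.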
|
|
|
|
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
class DSLParser:
|
2023-07-03 07:06:54 -03:00
|
|
|
function: Function | None
|
|
|
|
state: StateKeeper
|
|
|
|
keyword_only: bool
|
|
|
|
positional_only: bool
|
|
|
|
group: int
|
|
|
|
    parameter_state: ParamState
|
|
|
|
seen_positional_with_default: bool
|
|
|
|
indent: IndentStack
|
2023-07-13 19:54:05 -03:00
|
|
|
kind: FunctionKind
|
2023-07-03 07:06:54 -03:00
|
|
|
coexist: bool
|
|
|
|
parameter_continuation: str
|
|
|
|
preserve_output: bool
|
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def __init__(self, clinic: Clinic) -> None:
|
2013-10-19 04:09:25 -03:00
|
|
|
self.clinic = clinic
|
|
|
|
|
|
|
|
self.directives = {}
|
|
|
|
for name in dir(self):
|
|
|
|
# functions that start with directive_ are added to directives
|
|
|
|
_, s, key = name.partition("directive_")
|
|
|
|
if s:
|
|
|
|
self.directives[key] = getattr(self, name)
|
|
|
|
|
|
|
|
# functions that start with at_ are too, with an @ in front
|
|
|
|
_, s, key = name.partition("at_")
|
|
|
|
if s:
|
|
|
|
self.directives['@' + key] = getattr(self, name)
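            # So, for example, directive_module() is registered as
            # self.directives['module'] and at_classmethod() as
            # self.directives['@classmethod'].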
|
|
|
|
|
|
|
|
self.reset()
|
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def reset(self) -> None:
|
2013-10-19 04:09:25 -03:00
|
|
|
self.function = None
|
2023-07-03 07:06:54 -03:00
|
|
|
self.state = self.state_dsl_start
|
2013-10-19 04:09:25 -03:00
|
|
|
self.keyword_only = False
|
2016-06-09 10:30:29 -03:00
|
|
|
self.positional_only = False
|
2013-10-19 04:09:25 -03:00
|
|
|
self.group = 0
|
2023-07-03 18:16:21 -03:00
|
|
|
self.parameter_state: ParamState = ParamState.START
|
2014-01-26 00:43:29 -04:00
|
|
|
self.seen_positional_with_default = False
|
2013-10-19 04:09:25 -03:00
|
|
|
self.indent = IndentStack()
|
|
|
|
self.kind = CALLABLE
|
|
|
|
self.coexist = False
|
2014-01-16 15:32:01 -04:00
|
|
|
self.parameter_continuation = ''
|
2014-01-17 21:47:17 -04:00
|
|
|
self.preserve_output = False
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def directive_version(self, required: str) -> None:
|
2013-11-23 18:54:00 -04:00
|
|
|
global version
|
|
|
|
if version_comparitor(version, required) < 0:
|
|
|
|
fail("Insufficient Clinic version!\n Version: " + version + "\n Required: " + required)
|
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def directive_module(self, name: str) -> None:
|
2023-05-20 07:52:32 -03:00
|
|
|
fields = name.split('.')[:-1]
|
2013-10-19 04:09:25 -03:00
|
|
|
module, cls = self.clinic._module_and_class(fields)
|
|
|
|
if cls:
|
|
|
|
fail("Can't nest a module inside a class!")
|
2014-01-26 00:43:29 -04:00
|
|
|
|
2023-08-04 02:28:25 -03:00
|
|
|
if name in module.modules:
|
2014-01-26 00:43:29 -04:00
|
|
|
fail("Already defined module " + repr(name) + "!")
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
m = Module(name, module)
|
|
|
|
module.modules[name] = m
|
|
|
|
self.block.signatures.append(m)
|
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def directive_class(
|
|
|
|
self,
|
|
|
|
name: str,
|
|
|
|
typedef: str,
|
|
|
|
type_object: str
|
|
|
|
) -> None:
|
2013-10-19 04:09:25 -03:00
|
|
|
fields = name.split('.')
|
|
|
|
name = fields.pop()
|
|
|
|
module, cls = self.clinic._module_and_class(fields)
|
|
|
|
|
2014-01-26 00:43:29 -04:00
|
|
|
parent = cls or module
|
|
|
|
if name in parent.classes:
|
|
|
|
fail("Already defined class " + repr(name) + "!")
|
|
|
|
|
|
|
|
c = Class(name, module, cls, typedef, type_object)
|
|
|
|
parent.classes[name] = c
|
2013-10-19 04:09:25 -03:00
|
|
|
self.block.signatures.append(c)
|
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def directive_set(self, name: str, value: str) -> None:
|
2014-01-17 21:47:17 -04:00
|
|
|
if name not in ("line_prefix", "line_suffix"):
|
|
|
|
fail("unknown variable", repr(name))
|
|
|
|
|
|
|
|
value = value.format_map({
|
|
|
|
'block comment start': '/*',
|
|
|
|
'block comment end': '*/',
|
|
|
|
})
|
|
|
|
|
|
|
|
self.clinic.__dict__[name] = value
|
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def directive_destination(
|
|
|
|
self,
|
|
|
|
name: str,
|
|
|
|
command: str,
|
2023-07-24 18:23:58 -03:00
|
|
|
*args: str
|
2023-05-20 18:55:02 -03:00
|
|
|
) -> None:
|
2023-07-22 04:43:13 -03:00
|
|
|
match command:
|
|
|
|
case "new":
|
|
|
|
self.clinic.add_destination(name, *args)
|
|
|
|
case "clear":
|
|
|
|
self.clinic.get_destination(name).clear()
|
|
|
|
case _:
|
|
|
|
fail("unknown destination command", repr(command))
|
2014-01-17 21:47:17 -04:00
|
|
|
|
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def directive_output(
|
|
|
|
self,
|
|
|
|
command_or_name: str,
|
|
|
|
destination: str = ''
|
|
|
|
) -> None:
|
2015-04-03 17:09:02 -03:00
|
|
|
fd = self.clinic.destination_buffers
|
2014-01-17 21:47:17 -04:00
|
|
|
|
2015-04-03 17:09:02 -03:00
|
|
|
if command_or_name == "preset":
|
2014-01-17 21:47:17 -04:00
|
|
|
preset = self.clinic.presets.get(destination)
|
|
|
|
if not preset:
|
|
|
|
fail("Unknown preset " + repr(destination) + "!")
|
|
|
|
fd.update(preset)
|
|
|
|
return
|
|
|
|
|
2015-04-03 17:09:02 -03:00
|
|
|
if command_or_name == "push":
|
|
|
|
self.clinic.destination_buffers_stack.append(fd.copy())
|
2014-01-17 21:47:17 -04:00
|
|
|
return
|
|
|
|
|
2015-04-03 17:09:02 -03:00
|
|
|
if command_or_name == "pop":
|
|
|
|
if not self.clinic.destination_buffers_stack:
|
2014-01-17 21:47:17 -04:00
|
|
|
fail("Can't 'output pop', stack is empty!")
|
2015-04-03 17:09:02 -03:00
|
|
|
previous_fd = self.clinic.destination_buffers_stack.pop()
|
2014-01-17 21:47:17 -04:00
|
|
|
fd.update(previous_fd)
|
|
|
|
return
|
|
|
|
|
|
|
|
# secret command for debugging!
|
2015-04-03 17:09:02 -03:00
|
|
|
if command_or_name == "print":
|
2014-01-17 21:47:17 -04:00
|
|
|
self.block.output.append(pprint.pformat(fd))
|
|
|
|
self.block.output.append('\n')
|
|
|
|
return
|
|
|
|
|
2022-07-05 06:06:04 -03:00
|
|
|
d = self.clinic.get_destination_buffer(destination)
|
2014-01-17 21:47:17 -04:00
|
|
|
|
2015-04-03 17:09:02 -03:00
|
|
|
if command_or_name == "everything":
|
2014-01-17 21:47:17 -04:00
|
|
|
for name in list(fd):
|
|
|
|
fd[name] = d
|
|
|
|
return
|
|
|
|
|
2015-04-03 17:09:02 -03:00
|
|
|
if command_or_name not in fd:
|
|
|
|
fail("Invalid command / destination name " + repr(command_or_name) + ", must be one of:\n preset push pop print everything " + " ".join(fd))
|
|
|
|
fd[command_or_name] = d
|
2014-01-17 21:47:17 -04:00
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def directive_dump(self, name: str) -> None:
|
2014-01-17 21:47:17 -04:00
|
|
|
self.block.output.append(self.clinic.get_destination(name).dump())
|
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def directive_printout(self, *args: str) -> None:
|
2014-01-17 21:47:17 -04:00
|
|
|
self.block.output.append(' '.join(args))
|
|
|
|
self.block.output.append('\n')
|
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def directive_preserve(self) -> None:
|
2014-01-17 21:47:17 -04:00
|
|
|
if self.preserve_output:
|
|
|
|
fail("Can't have preserve twice in one block!")
|
|
|
|
self.preserve_output = True
|
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def at_classmethod(self) -> None:
|
2014-01-16 15:32:01 -04:00
|
|
|
if self.kind is not CALLABLE:
|
|
|
|
fail("Can't set @classmethod, function is not a normal callable")
|
2013-10-19 04:09:25 -03:00
|
|
|
self.kind = CLASS_METHOD
|
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def at_staticmethod(self) -> None:
|
2014-01-16 15:32:01 -04:00
|
|
|
if self.kind is not CALLABLE:
|
|
|
|
fail("Can't set @staticmethod, function is not a normal callable")
|
2013-10-19 04:09:25 -03:00
|
|
|
self.kind = STATIC_METHOD
|
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def at_coexist(self) -> None:
|
2014-01-16 15:32:01 -04:00
|
|
|
if self.coexist:
|
|
|
|
fail("Called @coexist twice!")
|
2013-10-19 04:09:25 -03:00
|
|
|
self.coexist = True
|
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def parse(self, block: Block) -> None:
|
2013-10-19 04:09:25 -03:00
|
|
|
self.reset()
|
|
|
|
self.block = block
|
2014-01-17 21:47:17 -04:00
|
|
|
self.saved_output = self.block.output
|
|
|
|
block.output = []
|
2013-10-19 04:09:25 -03:00
|
|
|
block_start = self.clinic.block_parser.line_number
|
|
|
|
lines = block.input.split('\n')
|
|
|
|
for line_number, line in enumerate(lines, self.clinic.block_parser.block_start_line_number):
|
|
|
|
if '\t' in line:
|
|
|
|
fail('Tab characters are illegal in the Clinic DSL.\n\t' + repr(line), line_number=block_start)
|
2023-08-02 21:00:06 -03:00
|
|
|
try:
|
|
|
|
self.state(line)
|
|
|
|
except ClinicError as exc:
|
|
|
|
exc.lineno = line_number
|
|
|
|
raise
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-08-01 17:42:39 -03:00
|
|
|
self.do_post_block_processing_cleanup()
|
2023-07-29 15:47:42 -03:00
|
|
|
block.output.extend(self.clinic.language.render(self.clinic, block.signatures))
|
2014-01-17 21:47:17 -04:00
|
|
|
|
|
|
|
if self.preserve_output:
|
|
|
|
if block.output:
|
|
|
|
fail("'preserve' only works for blocks that don't produce any output!")
|
|
|
|
block.output = self.saved_output
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-08-01 20:32:27 -03:00
|
|
|
def in_docstring(self) -> bool:
|
|
|
|
"""Return true if we are processing a docstring."""
|
|
|
|
return self.state in {
|
|
|
|
self.state_parameter_docstring,
|
|
|
|
self.state_function_docstring,
|
|
|
|
}
|
|
|
|
|
|
|
|
def valid_line(self, line: str) -> bool:
|
2013-10-19 04:09:25 -03:00
|
|
|
# ignore comment-only lines
|
|
|
|
if line.lstrip().startswith('#'):
|
2023-07-02 20:42:38 -03:00
|
|
|
return False
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
# Ignore empty lines too
|
|
|
|
# (but not in docstring sections!)
|
2023-08-01 20:32:27 -03:00
|
|
|
if not self.in_docstring() and not line.strip():
|
2023-07-02 20:42:38 -03:00
|
|
|
return False
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-02 20:42:38 -03:00
|
|
|
return True
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-05-20 18:55:02 -03:00
|
|
|
def next(
|
|
|
|
self,
|
|
|
|
state: StateKeeper,
|
|
|
|
line: str | None = None
|
|
|
|
) -> None:
|
2013-10-19 04:09:25 -03:00
|
|
|
self.state = state
|
|
|
|
if line is not None:
|
|
|
|
self.state(line)
|
|
|
|
|
2023-08-01 17:42:39 -03:00
|
|
|
def state_dsl_start(self, line: str) -> None:
|
2023-07-02 20:42:38 -03:00
|
|
|
if not self.valid_line(line):
|
2013-10-19 04:09:25 -03:00
|
|
|
return
|
2014-02-01 02:03:12 -04:00
|
|
|
|
|
|
|
# is it a directive?
|
|
|
|
fields = shlex.split(line)
|
|
|
|
directive_name = fields[0]
|
|
|
|
directive = self.directives.get(directive_name, None)
|
|
|
|
if directive:
|
|
|
|
try:
|
|
|
|
directive(*fields[1:])
|
|
|
|
except TypeError as e:
|
|
|
|
fail(str(e))
|
|
|
|
return
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
self.next(self.state_modulename_name, line)
|
|
|
|
|
2023-08-01 17:42:39 -03:00
|
|
|
def state_modulename_name(self, line: str) -> None:
|
2013-10-19 04:09:25 -03:00
|
|
|
# looking for declaration, which establishes the leftmost column
|
|
|
|
# line should be
|
|
|
|
# modulename.fnname [as c_basename] [-> return annotation]
|
|
|
|
# square brackets denote optional syntax.
|
|
|
|
#
|
2014-01-15 02:22:41 -04:00
|
|
|
# alternatively:
|
|
|
|
# modulename.fnname [as c_basename] = modulename.existing_fn_name
|
|
|
|
# clones the parameters and return converter from that
|
|
|
|
# function. you can't modify them. you must enter a
|
|
|
|
# new docstring.
|
|
|
|
#
|
2013-10-19 04:09:25 -03:00
|
|
|
# (but we might find a directive first!)
|
|
|
|
#
|
|
|
|
# this line is permitted to start with whitespace.
|
|
|
|
# we'll call this number of spaces F (for "function").
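        #
        # illustrative declaration lines (the names are made up):
        #
        #     mymodule.myfunc -> int
        #     mymodule.myfunc as my_c_name
        #     mymodule.myfunc as my_c_name = mymodule.existing_func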
|
|
|
|
|
2023-07-03 11:03:31 -03:00
|
|
|
if not self.valid_line(line):
|
2013-10-19 04:09:25 -03:00
|
|
|
return
|
|
|
|
|
|
|
|
self.indent.infer(line)
|
|
|
|
|
2014-01-15 02:22:41 -04:00
|
|
|
# are we cloning?
|
|
|
|
before, equals, existing = line.rpartition('=')
|
2023-07-03 11:03:31 -03:00
|
|
|
c_basename: str | None
|
2014-01-15 02:22:41 -04:00
|
|
|
if equals:
|
|
|
|
full_name, _, c_basename = before.partition(' as ')
|
|
|
|
full_name = full_name.strip()
|
|
|
|
c_basename = c_basename.strip()
|
|
|
|
existing = existing.strip()
|
|
|
|
if (is_legal_py_identifier(full_name) and
|
|
|
|
(not c_basename or is_legal_c_identifier(c_basename)) and
|
|
|
|
is_legal_py_identifier(existing)):
|
|
|
|
# we're cloning!
|
|
|
|
fields = [x.strip() for x in existing.split('.')]
|
|
|
|
function_name = fields.pop()
|
|
|
|
module, cls = self.clinic._module_and_class(fields)
|
|
|
|
|
|
|
|
for existing_function in (cls or module).functions:
|
|
|
|
if existing_function.name == function_name:
|
|
|
|
break
|
|
|
|
else:
|
2023-08-02 21:00:06 -03:00
|
|
|
print(f"{cls=}, {module=}, {existing=}", file=sys.stderr)
|
|
|
|
print(f"{(cls or module).functions=}", file=sys.stderr)
|
2023-07-28 15:10:45 -03:00
|
|
|
fail(f"Couldn't find existing function {existing!r}!")
|
2014-01-15 02:22:41 -04:00
|
|
|
|
|
|
|
fields = [x.strip() for x in full_name.split('.')]
|
|
|
|
function_name = fields.pop()
|
|
|
|
module, cls = self.clinic._module_and_class(fields)
|
|
|
|
|
2023-07-13 19:54:05 -03:00
|
|
|
if not (existing_function.kind is self.kind and existing_function.coexist == self.coexist):
|
2014-01-15 02:22:41 -04:00
|
|
|
fail("'kind' of function and cloned function don't match! (@classmethod/@staticmethod/@coexist)")
|
2023-07-11 18:21:14 -03:00
|
|
|
function = existing_function.copy(
|
|
|
|
name=function_name, full_name=full_name, module=module,
|
|
|
|
cls=cls, c_basename=c_basename, docstring=''
|
|
|
|
)
|
|
|
|
self.function = function
|
|
|
|
self.block.signatures.append(function)
|
|
|
|
(cls or module).functions.append(function)
|
2014-01-15 02:22:41 -04:00
|
|
|
self.next(self.state_function_docstring)
|
|
|
|
return
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
line, _, returns = line.partition('->')
|
2023-07-27 18:51:18 -03:00
|
|
|
returns = returns.strip()
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
full_name, _, c_basename = line.partition(' as ')
|
|
|
|
full_name = full_name.strip()
|
|
|
|
c_basename = c_basename.strip() or None
|
|
|
|
|
2013-10-27 06:49:39 -03:00
|
|
|
if not is_legal_py_identifier(full_name):
|
2023-05-20 17:16:49 -03:00
|
|
|
fail("Illegal function name:", full_name)
|
2013-10-27 06:49:39 -03:00
|
|
|
if c_basename and not is_legal_c_identifier(c_basename):
|
2023-05-20 17:16:49 -03:00
|
|
|
fail("Illegal C basename:", c_basename)
|
2013-10-27 06:49:39 -03:00
|
|
|
|
2014-01-19 03:50:21 -04:00
|
|
|
return_converter = None
|
|
|
|
if returns:
|
2023-05-20 17:16:49 -03:00
|
|
|
ast_input = f"def x() -> {returns}: pass"
|
2013-10-19 04:09:25 -03:00
|
|
|
try:
|
2023-07-27 18:51:18 -03:00
|
|
|
module_node = ast.parse(ast_input)
|
2013-10-19 04:09:25 -03:00
|
|
|
except SyntaxError:
|
2023-07-27 18:51:18 -03:00
|
|
|
fail(f"Badly formed annotation for {full_name}: {returns!r}")
|
|
|
|
function_node = module_node.body[0]
|
|
|
|
assert isinstance(function_node, ast.FunctionDef)
|
2013-10-19 04:09:25 -03:00
|
|
|
try:
|
2023-07-27 18:51:18 -03:00
|
|
|
name, legacy, kwargs = self.parse_converter(function_node.returns)
|
2014-01-14 16:02:43 -04:00
|
|
|
if legacy:
|
2023-07-27 18:51:18 -03:00
|
|
|
fail(f"Legacy converter {name!r} not allowed as a return converter")
|
2013-10-19 04:09:25 -03:00
|
|
|
if name not in return_converters:
|
2023-07-27 18:51:18 -03:00
|
|
|
fail(f"No available return converter called {name!r}")
|
2013-10-19 04:09:25 -03:00
|
|
|
return_converter = return_converters[name](**kwargs)
|
|
|
|
except ValueError:
|
2023-07-27 18:51:18 -03:00
|
|
|
fail(f"Badly formed annotation for {full_name}: {returns!r}")
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
fields = [x.strip() for x in full_name.split('.')]
|
|
|
|
function_name = fields.pop()
|
|
|
|
module, cls = self.clinic._module_and_class(fields)
|
|
|
|
|
2014-01-12 18:12:59 -04:00
|
|
|
fields = full_name.split('.')
|
2021-03-24 21:19:23 -03:00
|
|
|
if fields[-1] in unsupported_special_methods:
|
|
|
|
fail(f"{fields[-1]} is a special method and cannot be converted to Argument Clinic! (Yet.)")
|
|
|
|
|
2014-01-12 18:12:59 -04:00
|
|
|
if fields[-1] == '__new__':
|
2023-07-13 19:54:05 -03:00
|
|
|
if (self.kind is not CLASS_METHOD) or (not cls):
|
2014-01-12 18:12:59 -04:00
|
|
|
fail("__new__ must be a class method!")
|
|
|
|
self.kind = METHOD_NEW
|
|
|
|
elif fields[-1] == '__init__':
|
2023-07-13 19:54:05 -03:00
|
|
|
if (self.kind is not CALLABLE) or (not cls):
|
2014-01-12 18:12:59 -04:00
|
|
|
fail("__init__ must be a normal method, not a class or static method!")
|
|
|
|
self.kind = METHOD_INIT
|
2014-01-19 03:50:21 -04:00
|
|
|
if not return_converter:
|
|
|
|
return_converter = init_return_converter()
|
2014-01-12 18:12:59 -04:00
|
|
|
|
2014-01-19 03:50:21 -04:00
|
|
|
if not return_converter:
|
|
|
|
return_converter = CReturnConverter()
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
if not module:
|
|
|
|
fail("Undefined module used in declaration of " + repr(full_name.strip()) + ".")
|
|
|
|
self.function = Function(name=function_name, full_name=full_name, module=module, cls=cls, c_basename=c_basename,
|
|
|
|
return_converter=return_converter, kind=self.kind, coexist=self.coexist)
|
|
|
|
self.block.signatures.append(self.function)
|
2014-01-24 10:17:25 -04:00
|
|
|
|
|
|
|
# insert a self converter automatically
|
2014-01-26 00:43:29 -04:00
|
|
|
type, name = correct_name_for_self(self.function)
|
|
|
|
kwargs = {}
|
|
|
|
if cls and type == "PyObject *":
|
|
|
|
kwargs['type'] = cls.typedef
|
2014-02-01 02:03:12 -04:00
|
|
|
sc = self.function.self_converter = self_converter(name, name, self.function, **kwargs)
|
2023-07-03 11:03:31 -03:00
|
|
|
p_self = Parameter(name, inspect.Parameter.POSITIONAL_ONLY,
|
|
|
|
function=self.function, converter=sc)
|
|
|
|
self.function.parameters[name] = p_self
|
2014-01-24 10:17:25 -04:00
|
|
|
|
2014-01-15 02:22:41 -04:00
|
|
|
(cls or module).functions.append(self.function)
|
2013-10-19 04:09:25 -03:00
|
|
|
self.next(self.state_parameters_start)
|
|
|
|
|
|
|
|
# Now entering the parameters section. The rules, formally stated:
|
|
|
|
#
|
|
|
|
# * All lines must be indented with spaces only.
|
|
|
|
# * The first line must be a parameter declaration.
|
|
|
|
# * The first line must be indented.
|
|
|
|
# * This first line establishes the indent for parameters.
|
|
|
|
# * We'll call this number of spaces P (for "parameter").
|
|
|
|
# * Thenceforth:
|
|
|
|
# * Lines indented with P spaces specify a parameter.
|
|
|
|
# * Lines indented with > P spaces are docstrings for the previous
|
|
|
|
# parameter.
|
|
|
|
# * We'll call this number of spaces D (for "docstring").
|
|
|
|
# * All subsequent lines indented with >= D spaces are stored as
|
|
|
|
# part of the per-parameter docstring.
|
|
|
|
# * All lines will have the first D spaces of the indent stripped
|
|
|
|
# before they are stored.
|
|
|
|
# * It's illegal to have a line starting with a number of spaces X
|
|
|
|
# such that P < X < D.
|
|
|
|
# * A line with < P spaces is the first line of the function
|
|
|
|
# docstring, which ends processing for parameters and per-parameter
|
|
|
|
# docstrings.
|
|
|
|
# * The first line of the function docstring must be at the same
|
|
|
|
# indent as the function declaration.
|
|
|
|
# * It's illegal to have any line in the parameters section starting
|
|
|
|
# with X spaces such that F < X < P. (As before, F is the indent
|
|
|
|
# of the function declaration.)
|
|
|
|
#
|
|
|
|
# Also, currently Argument Clinic places the following restrictions on groups:
|
|
|
|
# * Each group must contain at least one parameter.
|
|
|
|
# * Each group may contain at most one group, which must be the furthest
|
|
|
|
# thing in the group from the required parameters. (The nested group
|
|
|
|
# must be the first in the group when it's before the required
|
|
|
|
# parameters, and the last thing in the group when after the required
|
|
|
|
# parameters.)
|
|
|
|
# * There may be at most one (top-level) group to the left or right of
|
|
|
|
# the required parameters.
|
|
|
|
# * You must specify a slash, and it must be after all parameters.
|
|
|
|
# (In other words: either all parameters are positional-only,
|
|
|
|
# or none are.)
|
|
|
|
#
|
|
|
|
# Said another way:
|
|
|
|
# * Each group must contain at least one parameter.
|
|
|
|
# * All left square brackets before the required parameters must be
|
|
|
|
# consecutive. (You can't have a left square bracket followed
|
|
|
|
# by a parameter, then another left square bracket. You can't
|
|
|
|
# have a left square bracket, a parameter, a right square bracket,
|
|
|
|
# and then a left square bracket.)
|
|
|
|
# * All right square brackets after the required parameters must be
|
|
|
|
# consecutive.
|
|
|
|
#
|
|
|
|
# These rules are enforced with a single state variable:
|
|
|
|
# "parameter_state". (Previously the code was a miasma of ifs and
|
2023-07-03 18:16:21 -03:00
|
|
|
# separate boolean state variables.) The states are defined in the
|
|
|
|
# ParamState class.
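    #
    # A hypothetical block that follows these rules (purely illustrative,
    # not taken from any real module):
    #
    #     mymodule.myfunc
    #         [
    #             grouped: int
    #         ]
    #         required: object
    #         optional: int = 0
    #             Per-parameter docstring for "optional", indented
    #             further than the parameter line.
    #         /
    #     The function docstring starts here, back at the declaration's
    #     indent.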
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-08-01 17:42:39 -03:00
|
|
|
def state_parameters_start(self, line: str) -> None:
|
2023-07-02 20:42:38 -03:00
|
|
|
if not self.valid_line(line):
|
2013-10-19 04:09:25 -03:00
|
|
|
return
|
|
|
|
|
|
|
|
# if this line is not indented, we have no parameters
|
|
|
|
if not self.indent.infer(line):
|
|
|
|
return self.next(self.state_function_docstring, line)
|
|
|
|
|
2014-01-16 15:32:01 -04:00
|
|
|
self.parameter_continuation = ''
|
2013-10-19 04:09:25 -03:00
|
|
|
return self.next(self.state_parameter, line)
|
|
|
|
|
|
|
|
|
2023-07-24 18:23:58 -03:00
|
|
|
def to_required(self) -> None:
|
2013-10-19 04:09:25 -03:00
|
|
|
"""
|
|
|
|
Transition to the "required" parameter state.
|
|
|
|
"""
|
2023-07-03 18:16:21 -03:00
|
|
|
if self.parameter_state is not ParamState.REQUIRED:
|
|
|
|
self.parameter_state = ParamState.REQUIRED
|
2023-07-24 18:23:58 -03:00
|
|
|
assert self.function is not None
|
2013-10-19 04:09:25 -03:00
|
|
|
for p in self.function.parameters.values():
|
|
|
|
p.group = -p.group
|
|
|
|
|
2023-08-01 17:42:39 -03:00
|
|
|
def state_parameter(self, line: str) -> None:
|
2023-07-03 19:10:46 -03:00
|
|
|
assert isinstance(self.function, Function)
|
2014-01-16 15:32:01 -04:00
|
|
|
|
2023-07-02 20:42:38 -03:00
|
|
|
if not self.valid_line(line):
|
2013-10-19 04:09:25 -03:00
|
|
|
return
|
|
|
|
|
2023-07-03 19:10:46 -03:00
|
|
|
if self.parameter_continuation:
|
|
|
|
line = self.parameter_continuation + ' ' + line.lstrip()
|
|
|
|
self.parameter_continuation = ''
|
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
assert self.indent.depth == 2
|
|
|
|
indent = self.indent.infer(line)
|
|
|
|
if indent == -1:
|
|
|
|
# we outdented, must be to definition column
|
|
|
|
return self.next(self.state_function_docstring, line)
|
|
|
|
|
|
|
|
if indent == 1:
|
|
|
|
# we indented, must be to new parameter docstring column
|
|
|
|
return self.next(self.state_parameter_docstring_start, line)
|
|
|
|
|
2014-01-16 15:32:01 -04:00
|
|
|
line = line.rstrip()
|
|
|
|
if line.endswith('\\'):
|
|
|
|
self.parameter_continuation = line[:-1]
|
|
|
|
return
|
|
|
|
|
2023-07-17 19:37:11 -03:00
|
|
|
func = self.function
|
|
|
|
match line.lstrip():
|
|
|
|
case '*':
|
|
|
|
self.parse_star(func)
|
|
|
|
case '[':
|
|
|
|
self.parse_opening_square_bracket(func)
|
|
|
|
case ']':
|
|
|
|
self.parse_closing_square_bracket(func)
|
|
|
|
case '/':
|
|
|
|
self.parse_slash(func)
|
|
|
|
case param:
|
|
|
|
self.parse_parameter(param)
|
|
|
|
|
|
|
|
def parse_parameter(self, line: str) -> None:
|
|
|
|
assert self.function is not None
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-03 18:16:21 -03:00
|
|
|
match self.parameter_state:
|
|
|
|
case ParamState.START | ParamState.REQUIRED:
|
2013-10-19 04:09:25 -03:00
|
|
|
self.to_required()
|
2023-07-03 18:16:21 -03:00
|
|
|
case ParamState.LEFT_SQUARE_BEFORE:
|
|
|
|
self.parameter_state = ParamState.GROUP_BEFORE
|
|
|
|
case ParamState.GROUP_BEFORE:
|
|
|
|
if not self.group:
|
|
|
|
self.to_required()
|
|
|
|
case ParamState.GROUP_AFTER | ParamState.OPTIONAL:
|
|
|
|
pass
|
|
|
|
case st:
|
|
|
|
fail(f"Function {self.function.name} has an unsupported group configuration. (Unexpected state {st}.a)")
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2014-02-01 02:03:12 -04:00
|
|
|
# handle "as" for parameters too
|
|
|
|
c_name = None
|
|
|
|
name, have_as_token, trailing = line.partition(' as ')
|
|
|
|
if have_as_token:
|
|
|
|
name = name.strip()
|
|
|
|
if ' ' not in name:
|
|
|
|
fields = trailing.strip().split(' ')
|
|
|
|
if not fields:
|
|
|
|
fail("Invalid 'as' clause!")
|
|
|
|
c_name = fields[0]
|
|
|
|
if c_name.endswith(':'):
|
|
|
|
name += ':'
|
|
|
|
c_name = c_name[:-1]
|
|
|
|
fields[0] = name
|
|
|
|
line = ' '.join(fields)
|
|
|
|
|
2023-07-03 19:10:46 -03:00
|
|
|
default: str | None
|
2014-01-16 15:32:01 -04:00
|
|
|
base, equals, default = line.rpartition('=')
|
|
|
|
if not equals:
|
|
|
|
base = default
|
|
|
|
default = None
|
2014-01-26 00:43:29 -04:00
|
|
|
|
2013-10-19 04:09:25 -03:00
|
|
|
module = None
|
|
|
|
try:
|
2023-05-20 17:16:49 -03:00
|
|
|
ast_input = f"def x({base}): pass"
|
2013-10-19 04:09:25 -03:00
|
|
|
module = ast.parse(ast_input)
|
|
|
|
except SyntaxError:
|
2014-01-16 15:32:01 -04:00
|
|
|
try:
|
2014-01-26 00:43:29 -04:00
|
|
|
# the last = was probably inside a function call, like
|
2015-05-04 10:59:46 -03:00
|
|
|
# c: int(accept={str})
|
2014-01-26 00:43:29 -04:00
|
|
|
# so assume there was no actual default value.
|
2014-01-16 15:32:01 -04:00
|
|
|
default = None
|
2023-05-20 17:16:49 -03:00
|
|
|
ast_input = f"def x({line}): pass"
|
2014-01-16 15:32:01 -04:00
|
|
|
module = ast.parse(ast_input)
|
|
|
|
except SyntaxError:
|
|
|
|
pass
|
2013-10-19 04:09:25 -03:00
|
|
|
if not module:
|
2013-10-23 03:26:23 -03:00
|
|
|
fail("Function " + self.function.name + " has an invalid parameter declaration:\n\t" + line)
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2023-07-03 19:10:46 -03:00
|
|
|
function = module.body[0]
|
|
|
|
assert isinstance(function, ast.FunctionDef)
|
|
|
|
function_args = function.args
|
2015-05-04 10:59:46 -03:00
|
|
|
|
|
|
|
if len(function_args.args) > 1:
|
|
|
|
fail("Function " + self.function.name + " has an invalid parameter declaration (comma?):\n\t" + line)
|
|
|
|
if function_args.defaults or function_args.kw_defaults:
|
|
|
|
fail("Function " + self.function.name + " has an invalid parameter declaration (default value?):\n\t" + line)
|
2021-07-16 12:43:02 -03:00
|
|
|
if function_args.kwarg:
|
|
|
|
fail("Function " + self.function.name + " has an invalid parameter declaration (**kwargs?):\n\t" + line)
|
2015-05-04 10:59:46 -03:00
|
|
|
|
2021-07-16 12:43:02 -03:00
|
|
|
if function_args.vararg:
|
|
|
|
is_vararg = True
|
|
|
|
parameter = function_args.vararg
|
|
|
|
else:
|
|
|
|
is_vararg = False
|
|
|
|
parameter = function_args.args[0]
|
2013-10-19 04:09:25 -03:00
|
|
|
|
2014-01-07 15:53:01 -04:00
|
|
|
parameter_name = parameter.arg
|
|
|
|
name, legacy, kwargs = self.parse_converter(parameter.annotation)
|
|
|
|
|
2014-01-16 15:32:01 -04:00
|
|
|
if not default:
|
2023-07-03 18:16:21 -03:00
|
|
|
if self.parameter_state is ParamState.OPTIONAL:
|
|
|
|
fail(f"Can't have a parameter without a default ({parameter_name!r})\n"
|
|
|
|
"after a parameter with a default!")
|
2023-07-03 19:10:46 -03:00
|
|
|
value: Sentinels | Null
|
2021-07-16 12:43:02 -03:00
|
|
|
if is_vararg:
|
|
|
|
value = NULL
|
|
|
|
kwargs.setdefault('c_default', "NULL")
|
|
|
|
else:
|
|
|
|
value = unspecified
|
2014-01-16 15:32:01 -04:00
|
|
|
if 'py_default' in kwargs:
|
|
|
|
fail("You can't specify py_default without specifying a default value!")
|
|
|
|
else:
|
2021-07-16 12:43:02 -03:00
|
|
|
if is_vararg:
|
|
|
|
fail("Vararg can't take a default value!")
|
|
|
|
|
2023-07-03 18:16:21 -03:00
|
|
|
if self.parameter_state is ParamState.REQUIRED:
|
|
|
|
self.parameter_state = ParamState.OPTIONAL
|
2014-01-16 15:32:01 -04:00
|
|
|
default = default.strip()
|
2014-01-25 00:52:30 -04:00
|
|
|
bad = False
|
2023-05-20 17:16:49 -03:00
|
|
|
ast_input = f"x = {default}"
|
2014-01-16 15:32:01 -04:00
|
|
|
try:
|
|
|
|
module = ast.parse(ast_input)
|
|
|
|
|
2014-01-24 10:17:25 -04:00
|
|
|
if 'c_default' not in kwargs:
|
|
|
|
# we can only represent very simple data values in C.
|
2020-08-11 10:26:59 -03:00
|
|
|
# detect whether default is okay, via a denylist
|
2014-01-24 10:17:25 -04:00
|
|
|
# of disallowed ast nodes.
|
|
|
|
class DetectBadNodes(ast.NodeVisitor):
|
|
|
|
bad = False
|
2023-07-24 18:23:58 -03:00
|
|
|
def bad_node(self, node: ast.AST) -> None:
|
2014-01-24 10:17:25 -04:00
|
|
|
self.bad = True
|
|
|
|
|
|
|
|
# inline function call
|
|
|
|
visit_Call = bad_node
|
|
|
|
# inline if statement ("x = 3 if y else z")
|
|
|
|
visit_IfExp = bad_node
|
|
|
|
|
|
|
|
# comprehensions and generator expressions
|
|
|
|
visit_ListComp = visit_SetComp = bad_node
|
|
|
|
visit_DictComp = visit_GeneratorExp = bad_node
|
|
|
|
|
|
|
|
# literals for advanced types
|
|
|
|
visit_Dict = visit_Set = bad_node
|
|
|
|
visit_List = visit_Tuple = bad_node
|
|
|
|
|
|
|
|
# "starred": "a = [1, 2, 3]; *a"
|
|
|
|
visit_Starred = bad_node
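                    # Illustrative consequence (examples made up): defaults
                    # such as "x: int = 5" pass this check, while
                    # "x: object = {}" or "x: object = f()" are rejected as
                    # too complex to represent in C.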
|
|
|
|
|
2020-08-11 10:26:59 -03:00
|
|
|
denylist = DetectBadNodes()
|
|
|
|
denylist.visit(module)
|
|
|
|
bad = denylist.bad
|
2014-01-24 10:17:25 -04:00
|
|
|
else:
|
|
|
|
# if they specify a c_default, we can be more lenient about the default value.
|
2014-01-25 00:52:30 -04:00
|
|
|
# but at least make an attempt at ensuring it's a valid expression.
|
|
|
|
try:
|
|
|
|
value = eval(default)
|
2023-05-18 18:58:42 -03:00
|
|
|
if value is unspecified:
|
2014-01-25 00:52:30 -04:00
|
|
|
fail("'unspecified' is not a legal default value!")
|
|
|
|
except NameError:
|
|
|
|
pass # probably a named constant
|
|
|
|
except Exception as e:
|
|
|
|
fail("Malformed expression given as default value\n"
|
|
|
|
"{!r} caused {!r}".format(default, e))
|
2014-01-24 10:17:25 -04:00
|
|
|
if bad:
|
2014-01-16 15:32:01 -04:00
|
|
|
fail("Unsupported expression as default value: " + repr(default))
|
|
|
|
|
2023-07-03 19:10:46 -03:00
|
|
|
assignment = module.body[0]
|
|
|
|
assert isinstance(assignment, ast.Assign)
|
|
|
|
expr = assignment.value
|
2014-01-16 15:32:01 -04:00
|
|
|
# mild hack: explicitly support NULL as a default value
|
2023-07-03 19:10:46 -03:00
|
|
|
c_default: str | None
|
2014-01-16 15:32:01 -04:00
|
|
|
if isinstance(expr, ast.Name) and expr.id == 'NULL':
|
|
|
|
value = NULL
|
2019-09-14 06:24:05 -03:00
|
|
|
py_default = '<unrepresentable>'
|
2014-01-16 15:32:01 -04:00
|
|
|
c_default = "NULL"
|
|
|
|
elif (isinstance(expr, ast.BinOp) or
|
2018-09-27 11:42:37 -03:00
|
|
|
(isinstance(expr, ast.UnaryOp) and
|
2023-05-09 18:16:22 -03:00
|
|
|
not (isinstance(expr.operand, ast.Constant) and
|
|
|
|
type(expr.operand.value) in {int, float, complex})
|
2018-09-27 11:42:37 -03:00
|
|
|
)):
|
2014-01-16 15:32:01 -04:00
|
|
|
c_default = kwargs.get("c_default")
|
|
|
|
if not (isinstance(c_default, str) and c_default):
|
2018-09-27 11:42:37 -03:00
|
|
|
fail("When you specify an expression (" + repr(default) + ") as your default value,\nyou MUST specify a valid c_default." + ast.dump(expr))
|
2014-01-16 15:32:01 -04:00
|
|
|
py_default = default
|
|
|
|
value = unknown
|
|
|
|
elif isinstance(expr, ast.Attribute):
|
|
|
|
a = []
|
2023-07-03 19:10:46 -03:00
|
|
|
n: ast.expr | ast.Attribute = expr
|
2014-01-16 15:32:01 -04:00
|
|
|
while isinstance(n, ast.Attribute):
|
|
|
|
a.append(n.attr)
|
|
|
|
n = n.value
|
|
|
|
if not isinstance(n, ast.Name):
|
|
|
|
fail("Unsupported default value " + repr(default) + " (looked like a Python constant)")
|
|
|
|
a.append(n.id)
|
|
|
|
py_default = ".".join(reversed(a))
|
|
|
|
|
|
|
|
c_default = kwargs.get("c_default")
|
|
|
|
if not (isinstance(c_default, str) and c_default):
|
|
|
|
fail("When you specify a named constant (" + repr(py_default) + ") as your default value,\nyou MUST specify a valid c_default.")
|
|
|
|
|
|
|
|
try:
|
|
|
|
value = eval(py_default)
|
|
|
|
except NameError:
|
|
|
|
value = unknown
|
|
|
|
else:
|
|
|
|
value = ast.literal_eval(expr)
|
|
|
|
py_default = repr(value)
|
2023-07-03 19:10:46 -03:00
|
|
|
if isinstance(value, (bool, NoneType)):
|
2014-01-16 15:32:01 -04:00
|
|
|
c_default = "Py_" + py_default
|
|
|
|
elif isinstance(value, str):
|
2014-01-18 04:26:16 -04:00
|
|
|
c_default = c_repr(value)
|
2014-01-16 15:32:01 -04:00
|
|
|
else:
|
|
|
|
c_default = py_default
|
|
|
|
|
|
|
|
except SyntaxError as e:
|
|
|
|
fail("Syntax error: " + repr(e.text))
|
|
|
|
except (ValueError, AttributeError):
|
|
|
|
value = unknown
|
2014-01-12 15:09:57 -04:00
|
|
|
c_default = kwargs.get("c_default")
|
2014-01-16 15:32:01 -04:00
|
|
|
py_default = default
|
2014-01-12 15:09:57 -04:00
|
|
|
if not (isinstance(c_default, str) and c_default):
|
|
|
|
fail("When you specify a named constant (" + repr(py_default) + ") as your default value,\nyou MUST specify a valid c_default.")
|
|
|
|
|
2014-01-16 15:32:01 -04:00
|
|
|
kwargs.setdefault('c_default', c_default)
|
|
|
|
kwargs.setdefault('py_default', py_default)
|
2013-10-19 04:09:25 -03:00
|
|
|
|
|
|
|
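        # Roughly, the default handling above maps DSL defaults like this
        # (illustrative examples, not an exhaustive list):
        #
        #   x: int = 5        ->  py_default "5",     c_default "5"
        #   x: object = None  ->  py_default "None",  c_default "Py_None"
        #   x: str = 'xyz'    ->  py_default "'xyz'", c_default via c_repr()
        #   x: object = NULL  ->  py_default "<unrepresentable>", c_default "NULL"
        #
        # Named constants and arbitrary expressions are accepted only when the
        # converter call supplies an explicit c_default.
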
        dict = legacy_converters if legacy else converters
        legacy_str = "legacy " if legacy else ""
        if name not in dict:
            fail(f'{name} is not a valid {legacy_str}converter')
        # if you use a c_name for the parameter, we just give that name to the converter
        # but the parameter object gets the python name
        converter = dict[name](c_name or parameter_name, parameter_name, self.function, value, **kwargs)

        kind: inspect._ParameterKind
        if is_vararg:
            kind = inspect.Parameter.VAR_POSITIONAL
        elif self.keyword_only:
            kind = inspect.Parameter.KEYWORD_ONLY
        else:
            kind = inspect.Parameter.POSITIONAL_OR_KEYWORD

        if isinstance(converter, self_converter):
            if len(self.function.parameters) == 1:
                if self.parameter_state is not ParamState.REQUIRED:
                    fail("A 'self' parameter cannot be marked optional.")
                if value is not unspecified:
                    fail("A 'self' parameter cannot have a default value.")
                if self.group:
                    fail("A 'self' parameter cannot be in an optional group.")
                kind = inspect.Parameter.POSITIONAL_ONLY
                self.parameter_state = ParamState.START
                self.function.parameters.clear()
            else:
                fail("A 'self' parameter, if specified, must be the very first thing in the parameter block.")

        if isinstance(converter, defining_class_converter):
            _lp = len(self.function.parameters)
            if _lp == 1:
                if self.parameter_state is not ParamState.REQUIRED:
                    fail("A 'defining_class' parameter cannot be marked optional.")
                if value is not unspecified:
                    fail("A 'defining_class' parameter cannot have a default value.")
                if self.group:
                    fail("A 'defining_class' parameter cannot be in an optional group.")
            else:
                fail("A 'defining_class' parameter, if specified, must either be the first thing in the parameter block, or come just after 'self'.")

        p = Parameter(parameter_name, kind, function=self.function, converter=converter, default=value, group=self.group)

        names = [k.name for k in self.function.parameters.values()]
        if parameter_name in names[1:]:
            fail("You can't have two parameters named " + repr(parameter_name) + "!")
        elif names and parameter_name == names[0] and c_name is None:
            fail(f"Parameter '{parameter_name}' requires a custom C name")

        key = f"{parameter_name}_as_{c_name}" if c_name else parameter_name
        self.function.parameters[key] = p

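    # Note: the DSL allows renaming a parameter on the C side with "as"
    # (for example "file as file_obj: object"); the Parameter keeps its
    # Python name but is stored under the "<name>_as_<c_name>" key above.
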
    @staticmethod
    def parse_converter(
        annotation: ast.expr | None
    ) -> tuple[str, bool, ConverterArgs]:
        match annotation:
            case ast.Constant(value=str() as value):
                return value, True, {}
            case ast.Name(name):
                return name, False, {}
            case ast.Call(func=ast.Name(name)):
                symbols = globals()
                kwargs: ConverterArgs = {}
                for node in annotation.keywords:
                    if not isinstance(node.arg, str):
                        fail("Cannot use a kwarg splat in a function-call annotation")
                    kwargs[node.arg] = eval_ast_expr(node.value, symbols)
                return name, False, kwargs
            case _:
                fail(
                    "Annotations must be either a name, a function call, or a string."
                )

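    # The annotation of a parameter line selects the converter.  A few
    # illustrative shapes accepted by parse_converter() above:
    #
    #   x: int                       -> bare name, no extra arguments
    #   x: int(c_default="INT_MAX")  -> converter call with keyword arguments
    #   x: 'O'                       -> quoted string selects a legacy converter
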
    def parse_star(self, function: Function) -> None:
        """Parse keyword-only parameter marker '*'."""
        if self.keyword_only:
            fail(f"Function {function.name} uses '*' more than once.")
        self.keyword_only = True

    def parse_opening_square_bracket(self, function: Function) -> None:
        """Parse opening parameter group symbol '['."""
        match self.parameter_state:
            case ParamState.START | ParamState.LEFT_SQUARE_BEFORE:
                self.parameter_state = ParamState.LEFT_SQUARE_BEFORE
            case ParamState.REQUIRED | ParamState.GROUP_AFTER:
                self.parameter_state = ParamState.GROUP_AFTER
            case st:
                fail(f"Function {function.name} has an unsupported group configuration. "
                     f"(Unexpected state {st}.b)")
        self.group += 1
        function.docstring_only = True

    def parse_closing_square_bracket(self, function: Function) -> None:
        """Parse closing parameter group symbol ']'."""
        if not self.group:
            fail(f"Function {function.name} has a ] without a matching [.")
        if not any(p.group == self.group for p in function.parameters.values()):
            fail(f"Function {function.name} has an empty group.\n"
                 "All groups must contain at least one parameter.")
        self.group -= 1
        match self.parameter_state:
            case ParamState.LEFT_SQUARE_BEFORE | ParamState.GROUP_BEFORE:
                self.parameter_state = ParamState.GROUP_BEFORE
            case ParamState.GROUP_AFTER | ParamState.RIGHT_SQUARE_AFTER:
                self.parameter_state = ParamState.RIGHT_SQUARE_AFTER
            case st:
                fail(f"Function {function.name} has an unsupported group configuration. "
                     f"(Unexpected state {st}.c)")

    def parse_slash(self, function: Function) -> None:
        """Parse positional-only parameter marker '/'."""
        if self.positional_only:
            fail(f"Function {function.name} uses '/' more than once.")
        self.positional_only = True
        # REQUIRED and OPTIONAL are allowed here, that allows positional-only
        # without option groups to work (and have default values!)
        allowed = {
            ParamState.REQUIRED,
            ParamState.OPTIONAL,
            ParamState.RIGHT_SQUARE_AFTER,
            ParamState.GROUP_BEFORE,
        }
        if (self.parameter_state not in allowed) or self.group:
            fail(f"Function {function.name} has an unsupported group configuration. "
                 f"(Unexpected state {self.parameter_state}.d)")
        if self.keyword_only:
            fail(f"Function {function.name} mixes keyword-only and "
                 "positional-only parameters, which is unsupported.")
        # fixup preceding parameters
        for p in function.parameters.values():
            if p.is_vararg():
                continue
            if (p.kind is not inspect.Parameter.POSITIONAL_OR_KEYWORD and
                not isinstance(p.converter, self_converter)
            ):
                fail(f"Function {function.name} mixes keyword-only and "
                     "positional-only parameters, which is unsupported.")
            p.kind = inspect.Parameter.POSITIONAL_ONLY

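    # To summarize the markers handled above, in a Clinic parameter block
    # (illustrative sketch of the DSL, not taken from a real builtin):
    #
    #     a: object
    #     b: object
    #     /
    #     *
    #     c: object = None
    #
    # '/' turns every preceding parameter positional-only, '*' turns every
    # following parameter keyword-only, and '[' / ']' wrap optional groups
    # of positional-only parameters (which force a docstring-only signature).
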
    def state_parameter_docstring_start(self, line: str) -> None:
        assert self.indent.margin is not None, "self.margin.infer() has not yet been called to set the margin"
        self.parameter_docstring_indent = len(self.indent.margin)
        assert self.indent.depth == 3
        return self.next(self.state_parameter_docstring, line)

    def docstring_append(self, obj: Function | Parameter, line: str) -> None:
        """Add a rstripped line to the current docstring."""
        matches = re.finditer(r'[^\x00-\x7F]', line)
        if offending := ", ".join([repr(m[0]) for m in matches]):
            warn("Non-ascii characters are not allowed in docstrings:",
                 offending)

        docstring = obj.docstring
        if docstring:
            docstring += "\n"
        if stripped := line.rstrip():
            docstring += self.indent.dedent(stripped)
        obj.docstring = docstring

    # every line of the docstring must start with at least F spaces,
    # where F > P.
    # these F spaces will be stripped.
    def state_parameter_docstring(self, line: str) -> None:
        if not self.valid_line(line):
            return

        indent = self.indent.measure(line)
        if indent < self.parameter_docstring_indent:
            self.indent.infer(line)
            assert self.indent.depth < 3
            if self.indent.depth == 2:
                # back to a parameter
                return self.next(self.state_parameter, line)
            assert self.indent.depth == 1
            return self.next(self.state_function_docstring, line)

        assert self.function and self.function.parameters
        last_param = next(reversed(self.function.parameters.values()))
        self.docstring_append(last_param, line)

    # the final stanza of the DSL is the docstring.
    def state_function_docstring(self, line: str) -> None:
        assert self.function is not None

        if self.group:
            fail("Function " + self.function.name + " has a ] without a matching [.")

        if not self.valid_line(line):
            return

        self.docstring_append(self.function, line)

    def format_docstring(self) -> str:
        f = self.function
        assert f is not None

        new_or_init = f.kind.new_or_init
        if new_or_init and not f.docstring:
            # don't render a docstring at all, no signature, nothing.
            return f.docstring

        text, add, output = _text_accumulator()
        parameters = f.render_parameters

        ##
        ## docstring first line
        ##

        if new_or_init:
            # classes get *just* the name of the class
            # not __new__, not __init__, and not module.classname
            assert f.cls
            add(f.cls.name)
        else:
            add(f.name)
        add('(')

        # populate "right_bracket_count" field for every parameter
        assert parameters, "We should always have a self parameter. " + repr(f)
        assert isinstance(parameters[0].converter, self_converter)
        # self is always positional-only.
        assert parameters[0].is_positional_only()
        assert parameters[0].right_bracket_count == 0
        positional_only = True
        for p in parameters[1:]:
            if not p.is_positional_only():
                positional_only = False
            else:
                assert positional_only
            if positional_only:
                p.right_bracket_count = abs(p.group)
            else:
                # don't put any right brackets around non-positional-only parameters, ever.
                p.right_bracket_count = 0

        right_bracket_count = 0

        def fix_right_bracket_count(desired: int) -> str:
            nonlocal right_bracket_count
            s = ''
            while right_bracket_count < desired:
                s += '['
                right_bracket_count += 1
            while right_bracket_count > desired:
                s += ']'
                right_bracket_count -= 1
            return s

        need_slash = False
        added_slash = False
        need_a_trailing_slash = False

        # we only need a trailing slash:
        #   * if this is not a "docstring_only" signature
        #   * and if the last *shown* parameter is
        #     positional only
        if not f.docstring_only:
            for p in reversed(parameters):
                if not p.converter.show_in_signature:
                    continue
                if p.is_positional_only():
                    need_a_trailing_slash = True
                break

        added_star = False

        first_parameter = True
        last_p = parameters[-1]
        line_length = len(''.join(text))
        indent = " " * line_length
        def add_parameter(text: str) -> None:
            nonlocal line_length
            nonlocal first_parameter
            if first_parameter:
                s = text
                first_parameter = False
            else:
                s = ' ' + text
                if line_length + len(s) >= 72:
                    add('\n')
                    add(indent)
                    line_length = len(indent)
                    s = text
            line_length += len(s)
            add(s)

        for p in parameters:
            if not p.converter.show_in_signature:
                continue
            assert p.name

            is_self = isinstance(p.converter, self_converter)
            if is_self and f.docstring_only:
                # this isn't a real machine-parsable signature,
                # so let's not print the "self" parameter
                continue

            if p.is_positional_only():
                need_slash = not f.docstring_only
            elif need_slash and not (added_slash or p.is_positional_only()):
                added_slash = True
                add_parameter('/,')

            if p.is_keyword_only() and not added_star:
                added_star = True
                add_parameter('*,')

            p_add, p_output = text_accumulator()
            p_add(fix_right_bracket_count(p.right_bracket_count))

            if isinstance(p.converter, self_converter):
                # annotate first parameter as being a "self".
                #
                # if inspect.Signature gets this function,
                # and it's already bound, the self parameter
                # will be stripped off.
                #
                # if it's not bound, it should be marked
                # as positional-only.
                #
                # note: we don't print "self" for __init__,
                # because this isn't actually the signature
                # for __init__. (it can't be, __init__ doesn't
                # have a docstring.) if this is an __init__
                # (or __new__), then this signature is for
                # calling the class to construct a new instance.
                p_add('$')

            if p.is_vararg():
                p_add("*")

            name = p.converter.signature_name or p.name
            p_add(name)

            if not p.is_vararg() and p.converter.is_optional():
                p_add('=')
                value = p.converter.py_default
                if not value:
                    value = repr(p.converter.default)
                p_add(value)

            if (p != last_p) or need_a_trailing_slash:
                p_add(',')

            add_parameter(p_output())

        add(fix_right_bracket_count(0))
        if need_a_trailing_slash:
            add_parameter('/')
        add(')')

        # PEP 8 says:
        #
        #     The Python standard library will not use function annotations
        #     as that would result in a premature commitment to a particular
        #     annotation style. Instead, the annotations are left for users
        #     to discover and experiment with useful annotation styles.
        #
        # therefore this is commented out:
        #
        # if f.return_converter.py_default:
        #     add(' -> ')
        #     add(f.return_converter.py_default)

        if not f.docstring_only:
            add("\n" + sig_end_marker + "\n")

        docstring_first_line = output()

        # now fix up the places where the brackets look wrong
        docstring_first_line = docstring_first_line.replace(', ]', ',] ')

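        # docstring_first_line now holds the human-readable signature line:
        # the function (or class) name, the rendered parameters (with '[' and
        # ']' marking optional groups), and, when the signature is
        # machine-readable, a trailing "\n--\n" terminator.  For example,
        # os.stat renders roughly as
        # "stat($module, /, path, *, dir_fd=None, follow_symlinks=True)".
        # (Illustrative; the exact text comes from the converters above.)
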
        # okay. now we're officially building the "parameters" section.
        # create substitution text for {parameters}
        spacer_line = False
        for p in parameters:
            if not p.docstring.strip():
                continue
            if spacer_line:
                add('\n')
            else:
                spacer_line = True
            add("  ")
            add(p.name)
            add('\n')
            add(textwrap.indent(rstrip_lines(p.docstring.rstrip()), "    "))
        parameters_output = output()
        if parameters_output:
            parameters_output += '\n'

        ##
        ## docstring body
        ##

        docstring = f.docstring.rstrip()
        lines = [line.rstrip() for line in docstring.split('\n')]

        # Enforce the summary line!
        # The first line of a docstring should be a summary of the function.
        # It should fit on one line (80 columns? 79 maybe?) and be a paragraph
        # by itself.
        #
        # Argument Clinic enforces the following rule:
        #  * either the docstring is empty,
        #  * or it must have a summary line.
        #
        # Guido said Clinic should enforce this:
        # http://mail.python.org/pipermail/python-dev/2013-June/127110.html

        if len(lines) >= 2:
            if lines[1]:
                fail("Docstring for " + f.full_name + " does not have a summary line!\n" +
                     "Every non-blank function docstring must start with\n" +
                     "a single line summary followed by an empty line.")
        elif len(lines) == 1:
            # the docstring is only one line right now--the summary line.
            # add an empty line after the summary line so we have space
            # between it and the {parameters} we're about to add.
            lines.append('')

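        # In other words, an acceptable Clinic docstring is either empty or
        # shaped like this (illustrative):
        #
        #     One-line summary of the function.
        #
        #     Optional further description...
        #
        # i.e. the summary line must be followed by a blank line.
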
        parameters_marker_count = len(docstring.split('{parameters}')) - 1
        if parameters_marker_count > 1:
            fail('You may not specify {parameters} more than once in a docstring!')

        if not parameters_marker_count:
            # insert after summary line
            lines.insert(2, '{parameters}')

        # insert at front of docstring
        lines.insert(0, docstring_first_line)

        docstring = "\n".join(lines)

        add(docstring)
        docstring = output()

        docstring = linear_format(docstring, parameters=parameters_output)
        docstring = docstring.rstrip()

        return docstring

    def do_post_block_processing_cleanup(self) -> None:
        """
        Called when processing the block is done.
        """
        if not self.function:
            return

        if self.keyword_only:
            values = self.function.parameters.values()
            if not values:
                no_parameter_after_star = True
            else:
                last_parameter = next(reversed(list(values)))
                no_parameter_after_star = last_parameter.kind != inspect.Parameter.KEYWORD_ONLY
            if no_parameter_after_star:
                fail("Function " + self.function.name + " specifies '*' without any parameters afterwards.")

        self.function.docstring = self.format_docstring()


# maps strings to callables.
# the callable should return an object
# that implements the clinic parser
# interface (__init__ and parse).
#
# example parsers:
#   "clinic", handles the Clinic DSL
#   "python", handles running Python code
#
parsers: dict[str, Callable[[Clinic], Parser]] = {
    'clinic': DSLParser,
    'python': PythonParser,
}
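
# The key used for the lookup here is the DSL name taken from a block's
# start line, so a block opened with "[clinic input]" is handled by
# DSLParser and one opened with "[python input]" by PythonParser.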


clinic = None


def create_cli() -> argparse.ArgumentParser:
    cmdline = argparse.ArgumentParser(
        prog="clinic.py",
        description="""Preprocessor for CPython C files.

The purpose of the Argument Clinic is automating all the boilerplate involved
with writing argument parsing code for builtins and providing introspection
signatures ("docstrings") for CPython builtins.

For more information see https://docs.python.org/3/howto/clinic.html""")
    cmdline.add_argument("-f", "--force", action='store_true',
                         help="force output regeneration")
    cmdline.add_argument("-o", "--output", type=str,
                         help="redirect file output to OUTPUT")
    cmdline.add_argument("-v", "--verbose", action='store_true',
                         help="enable verbose mode")
    cmdline.add_argument("--converters", action='store_true',
                         help=("print a list of all supported converters "
                               "and return converters"))
    cmdline.add_argument("--make", action='store_true',
                         help="walk --srcdir to run over all relevant files")
    cmdline.add_argument("--srcdir", type=str, default=os.curdir,
                         help="the directory tree to walk in --make mode")
    cmdline.add_argument("filename", metavar="FILE", type=str, nargs="*",
                         help="the list of files to process")
    return cmdline


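# Typical invocations (illustrative):
#
#   python3 Tools/clinic/clinic.py --force Modules/_abc.c
#   python3 Tools/clinic/clinic.py --make --srcdir .
#   python3 Tools/clinic/clinic.py --converters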
def run_clinic(parser: argparse.ArgumentParser, ns: argparse.Namespace) -> None:
    if ns.converters:
        if ns.filename:
            parser.error(
                "can't specify --converters and a filename at the same time"
            )
        converters: list[tuple[str, str]] = []
        return_converters: list[tuple[str, str]] = []
        ignored = set("""
            add_c_converter
            add_c_return_converter
            add_default_legacy_c_converter
            add_legacy_c_converter
            """.strip().split())
        module = globals()
        for name in module:
            for suffix, ids in (
                ("_return_converter", return_converters),
                ("_converter", converters),
            ):
                if name in ignored:
                    continue
                if name.endswith(suffix):
                    ids.append((name, name.removesuffix(suffix)))
                    break
        print()

        print("Legacy converters:")
        legacy = sorted(legacy_converters)
        print('    ' + ' '.join(c for c in legacy if c[0].isupper()))
        print('    ' + ' '.join(c for c in legacy if c[0].islower()))
        print()

        for title, attribute, ids in (
            ("Converters", 'converter_init', converters),
            ("Return converters", 'return_converter_init', return_converters),
        ):
            print(title + ":")
            longest = -1
            for name, short_name in ids:
                longest = max(longest, len(short_name))
            for name, short_name in sorted(ids, key=lambda x: x[1].lower()):
                cls = module[name]
                callable = getattr(cls, attribute, None)
                if not callable:
                    continue
                signature = inspect.signature(callable)
                parameters = []
                for parameter_name, parameter in signature.parameters.items():
                    if parameter.kind == inspect.Parameter.KEYWORD_ONLY:
                        if parameter.default != inspect.Parameter.empty:
                            s = f'{parameter_name}={parameter.default!r}'
                        else:
                            s = parameter_name
                        parameters.append(s)
                print('    {}({})'.format(short_name, ', '.join(parameters)))
            print()
        print("All converters also accept (c_default=None, py_default=None, annotation=None).")
        print("All return converters also accept (py_default=None).")
        return

    if ns.make:
        if ns.output or ns.filename:
            parser.error("can't use -o or filenames with --make")
        if not ns.srcdir:
            parser.error("--srcdir must not be empty with --make")
        for root, dirs, files in os.walk(ns.srcdir):
            for rcs_dir in ('.svn', '.git', '.hg', 'build', 'externals'):
                if rcs_dir in dirs:
                    dirs.remove(rcs_dir)
            for filename in files:
                # handle .c, .cpp and .h files
                if not filename.endswith(('.c', '.cpp', '.h')):
                    continue
                path = os.path.join(root, filename)
                if ns.verbose:
                    print(path)
                parse_file(path, verify=not ns.force)
        return

    if not ns.filename:
        parser.error("no input files")

    if ns.output and len(ns.filename) > 1:
        parser.error("can't use -o with multiple filenames")

    for filename in ns.filename:
        if ns.verbose:
            print(filename)
        parse_file(filename, output=ns.output, verify=not ns.force)


def main(argv: list[str] | None = None) -> NoReturn:
    parser = create_cli()
    args = parser.parse_args(argv)
    try:
        run_clinic(parser, args)
    except ClinicError as exc:
        sys.stderr.write(exc.report())
        sys.exit(1)
    else:
        sys.exit(0)


if __name__ == "__main__":
    main()
|