#!/usr/bin/env python3
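"""Benchmark pegen-related parsing and compiling of data/xxl.py or the stdlib.

Assumed usage (the exact invocation is a sketch; run from the peg_generator
directory after `make venv`, so that `scripts/` is importable and `data/` is
readable):

    python scripts/benchmark.py --target=xxl parse
    python scripts/benchmark.py --target=stdlib compile

`--target` chooses the input (the data/xxl.py file or ../../Lib), and the
subcommand chooses whether sources are only parsed or also compiled to
bytecode.  Each benchmark reports wall-clock time and peak memory usage.
"""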
import argparse
import ast
import sys
import os
from time import time

try:
    import memory_profiler
except ModuleNotFoundError:
    print(
        "Please run `make venv` to create a virtual environment and install"
        " all the dependencies, before running this script."
    )
    sys.exit(1)

sys.path.insert(0, os.getcwd())
from scripts.test_parse_directory import parse_directory

argparser = argparse.ArgumentParser(
    prog="benchmark", description="Reproduce the various pegen benchmarks"
)
argparser.add_argument(
    "--target",
    action="store",
    choices=["xxl", "stdlib"],
    default="xxl",
    help="Which target to use for the benchmark (default is xxl.py)",
)

subcommands = argparser.add_subparsers(title="Benchmarks", dest="subcommand")
command_compile = subcommands.add_parser(
    "compile", help="Benchmark parsing and compiling to bytecode"
)
command_parse = subcommands.add_parser(
    "parse", help="Benchmark parsing and generating an ast.AST"
)

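# Timing/profiling decorator: runs the wrapped callable three times to average
# wall-clock time, then profiles one additional call with
# memory_profiler.memory_usage() to report peak memory consumption.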
def benchmark(func):
    def wrapper(*args):
        times = []
        for _ in range(3):
            start = time()
            result = func(*args)
            end = time()
            times.append(end - start)
        memory = memory_profiler.memory_usage((func, args))
        print(f"{func.__name__}")
        print(f"\tTime: {sum(times) / 3:.3f} seconds (average of 3 runs)")
        print(f"\tMemory: {max(memory)} MiB (peak during one profiled run)")
        return result

    return wrapper

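# The two measured operations: compile() exercises parsing plus bytecode
# compilation, while ast.parse() measures parsing alone.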
@benchmark
def time_compile(source):
    return compile(source, "<string>", "exec")


@benchmark
def time_parse(source):
    return ast.parse(source)

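# Benchmark a single large source string (data/xxl.py) with the requested
# subcommand.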
def run_benchmark_xxl(subcommand, source):
    if subcommand == "compile":
        time_compile(source)
    elif subcommand == "parse":
        time_parse(source)

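# Benchmark the whole standard library via parse_directory().  The mode values
# are assumed to follow test_parse_directory's convention (1 = parse only,
# 2 = parse and compile).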
def run_benchmark_stdlib(subcommand):
    modes = {"compile": 2, "parse": 1}
    for _ in range(3):
        parse_directory(
            "../../Lib",
            verbose=False,
            excluded_files=["*/bad*", "*/lib2to3/tests/data/*"],
            short=True,
            mode=modes[subcommand],
        )

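# Command-line entry point: parse the arguments and dispatch to the matching
# benchmark runner.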
def main():
    args = argparser.parse_args()
    subcommand = args.subcommand
    target = args.target

    if subcommand is None:
        argparser.error("A benchmark to run is required")

    if target == "xxl":
        with open(os.path.join("data", "xxl.py"), "r") as f:
            source = f.read()
        run_benchmark_xxl(subcommand, source)
    elif target == "stdlib":
        run_benchmark_stdlib(subcommand)


if __name__ == "__main__":
    main()