mirror of https://github.com/python/cpython
gh-100176: Remove outdated Tools/{io,cc,string}bench (#101853)
Co-authored-by: Hugo van Kemenade <hugovk@users.noreply.github.com>
This commit is contained in:
parent 90dd653a61
commit aba37d451f
@@ -0,0 +1 @@
+Remove outdated Tools/{io,cc,string}bench
@@ -10,8 +10,6 @@ c-analyzer      Tools to check no new global variables have been added.
 cases_generator Tooling to generate interpreters.
 
-ccbench         A Python threads-based concurrency benchmark. (*)
-
 clinic          A preprocessor for CPython C files in order to automate
                 the boilerplate involved with writing argument parsing
                 code for "builtins".
@@ -28,8 +26,6 @@ i18n            Tools for internationalization. pygettext.py
 importbench     A set of micro-benchmarks for various import scenarios.
 
-iobench         Benchmark for the new Python I/O system. (*)
-
 msi             Support for packaging Python as an MSI package on Windows.
 
 nuget           Files for the NuGet package manager for .NET.
@@ -45,9 +41,6 @@ scripts         A number of useful single-file programs, e.g. run_tests.py
 ssl             Scripts to generate ssl_data.h from OpenSSL sources, and run
                 tests against multiple installations of OpenSSL and LibreSSL.
 
-stringbench     A suite of micro-benchmarks for various operations on
-                strings (both 8-bit and unicode). (*)
-
 tz              A script to dump timezone from /usr/share/zoneinfo.
 
 unicode         Tools for generating unicodedata and codecs from unicode.org
@@ -60,6 +53,4 @@ unittestgui     A Tkinter based GUI test runner for unittest, with test
 wasm            Config and helpers to facilitate cross compilation of CPython
                 to WebAssembly (WASM).
 
-(*) A generic benchmark suite is maintained separately at https://github.com/python/performance
-
 Note: The pynche color editor has moved to https://gitlab.com/warsaw/pynche
@@ -1,606 +0,0 @@
# This file should be kept compatible with both Python 2.6 and Python >= 3.0.

from __future__ import division
from __future__ import print_function

"""
ccbench, a Python concurrency benchmark.
"""

import time
import os
import sys
import itertools
import threading
import subprocess
import socket
from optparse import OptionParser, SUPPRESS_HELP
import platform

# Compatibility
try:
    xrange
except NameError:
    xrange = range

try:
    map = itertools.imap
except AttributeError:
    pass


THROUGHPUT_DURATION = 2.0

LATENCY_PING_INTERVAL = 0.1
LATENCY_DURATION = 2.0

BANDWIDTH_PACKET_SIZE = 1024
BANDWIDTH_DURATION = 2.0


def task_pidigits():
    """Pi calculation (Python)"""
    _map = map
    _count = itertools.count
    _islice = itertools.islice

    def calc_ndigits(n):
        # From http://shootout.alioth.debian.org/
        def gen_x():
            return _map(lambda k: (k, 4*k + 2, 0, 2*k + 1), _count(1))

        def compose(a, b):
            aq, ar, as_, at = a
            bq, br, bs, bt = b
            return (aq * bq,
                    aq * br + ar * bt,
                    as_ * bq + at * bs,
                    as_ * br + at * bt)

        def extract(z, j):
            q, r, s, t = z
            return (q*j + r) // (s*j + t)

        def pi_digits():
            z = (1, 0, 0, 1)
            x = gen_x()
            while 1:
                y = extract(z, 3)
                while y != extract(z, 4):
                    z = compose(z, next(x))
                    y = extract(z, 3)
                z = compose((10, -10*y, 0, 1), z)
                yield y

        return list(_islice(pi_digits(), n))

    return calc_ndigits, (50, )

def task_regex():
    """regular expression (C)"""
    # XXX this task gives horrendous latency results.
    import re
    # Taken from the `inspect` module
    pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)', re.MULTILINE)
    with open(__file__, "r") as f:
        arg = f.read(2000)
    return pat.findall, (arg, )

def task_sort():
    """list sorting (C)"""
    def list_sort(l):
        l = l[::-1]
        l.sort()

    return list_sort, (list(range(1000)), )

def task_compress_zlib():
    """zlib compression (C)"""
    import zlib
    with open(__file__, "rb") as f:
        arg = f.read(5000) * 3

    def compress(s):
        zlib.decompress(zlib.compress(s, 5))
    return compress, (arg, )

def task_compress_bz2():
    """bz2 compression (C)"""
    import bz2
    with open(__file__, "rb") as f:
        arg = f.read(3000) * 2

    def compress(s):
        bz2.compress(s)
    return compress, (arg, )

def task_hashing():
    """SHA1 hashing (C)"""
    import hashlib
    with open(__file__, "rb") as f:
        arg = f.read(5000) * 30

    def compute(s):
        hashlib.sha1(s).digest()
    return compute, (arg, )


throughput_tasks = [task_pidigits, task_regex]
for mod in 'bz2', 'hashlib':
    try:
        globals()[mod] = __import__(mod)
    except ImportError:
        globals()[mod] = None

# For whatever reasons, zlib gives irregular results, so we prefer bz2 or
# hashlib if available.
# (NOTE: hashlib releases the GIL from 2.7 and 3.1 onwards)
if bz2 is not None:
    throughput_tasks.append(task_compress_bz2)
elif hashlib is not None:
    throughput_tasks.append(task_hashing)
else:
    throughput_tasks.append(task_compress_zlib)

latency_tasks = throughput_tasks
bandwidth_tasks = [task_pidigits]


class TimedLoop:
    def __init__(self, func, args):
        self.func = func
        self.args = args

    def __call__(self, start_time, min_duration, end_event, do_yield=False):
        step = 20
        niters = 0
        duration = 0.0
        _time = time.time
        _sleep = time.sleep
        _func = self.func
        _args = self.args
        t1 = start_time
        while True:
            for i in range(step):
                _func(*_args)
            t2 = _time()
            # If another thread terminated, the current measurement is invalid
            # => return the previous one.
            if end_event:
                return niters, duration
            niters += step
            duration = t2 - start_time
            if duration >= min_duration:
                end_event.append(None)
                return niters, duration
            if t2 - t1 < 0.01:
                # Minimize interference of measurement on overall runtime
                step = step * 3 // 2
            elif do_yield:
                # OS scheduling of Python threads is sometimes so bad that we
                # have to force thread switching ourselves, otherwise we get
                # completely useless results.
                _sleep(0.0001)
            t1 = t2


def run_throughput_test(func, args, nthreads):
    assert nthreads >= 1

    # Warm up
    func(*args)

    results = []
    loop = TimedLoop(func, args)
    end_event = []

    if nthreads == 1:
        # Pure single-threaded performance, without any switching or
        # synchronization overhead.
        start_time = time.time()
        results.append(loop(start_time, THROUGHPUT_DURATION,
                            end_event, do_yield=False))
        return results

    started = False
    ready_cond = threading.Condition()
    start_cond = threading.Condition()
    ready = []

    def run():
        with ready_cond:
            ready.append(None)
            ready_cond.notify()
        with start_cond:
            while not started:
                start_cond.wait()
        results.append(loop(start_time, THROUGHPUT_DURATION,
                            end_event, do_yield=True))

    threads = []
    for i in range(nthreads):
        threads.append(threading.Thread(target=run))
    for t in threads:
        t.daemon = True
        t.start()
    # We don't want measurements to include thread startup overhead,
    # so we arrange for timing to start after all threads are ready.
    with ready_cond:
        while len(ready) < nthreads:
            ready_cond.wait()
    with start_cond:
        start_time = time.time()
        started = True
        start_cond.notify(nthreads)
    for t in threads:
        t.join()

    return results

def run_throughput_tests(max_threads):
    for task in throughput_tasks:
        print(task.__doc__)
        print()
        func, args = task()
        nthreads = 1
        baseline_speed = None
        while nthreads <= max_threads:
            results = run_throughput_test(func, args, nthreads)
            # Taking the max duration rather than average gives pessimistic
            # results rather than optimistic.
            speed = sum(r[0] for r in results) / max(r[1] for r in results)
            print("threads=%d: %d" % (nthreads, speed), end="")
            if baseline_speed is None:
                print(" iterations/s.")
                baseline_speed = speed
            else:
                print(" ( %d %%)" % (speed / baseline_speed * 100))
            nthreads += 1
        print()


LAT_END = "END"

def _sendto(sock, s, addr):
    sock.sendto(s.encode('ascii'), addr)

def _recv(sock, n):
    return sock.recv(n).decode('ascii')

def latency_client(addr, nb_pings, interval):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        _time = time.time
        _sleep = time.sleep
        def _ping():
            _sendto(sock, "%r\n" % _time(), addr)
        # The first ping signals the parent process that we are ready.
        _ping()
        # We give the parent a bit of time to notice.
        _sleep(1.0)
        for i in range(nb_pings):
            _sleep(interval)
            _ping()
        _sendto(sock, LAT_END + "\n", addr)
    finally:
        sock.close()

def run_latency_client(**kwargs):
    cmd_line = [sys.executable, '-E', os.path.abspath(__file__)]
    cmd_line.extend(['--latclient', repr(kwargs)])
    return subprocess.Popen(cmd_line) #, stdin=subprocess.PIPE,
                            #stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

def run_latency_test(func, args, nthreads):
    # Create a listening socket to receive the pings. We use UDP which should
    # be painlessly cross-platform.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("127.0.0.1", 0))
    addr = sock.getsockname()

    interval = LATENCY_PING_INTERVAL
    duration = LATENCY_DURATION
    nb_pings = int(duration / interval)

    results = []
    threads = []
    end_event = []
    start_cond = threading.Condition()
    started = False
    if nthreads > 0:
        # Warm up
        func(*args)

        results = []
        loop = TimedLoop(func, args)
        ready = []
        ready_cond = threading.Condition()

        def run():
            with ready_cond:
                ready.append(None)
                ready_cond.notify()
            with start_cond:
                while not started:
                    start_cond.wait()
            loop(start_time, duration * 1.5, end_event, do_yield=False)

        for i in range(nthreads):
            threads.append(threading.Thread(target=run))
        for t in threads:
            t.daemon = True
            t.start()
        # Wait for threads to be ready
        with ready_cond:
            while len(ready) < nthreads:
                ready_cond.wait()

    # Run the client and wait for the first ping(s) to arrive before
    # unblocking the background threads.
    chunks = []
    process = run_latency_client(addr=sock.getsockname(),
                                 nb_pings=nb_pings, interval=interval)
    s = _recv(sock, 4096)
    _time = time.time

    with start_cond:
        start_time = _time()
        started = True
        start_cond.notify(nthreads)

    while LAT_END not in s:
        s = _recv(sock, 4096)
        t = _time()
        chunks.append((t, s))

    # Tell the background threads to stop.
    end_event.append(None)
    for t in threads:
        t.join()
    process.wait()
    sock.close()

    for recv_time, chunk in chunks:
        # NOTE: it is assumed that a line sent by a client wasn't received
        # in two chunks because the lines are very small.
        for line in chunk.splitlines():
            line = line.strip()
            if line and line != LAT_END:
                send_time = eval(line)
                assert isinstance(send_time, float)
                results.append((send_time, recv_time))

    return results

def run_latency_tests(max_threads):
    for task in latency_tasks:
        print("Background CPU task:", task.__doc__)
        print()
        func, args = task()
        nthreads = 0
        while nthreads <= max_threads:
            results = run_latency_test(func, args, nthreads)
            n = len(results)
            # We print out milliseconds
            lats = [1000 * (t2 - t1) for (t1, t2) in results]
            #print(list(map(int, lats)))
            avg = sum(lats) / n
            dev = (sum((x - avg) ** 2 for x in lats) / n) ** 0.5
            print("CPU threads=%d: %d ms. (std dev: %d ms.)" % (nthreads, avg, dev), end="")
            print()
            #print(" [... from %d samples]" % n)
            nthreads += 1
        print()


BW_END = "END"

def bandwidth_client(addr, packet_size, duration):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("127.0.0.1", 0))
    local_addr = sock.getsockname()
    _time = time.time
    _sleep = time.sleep
    def _send_chunk(msg):
        _sendto(sock, ("%r#%s\n" % (local_addr, msg)).rjust(packet_size), addr)
    # We give the parent some time to be ready.
    _sleep(1.0)
    try:
        start_time = _time()
        end_time = start_time + duration * 2.0
        i = 0
        while _time() < end_time:
            _send_chunk(str(i))
            s = _recv(sock, packet_size)
            assert len(s) == packet_size
            i += 1
        _send_chunk(BW_END)
    finally:
        sock.close()

def run_bandwidth_client(**kwargs):
    cmd_line = [sys.executable, '-E', os.path.abspath(__file__)]
    cmd_line.extend(['--bwclient', repr(kwargs)])
    return subprocess.Popen(cmd_line) #, stdin=subprocess.PIPE,
                            #stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

def run_bandwidth_test(func, args, nthreads):
    # Create a listening socket to receive the packets. We use UDP which should
    # be painlessly cross-platform.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.bind(("127.0.0.1", 0))
        addr = sock.getsockname()

        duration = BANDWIDTH_DURATION
        packet_size = BANDWIDTH_PACKET_SIZE

        results = []
        threads = []
        end_event = []
        start_cond = threading.Condition()
        started = False
        if nthreads > 0:
            # Warm up
            func(*args)

            results = []
            loop = TimedLoop(func, args)
            ready = []
            ready_cond = threading.Condition()

            def run():
                with ready_cond:
                    ready.append(None)
                    ready_cond.notify()
                with start_cond:
                    while not started:
                        start_cond.wait()
                loop(start_time, duration * 1.5, end_event, do_yield=False)

            for i in range(nthreads):
                threads.append(threading.Thread(target=run))
            for t in threads:
                t.daemon = True
                t.start()
            # Wait for threads to be ready
            with ready_cond:
                while len(ready) < nthreads:
                    ready_cond.wait()

        # Run the client and wait for the first packet to arrive before
        # unblocking the background threads.
        process = run_bandwidth_client(addr=addr,
                                       packet_size=packet_size,
                                       duration=duration)
        _time = time.time
        # This will also wait for the parent to be ready
        s = _recv(sock, packet_size)
        remote_addr = eval(s.partition('#')[0])

        with start_cond:
            start_time = _time()
            started = True
            start_cond.notify(nthreads)

        n = 0
        first_time = None
        while not end_event and BW_END not in s:
            _sendto(sock, s, remote_addr)
            s = _recv(sock, packet_size)
            if first_time is None:
                first_time = _time()
            n += 1
        end_time = _time()

    end_event.append(None)
    for t in threads:
        t.join()
    process.kill()

    return (n - 1) / (end_time - first_time)

def run_bandwidth_tests(max_threads):
    for task in bandwidth_tasks:
        print("Background CPU task:", task.__doc__)
        print()
        func, args = task()
        nthreads = 0
        baseline_speed = None
        while nthreads <= max_threads:
            results = run_bandwidth_test(func, args, nthreads)
            speed = results
            #speed = len(results) * 1.0 / results[-1][0]
            print("CPU threads=%d: %.1f" % (nthreads, speed), end="")
            if baseline_speed is None:
                print(" packets/s.")
                baseline_speed = speed
            else:
                print(" ( %d %%)" % (speed / baseline_speed * 100))
            nthreads += 1
        print()


def main():
    usage = "usage: %prog [-h|--help] [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("-t", "--throughput",
                      action="store_true", dest="throughput", default=False,
                      help="run throughput tests")
    parser.add_option("-l", "--latency",
                      action="store_true", dest="latency", default=False,
                      help="run latency tests")
    parser.add_option("-b", "--bandwidth",
                      action="store_true", dest="bandwidth", default=False,
                      help="run I/O bandwidth tests")
    parser.add_option("-i", "--interval",
                      action="store", type="int", dest="check_interval", default=None,
                      help="sys.setcheckinterval() value "
                           "(Python 3.8 and older)")
    parser.add_option("-I", "--switch-interval",
                      action="store", type="float", dest="switch_interval", default=None,
                      help="sys.setswitchinterval() value "
                           "(Python 3.2 and newer)")
    parser.add_option("-n", "--num-threads",
                      action="store", type="int", dest="nthreads", default=4,
                      help="max number of threads in tests")

    # Hidden option to run the pinging and bandwidth clients
    parser.add_option("", "--latclient",
                      action="store", dest="latclient", default=None,
                      help=SUPPRESS_HELP)
    parser.add_option("", "--bwclient",
                      action="store", dest="bwclient", default=None,
                      help=SUPPRESS_HELP)

    options, args = parser.parse_args()
    if args:
        parser.error("unexpected arguments")

    if options.latclient:
        kwargs = eval(options.latclient)
        latency_client(**kwargs)
        return

    if options.bwclient:
        kwargs = eval(options.bwclient)
        bandwidth_client(**kwargs)
        return

    if not options.throughput and not options.latency and not options.bandwidth:
        options.throughput = options.latency = options.bandwidth = True
    if options.check_interval:
        sys.setcheckinterval(options.check_interval)
    if options.switch_interval:
        sys.setswitchinterval(options.switch_interval)

    print("== %s %s (%s) ==" % (
        platform.python_implementation(),
        platform.python_version(),
        platform.python_build()[0],
    ))
    # Processor identification often has repeated spaces
    cpu = ' '.join(platform.processor().split())
    print("== %s %s on '%s' ==" % (
        platform.machine(),
        platform.system(),
        cpu,
    ))
    print()

    if options.throughput:
        print("--- Throughput ---")
        print()
        run_throughput_tests(options.nthreads)

    if options.latency:
        print("--- Latency ---")
        print()
        run_latency_tests(options.nthreads)

    if options.bandwidth:
        print("--- I/O bandwidth ---")
        print()
        run_bandwidth_tests(options.nthreads)

if __name__ == "__main__":
    main()
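The heart of the file above is TimedLoop's self-calibrating measurement: calls are batched between clock reads, and the batch grows while one batch still completes in under 10 ms, so the cost of reading the clock barely perturbs the numbers. A minimal standalone sketch of that technique follows; it assumes nothing from the file except the idea itself, and it uses time.perf_counter, which the original (written for Python 2.6) predates:

import time

def measure(func, min_duration=2.0):
    """Run func repeatedly for at least min_duration seconds.

    Calls are batched between clock reads; the batch grows while a
    whole batch finishes in under 10 ms, keeping timing overhead low.
    Returns (iterations, elapsed_seconds).
    """
    step = 20
    niters = 0
    start = t1 = time.perf_counter()
    while True:
        for _ in range(step):
            func()
        t2 = time.perf_counter()
        niters += step
        if t2 - start >= min_duration:
            return niters, t2 - start
        if t2 - t1 < 0.01:
            step = step * 3 // 2   # batch is too cheap: grow it
        t1 = t2

iters, secs = measure(lambda: sorted(range(1000)), min_duration=0.5)
print("%.0f iterations/s" % (iters / secs))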
@@ -1,568 +0,0 @@
import itertools
import os
import platform
import re
import sys
import time
from optparse import OptionParser

out = sys.stdout

TEXT_ENCODING = 'utf8'
NEWLINES = 'lf'


def text_open(fn, mode, encoding=None):
    try:
        return open(fn, mode, encoding=encoding or TEXT_ENCODING)
    except TypeError:
        return open(fn, mode)


def get_file_sizes():
    for s in ['20 KiB', '400 KiB', '10 MiB']:
        size, unit = s.split()
        size = int(size) * {'KiB': 1024, 'MiB': 1024 ** 2}[unit]
        yield s.replace(' ', ''), size


def get_binary_files():
    return ((name + ".bin", size) for name, size in get_file_sizes())


def get_text_files():
    return ((f"{name}-{TEXT_ENCODING}-{NEWLINES}.txt", size)
            for name, size in get_file_sizes())


def with_open_mode(mode):
    def decorate(f):
        f.file_open_mode = mode
        return f
    return decorate


def with_sizes(*sizes):
    def decorate(f):
        f.file_sizes = sizes
        return f
    return decorate


# Here begin the tests

@with_open_mode("r")
@with_sizes("medium")
def read_bytewise(f):
    """ read one unit at a time """
    f.seek(0)
    while f.read(1):
        pass


@with_open_mode("r")
@with_sizes("medium")
def read_small_chunks(f):
    """ read 20 units at a time """
    f.seek(0)
    while f.read(20):
        pass


@with_open_mode("r")
@with_sizes("medium")
def read_big_chunks(f):
    """ read 4096 units at a time """
    f.seek(0)
    while f.read(4096):
        pass


@with_open_mode("r")
@with_sizes("small", "medium", "large")
def read_whole_file(f):
    """ read whole contents at once """
    f.seek(0)
    while f.read():
        pass


@with_open_mode("rt")
@with_sizes("medium")
def read_lines(f):
    """ read one line at a time """
    f.seek(0)
    for line in f:
        pass


@with_open_mode("r")
@with_sizes("medium")
def seek_forward_bytewise(f):
    """ seek forward one unit at a time """
    f.seek(0, 2)
    size = f.tell()
    f.seek(0, 0)
    for i in range(0, size - 1):
        f.seek(i, 0)


@with_open_mode("r")
@with_sizes("medium")
def seek_forward_blockwise(f):
    """ seek forward 1000 units at a time """
    f.seek(0, 2)
    size = f.tell()
    f.seek(0, 0)
    for i in range(0, size - 1, 1000):
        f.seek(i, 0)


@with_open_mode("rb")
@with_sizes("medium")
def read_seek_bytewise(f):
    """ alternate read & seek one unit """
    f.seek(0)
    while f.read(1):
        f.seek(1, 1)


@with_open_mode("rb")
@with_sizes("medium")
def read_seek_blockwise(f):
    """ alternate read & seek 1000 units """
    f.seek(0)
    while f.read(1000):
        f.seek(1000, 1)


@with_open_mode("w")
@with_sizes("small")
def write_bytewise(f, source):
    """ write one unit at a time """
    for i in range(0, len(source)):
        f.write(source[i:i+1])


@with_open_mode("w")
@with_sizes("medium")
def write_small_chunks(f, source):
    """ write 20 units at a time """
    for i in range(0, len(source), 20):
        f.write(source[i:i+20])


@with_open_mode("w")
@with_sizes("medium")
def write_medium_chunks(f, source):
    """ write 4096 units at a time """
    for i in range(0, len(source), 4096):
        f.write(source[i:i+4096])


@with_open_mode("w")
@with_sizes("large")
def write_large_chunks(f, source):
    """ write 1e6 units at a time """
    for i in range(0, len(source), 1000000):
        f.write(source[i:i+1000000])


@with_open_mode("w+")
@with_sizes("small")
def modify_bytewise(f, source):
    """ modify one unit at a time """
    f.seek(0)
    for i in range(0, len(source)):
        f.write(source[i:i+1])


@with_open_mode("w+")
@with_sizes("medium")
def modify_small_chunks(f, source):
    """ modify 20 units at a time """
    f.seek(0)
    for i in range(0, len(source), 20):
        f.write(source[i:i+20])


@with_open_mode("w+")
@with_sizes("medium")
def modify_medium_chunks(f, source):
    """ modify 4096 units at a time """
    f.seek(0)
    for i in range(0, len(source), 4096):
        f.write(source[i:i+4096])


@with_open_mode("wb+")
@with_sizes("medium")
def modify_seek_forward_bytewise(f, source):
    """ alternate write & seek one unit """
    f.seek(0)
    for i in range(0, len(source), 2):
        f.write(source[i:i+1])
        f.seek(i+2)


@with_open_mode("wb+")
@with_sizes("medium")
def modify_seek_forward_blockwise(f, source):
    """ alternate write & seek 1000 units """
    f.seek(0)
    for i in range(0, len(source), 2000):
        f.write(source[i:i+1000])
        f.seek(i+2000)


# XXX the 2 following tests don't work with py3k's text IO
@with_open_mode("wb+")
@with_sizes("medium")
def read_modify_bytewise(f, source):
    """ alternate read & write one unit """
    f.seek(0)
    for i in range(0, len(source), 2):
        f.read(1)
        f.write(source[i+1:i+2])


@with_open_mode("wb+")
@with_sizes("medium")
def read_modify_blockwise(f, source):
    """ alternate read & write 1000 units """
    f.seek(0)
    for i in range(0, len(source), 2000):
        f.read(1000)
        f.write(source[i+1000:i+2000])


read_tests = [
    read_bytewise, read_small_chunks, read_lines, read_big_chunks,
    None, read_whole_file, None,
    seek_forward_bytewise, seek_forward_blockwise,
    read_seek_bytewise, read_seek_blockwise,
]

write_tests = [
    write_bytewise, write_small_chunks, write_medium_chunks, write_large_chunks,
]

modify_tests = [
    modify_bytewise, modify_small_chunks, modify_medium_chunks,
    None,
    modify_seek_forward_bytewise, modify_seek_forward_blockwise,
    read_modify_bytewise, read_modify_blockwise,
]


def run_during(duration, func):
    _t = time.time
    n = 0
    start = os.times()
    start_timestamp = _t()
    real_start = start[4] or start_timestamp
    while True:
        func()
        n += 1
        if _t() - start_timestamp > duration:
            break
    end = os.times()
    real = (end[4] if start[4] else time.time()) - real_start
    return n, real, sum(end[0:2]) - sum(start[0:2])


def warm_cache(filename):
    with open(filename, "rb") as f:
        f.read()


def run_all_tests(options):
    def print_label(filename, func):
        name = re.split(r'[-.]', filename)[0]
        out.write(
            f"[{name.center(7)}] {func.__doc__.strip()}... ".ljust(52))
        out.flush()

    def print_results(size, n, real, cpu):
        bw = n * float(size) / 1024 ** 2 / real
        bw = ("%4d MiB/s" if bw > 100 else "%.3g MiB/s") % bw
        out.write(bw.rjust(12) + "\n")
        if cpu < 0.90 * real:
            out.write("   warning: test above used only "
                      f"{cpu / real:%} CPU, "
                      "result may be flawed!\n")

    def run_one_test(name, size, open_func, test_func, *args):
        mode = test_func.file_open_mode
        print_label(name, test_func)
        if "w" not in mode or "+" in mode:
            warm_cache(name)
        with open_func(name) as f:
            n, real, cpu = run_during(1.5, lambda: test_func(f, *args))
        print_results(size, n, real, cpu)

    def run_test_family(tests, mode_filter, files, open_func, *make_args):
        for test_func in tests:
            if test_func is None:
                out.write("\n")
                continue
            if mode_filter in test_func.file_open_mode:
                continue
            for s in test_func.file_sizes:
                name, size = files[size_names[s]]
                #name += file_ext
                args = tuple(f(name, size) for f in make_args)
                run_one_test(name, size,
                             open_func, test_func, *args)

    size_names = {
        "small": 0,
        "medium": 1,
        "large": 2,
    }

    print(f"Python {sys.version}")
    print("Unicode: PEP 393")
    print(platform.platform())
    binary_files = list(get_binary_files())
    text_files = list(get_text_files())
    if "b" in options:
        print("Binary unit = one byte")
    if "t" in options:
        print(f"Text unit = one character ({TEXT_ENCODING}-decoded)")

    # Binary reads
    if "b" in options and "r" in options:
        print("\n** Binary input **\n")
        run_test_family(read_tests, "t", binary_files, lambda fn: open(fn, "rb"))

    # Text reads
    if "t" in options and "r" in options:
        print("\n** Text input **\n")
        run_test_family(read_tests, "b", text_files, lambda fn: text_open(fn, "r"))

    # Binary writes
    if "b" in options and "w" in options:
        print("\n** Binary append **\n")

        def make_test_source(name, size):
            with open(name, "rb") as f:
                return f.read()
        run_test_family(write_tests, "t", binary_files,
                        lambda fn: open(os.devnull, "wb"), make_test_source)

    # Text writes
    if "t" in options and "w" in options:
        print("\n** Text append **\n")

        def make_test_source(name, size):
            with text_open(name, "r") as f:
                return f.read()
        run_test_family(write_tests, "b", text_files,
                        lambda fn: text_open(os.devnull, "w"), make_test_source)

    # Binary overwrites
    if "b" in options and "w" in options:
        print("\n** Binary overwrite **\n")

        def make_test_source(name, size):
            with open(name, "rb") as f:
                return f.read()
        run_test_family(modify_tests, "t", binary_files,
                        lambda fn: open(fn, "r+b"), make_test_source)

    # Text overwrites
    if "t" in options and "w" in options:
        print("\n** Text overwrite **\n")

        def make_test_source(name, size):
            with text_open(name, "r") as f:
                return f.read()
        run_test_family(modify_tests, "b", text_files,
                        lambda fn: text_open(fn, "r+"), make_test_source)


def prepare_files():
    print("Preparing files...")
    # Binary files
    for name, size in get_binary_files():
        if os.path.isfile(name) and os.path.getsize(name) == size:
            continue
        with open(name, "wb") as f:
            f.write(os.urandom(size))
    # Text files
    chunk = []
    with text_open(__file__, "r", encoding='utf8') as f:
        for line in f:
            if line.startswith("# <iobench text chunk marker>"):
                break
        else:
            raise RuntimeError(
                f"Couldn't find chunk marker in {__file__} !")
        if NEWLINES == "all":
            it = itertools.cycle(["\n", "\r", "\r\n"])
        else:
            it = itertools.repeat(
                {"cr": "\r", "lf": "\n", "crlf": "\r\n"}[NEWLINES])
        chunk = "".join(line.replace("\n", next(it)) for line in f)
        if isinstance(chunk, bytes):
            chunk = chunk.decode('utf8')
        chunk = chunk.encode(TEXT_ENCODING)
    for name, size in get_text_files():
        if os.path.isfile(name) and os.path.getsize(name) == size:
            continue
        head = chunk * (size // len(chunk))
        tail = chunk[:size % len(chunk)]
        # Adjust tail to end on a character boundary
        while True:
            try:
                tail.decode(TEXT_ENCODING)
                break
            except UnicodeDecodeError:
                tail = tail[:-1]
        with open(name, "wb") as f:
            f.write(head)
            f.write(tail)


def main():
    global TEXT_ENCODING, NEWLINES

    usage = "usage: %prog [-h|--help] [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("-b", "--binary",
                      action="store_true", dest="binary", default=False,
                      help="run binary I/O tests")
    parser.add_option("-t", "--text",
                      action="store_true", dest="text", default=False,
                      help="run text I/O tests")
    parser.add_option("-r", "--read",
                      action="store_true", dest="read", default=False,
                      help="run read tests")
    parser.add_option("-w", "--write",
                      action="store_true", dest="write", default=False,
                      help="run write & modify tests")
    parser.add_option("-E", "--encoding",
                      action="store", dest="encoding", default=None,
                      help=f"encoding for text tests (default: {TEXT_ENCODING})")
    parser.add_option("-N", "--newlines",
                      action="store", dest="newlines", default='lf',
                      help="line endings for text tests "
                           "(one of: {lf (default), cr, crlf, all})")
    parser.add_option("-m", "--io-module",
                      action="store", dest="io_module", default=None,
                      help="io module to test (default: builtin open())")
    options, args = parser.parse_args()
    if args:
        parser.error("unexpected arguments")
    NEWLINES = options.newlines.lower()
    if NEWLINES not in ('lf', 'cr', 'crlf', 'all'):
        parser.error(f"invalid 'newlines' option: {NEWLINES!r}")

    test_options = ""
    if options.read:
        test_options += "r"
    if options.write:
        test_options += "w"
    elif not options.read:
        test_options += "rw"
    if options.text:
        test_options += "t"
    if options.binary:
        test_options += "b"
    elif not options.text:
        test_options += "tb"

    if options.encoding:
        TEXT_ENCODING = options.encoding

    if options.io_module:
        globals()['open'] = __import__(options.io_module, {}, {}, ['open']).open

    prepare_files()
    run_all_tests(test_options)

if __name__ == "__main__":
    main()


# -- This part to exercise text reading. Don't change anything! --
# <iobench text chunk marker>

"""
1.
Gáttir allar,
áðr gangi fram,
um skoðask skyli,
um skyggnast skyli,
því at óvíst er at vita,
hvar óvinir
sitja á fleti fyrir.

2.
Gefendr heilir!
Gestr er inn kominn,
hvar skal sitja sjá?
Mjök er bráðr,
sá er á bröndum skal
síns of freista frama.

3.
Elds er þörf,
þeims inn er kominn
ok á kné kalinn;
matar ok váða
er manni þörf,
þeim er hefr um fjall farit.

4.
Vatns er þörf,
þeim er til verðar kemr,
þerru ok þjóðlaðar,
góðs of æðis,
ef sér geta mætti,
orðs ok endrþögu.

5.
Vits er þörf,
þeim er víða ratar;
dælt er heima hvat;
at augabragði verðr,
sá er ekki kann
ok með snotrum sitr.

6.
At hyggjandi sinni
skyli-t maðr hræsinn vera,
heldr gætinn at geði;
þá er horskr ok þögull
kemr heimisgarða til,
sjaldan verðr víti vörum,
því at óbrigðra vin
fær maðr aldregi
en mannvit mikit.

7.
Inn vari gestr,
er til verðar kemr,
þunnu hljóði þegir,
eyrum hlýðir,
en augum skoðar;
svá nýsisk fróðra hverr fyrir.

8.
Hinn er sæll,
er sér of getr
lof ok líknstafi;
ódælla er við þat,
er maðr eiga skal
annars brjóstum í.
"""

"""
C'est revenir tard, je le sens, sur un sujet trop rebattu et déjà presque oublié. Mon état, qui ne me permet plus aucun travail suivi, mon aversion pour le genre polémique, ont causé ma lenteur à écrire et ma répugnance à publier. J'aurais même tout à fait supprimé ces Lettres, ou plutôt je ne les aurais point écrites, s'il n'eût été question que de moi : Mais ma patrie ne m'est pas tellement devenue étrangère que je puisse voir tranquillement opprimer ses citoyens, surtout lorsqu'ils n'ont compromis leurs droits qu'en défendant ma cause. Je serais le dernier des hommes si dans une telle occasion j'écoutais un sentiment qui n'est plus ni douceur ni patience, mais faiblesse et lâcheté, dans celui qu'il empêche de remplir son devoir.
Rien de moins important pour le public, j'en conviens, que la matière de ces lettres. La constitution d'une petite République, le sort d'un petit particulier, l'exposé de quelques injustices, la réfutation de quelques sophismes ; tout cela n'a rien en soi d'assez considérable pour mériter beaucoup de lecteurs : mais si mes sujets sont petits mes objets sont grands, et dignes de l'attention de tout honnête homme. Laissons Genève à sa place, et Rousseau dans sa dépression ; mais la religion, mais la liberté, la justice ! voilà, qui que vous soyez, ce qui n'est pas au-dessous de vous.
Qu'on ne cherche pas même ici dans le style le dédommagement de l'aridité de la matière. Ceux que quelques traits heureux de ma plume ont si fort irrités trouveront de quoi s'apaiser dans ces lettres, L'honneur de défendre un opprimé eût enflammé mon coeur si j'avais parlé pour un autre. Réduit au triste emploi de me défendre moi-même, j'ai dû me borner à raisonner ; m'échauffer eût été m'avilir. J'aurai donc trouvé grâce en ce point devant ceux qui s'imaginent qu'il est essentiel à la vérité d'être dite froidement ; opinion que pourtant j'ai peine à comprendre. Lorsqu'une vive persuasion nous anime, le moyen d'employer un langage glacé ? Quand Archimède tout transporté courait nu dans les rues de Syracuse, en avait-il moins trouvé la vérité parce qu'il se passionnait pour elle ? Tout au contraire, celui qui la sent ne peut s'abstenir de l'adorer ; celui qui demeure froid ne l'a pas vue.
Quoi qu'il en soit, je prie les lecteurs de vouloir bien mettre à part mon beau style, et d'examiner seulement si je raisonne bien ou mal ; car enfin, de cela seul qu'un auteur s'exprime en bons termes, je ne vois pas comment il peut s'ensuivre que cet auteur ne sait ce qu'il dit.
"""
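iobench's run_during (above) pairs os.times() with a wall-clock timestamp so it can warn when a test consumed much less CPU time than real time, i.e. spent its time waiting on the operating system rather than computing. A minimal sketch of that CPU-vs-wall measurement, assuming POSIX-style os.times() fields (the elapsed field can be zero on some platforms, hence the fallback, mirroring the original):

import os
import time

def cpu_and_real(func, duration=1.5):
    """Call func in a loop for roughly `duration` seconds.

    Returns (calls, real_seconds, cpu_seconds); cpu much smaller than
    real suggests the loop was mostly blocked on I/O.
    """
    n = 0
    start = os.times()
    start_stamp = time.time()
    while time.time() - start_stamp < duration:
        func()
        n += 1
    end = os.times()
    # os.times().elapsed may be zero on some platforms; fall back to time.time()
    real = (end.elapsed - start.elapsed) or (time.time() - start_stamp)
    cpu = (end.user + end.system) - (start.user + start.system)
    return n, real, cpu

n, real, cpu = cpu_and_real(lambda: sum(range(10000)), duration=0.5)
print("%d calls; CPU was %.0f%% of wall time" % (n, 100 * cpu / real))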
@@ -1,68 +0,0 @@
stringbench is a set of performance tests comparing byte string
operations with unicode operations.  The two string implementations
are loosely based on each other, and sometimes the algorithm for one is
faster than the other.

This test set was started at the Need For Speed sprint in Reykjavik
to identify which string methods could be sped up quickly and to
identify obvious places for improvement.

Here is an example of a benchmark:

@bench('"Andrew".startswith("A")', 'startswith single character', 1000)
def startswith_single(STR):
    s1 = STR("Andrew")
    s2 = STR("A")
    s1_startswith = s1.startswith
    for x in _RANGE_1000:
        s1_startswith(s2)
The bench decorator takes three parameters.  The first is a short
description of how the code works.  In most cases this is a Python
code snippet.  It is not the code which is actually run, because the
real code is hand-optimized to focus on the method being tested.

The second parameter is a group title.  All benchmarks with the same
group title are listed together.  This lets you compare different
implementations of the same algorithm, such as "t in s"
vs. "s.find(t)".

The last is a count.  Each benchmark loops over the algorithm either
100 or 1000 times, depending on the algorithm performance.  The output
time is the time per benchmark call, so the reader needs a way to know
how to scale the performance.

These parameters become function attributes, as sketched below.
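The README does not show the decorator itself; here is a minimal sketch of how such a decorator could stash its three parameters as function attributes (the attribute names and the _RANGE_1000 helper are illustrative, not necessarily the ones stringbench actually used):

_RANGE_1000 = range(1000)

def bench(snippet, group, repeat_count):
    """Record benchmark metadata on the decorated function."""
    def decorator(func):
        func.snippet = snippet
        func.group = group
        func.repeat_count = repeat_count
        return func
    return decorator

@bench('"Andrew".startswith("A")', 'startswith single character', 1000)
def startswith_single(STR):
    s1 = STR("Andrew")
    s2 = STR("A")
    s1_startswith = s1.startswith
    for x in _RANGE_1000:
        s1_startswith(s2)

# A driver can now group tests by the stored attributes and scale the
# measured times by the stored repeat count:
startswith_single(str)                          # unicode variant
startswith_single(lambda s: s.encode('ascii'))  # byte string variant
print(startswith_single.group, startswith_single.repeat_count)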
Here is an example of the output:

========== count newlines
38.54   41.60   92.7    ...text.with.2000.newlines.count("\n") (*100)
========== early match, single character
1.14    1.18    96.8    ("A"*1000).find("A") (*1000)
0.44    0.41    105.6   "A" in "A"*1000 (*1000)
1.15    1.17    98.1    ("A"*1000).index("A") (*1000)
The first column is the run time in milliseconds for byte strings.
The second is the run time for unicode strings.  The third is byte
time divided by unicode time, expressed as a percentage: values above
100 mean the unicode implementation was faster, values below 100 mean
the byte string one was.  For example, 38.54 / 41.60 * 100 gives the
92.7 in the first row above.

The last column contains the code snippet and the repeat count for the
internal benchmark loop.
The times are computed with 'timeit.py', which repeats the test more
and more times until the total time takes over 0.2 seconds, returning
the best time for a single iteration.
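That is essentially how the modern timeit API behaves as well; a small sketch reproducing one row of the table above with it (the snippet chosen is from the sample output; the 5-repeat count is an illustrative choice):

import timeit

timer = timeit.Timer('("A"*1000).find("A")')
# autorange() grows the loop count until one run takes at least 0.2 seconds.
number, _ = timer.autorange()
best = min(timer.repeat(repeat=5, number=number)) / number
print(f"{best * 1e6:.2f} us per call")

Taking the minimum over several repeats is the usual way to suppress scheduler and cache noise: slower runs reflect interference, not the code under test.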
The final line of the output is the cumulative time for byte and
unicode strings, and the overall performance of unicode relative to
bytes.  For example:

4079.83 5432.25 75.1    TOTAL

However, this has no meaning as it evenly weights every test.
File diff suppressed because it is too large