add the multiprocessing package to fulfill PEP 371

This commit is contained in:
Benjamin Peterson 2008-06-11 02:40:25 +00:00
parent d5299866f9
commit 190d56e009
32 changed files with 12500 additions and 0 deletions


@@ -0,0 +1,235 @@
#
# Simple benchmarks for the multiprocessing package
#
import time, sys, multiprocessing, threading, Queue, gc
if sys.platform == 'win32':
_timer = time.clock
else:
_timer = time.time
delta = 1
#### TEST_QUEUESPEED
def queuespeed_func(q, c, iterations):
a = '0' * 256
c.acquire()
c.notify()
c.release()
for i in xrange(iterations):
q.put(a)
q.put('STOP')
def test_queuespeed(Process, q, c):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
p = Process(target=queuespeed_func, args=(q, c, iterations))
c.acquire()
p.start()
c.wait()
c.release()
result = None
t = _timer()
while result != 'STOP':
result = q.get()
elapsed = _timer() - t
p.join()
print iterations, 'objects passed through the queue in', elapsed, 'seconds'
print 'average number/sec:', iterations/elapsed
#### TEST_PIPESPEED
def pipe_func(c, cond, iterations):
a = '0' * 256
cond.acquire()
cond.notify()
cond.release()
for i in xrange(iterations):
c.send(a)
c.send('STOP')
def test_pipespeed():
c, d = multiprocessing.Pipe()
cond = multiprocessing.Condition()
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
p = multiprocessing.Process(target=pipe_func,
args=(d, cond, iterations))
cond.acquire()
p.start()
cond.wait()
cond.release()
result = None
t = _timer()
while result != 'STOP':
result = c.recv()
elapsed = _timer() - t
p.join()
print iterations, 'objects passed through connection in', elapsed, 'seconds'
print 'average number/sec:', iterations/elapsed
#### TEST_SEQSPEED
def test_seqspeed(seq):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
t = _timer()
for i in xrange(iterations):
a = seq[5]
elapsed = _timer()-t
print iterations, 'iterations in', elapsed, 'seconds'
print 'average number/sec:', iterations/elapsed
#### TEST_LOCK
def test_lockspeed(l):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
t = _timer()
for i in xrange(iterations):
l.acquire()
l.release()
elapsed = _timer()-t
print iterations, 'iterations in', elapsed, 'seconds'
print 'average number/sec:', iterations/elapsed
#### TEST_CONDITION
def conditionspeed_func(c, N):
c.acquire()
c.notify()
for i in xrange(N):
c.wait()
c.notify()
c.release()
def test_conditionspeed(Process, c):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
c.acquire()
p = Process(target=conditionspeed_func, args=(c, iterations))
p.start()
c.wait()
t = _timer()
for i in xrange(iterations):
c.notify()
c.wait()
elapsed = _timer()-t
c.release()
p.join()
print iterations * 2, 'waits in', elapsed, 'seconds'
print 'average number/sec:', iterations * 2 / elapsed
####
def test():
manager = multiprocessing.Manager()
gc.disable()
print '\n\t######## testing Queue.Queue\n'
test_queuespeed(threading.Thread, Queue.Queue(),
threading.Condition())
print '\n\t######## testing multiprocessing.Queue\n'
test_queuespeed(multiprocessing.Process, multiprocessing.Queue(),
multiprocessing.Condition())
print '\n\t######## testing Queue managed by server process\n'
test_queuespeed(multiprocessing.Process, manager.Queue(),
manager.Condition())
print '\n\t######## testing multiprocessing.Pipe\n'
test_pipespeed()
print
print '\n\t######## testing list\n'
test_seqspeed(range(10))
print '\n\t######## testing list managed by server process\n'
test_seqspeed(manager.list(range(10)))
print '\n\t######## testing Array("i", ..., lock=False)\n'
test_seqspeed(multiprocessing.Array('i', range(10), lock=False))
print '\n\t######## testing Array("i", ..., lock=True)\n'
test_seqspeed(multiprocessing.Array('i', range(10), lock=True))
print
print '\n\t######## testing threading.Lock\n'
test_lockspeed(threading.Lock())
print '\n\t######## testing threading.RLock\n'
test_lockspeed(threading.RLock())
print '\n\t######## testing multiprocessing.Lock\n'
test_lockspeed(multiprocessing.Lock())
print '\n\t######## testing multiprocessing.RLock\n'
test_lockspeed(multiprocessing.RLock())
print '\n\t######## testing lock managed by server process\n'
test_lockspeed(manager.Lock())
print '\n\t######## testing rlock managed by server process\n'
test_lockspeed(manager.RLock())
print
print '\n\t######## testing threading.Condition\n'
test_conditionspeed(threading.Thread, threading.Condition())
print '\n\t######## testing multiprocessing.Condition\n'
test_conditionspeed(multiprocessing.Process, multiprocessing.Condition())
print '\n\t######## testing condition managed by a server process\n'
test_conditionspeed(multiprocessing.Process, manager.Condition())
gc.enable()
if __name__ == '__main__':
multiprocessing.freeze_support()
test()
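Every benchmark above uses the same adaptive-timing idiom: the workload is doubled until a single run lasts at least `delta` seconds, so that even very cheap operations are timed over a measurable interval. A minimal standalone sketch of that pattern (the `bench` helper below is illustrative, not part of the committed file):

import time, threading

def bench(op, delta=1.0):
    # double the workload until one timed run lasts at least `delta` seconds
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        t = time.time()
        for i in xrange(iterations):
            op()
        elapsed = time.time() - t
    return iterations / elapsed   # average operations per second

# e.g. measure threading.Lock acquire/release round-trips
lock = threading.Lock()
def acquire_release():
    lock.acquire()
    lock.release()
print bench(acquire_release), 'lock round-trips per second'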


@@ -0,0 +1,362 @@
#
# Module to allow spawning of processes on foreign host
#
# Depends on `multiprocessing` package -- tested with `processing-0.60`
#
__all__ = ['Cluster', 'Host', 'get_logger', 'current_process']
#
# Imports
#
import sys
import os
import tarfile
import shutil
import subprocess
import logging
import itertools
import Queue
try:
import cPickle as pickle
except ImportError:
import pickle
from multiprocessing import Process, current_process, cpu_count
from multiprocessing import util, managers, connection, forking, pool
#
# Logging
#
def get_logger():
return _logger
_logger = logging.getLogger('distributing')
_logger.propagate = 0 # do not pass records on to the root logger
util.fix_up_logger(_logger)
_formatter = logging.Formatter(util.DEFAULT_LOGGING_FORMAT)
_handler = logging.StreamHandler()
_handler.setFormatter(_formatter)
_logger.addHandler(_handler)
info = _logger.info
debug = _logger.debug
#
# Get number of cpus
#
try:
slot_count = cpu_count()
except NotImplementedError:
slot_count = 1
#
# Manager type which spawns subprocesses
#
class HostManager(managers.SyncManager):
'''
Manager type used for spawning processes on a (presumably) foreign host
'''
def __init__(self, address, authkey):
managers.SyncManager.__init__(self, address, authkey)
self._name = 'Host-unknown'
def Process(self, group=None, target=None, name=None, args=(), kwargs={}):
if hasattr(sys.modules['__main__'], '__file__'):
main_path = os.path.basename(sys.modules['__main__'].__file__)
else:
main_path = None
data = pickle.dumps((target, args, kwargs))
p = self._RemoteProcess(data, main_path)
if name is None:
temp = self._name.split('Host-')[-1] + '/Process-%s'
name = temp % ':'.join(map(str, p.get_identity()))
p.set_name(name)
return p
@classmethod
def from_address(cls, address, authkey):
manager = cls(address, authkey)
managers.transact(address, authkey, 'dummy')
manager._state.value = managers.State.STARTED
manager._name = 'Host-%s:%s' % manager.address
manager.shutdown = util.Finalize(
manager, HostManager._finalize_host,
args=(manager._address, manager._authkey, manager._name),
exitpriority=-10
)
return manager
@staticmethod
def _finalize_host(address, authkey, name):
managers.transact(address, authkey, 'shutdown')
def __repr__(self):
return '<Host(%s)>' % self._name
#
# Process subclass representing a process on (possibly) a remote machine
#
class RemoteProcess(Process):
'''
Represents a process started on a remote host
'''
def __init__(self, data, main_path):
assert not main_path or os.path.basename(main_path) == main_path
Process.__init__(self)
self._data = data
self._main_path = main_path
def _bootstrap(self):
forking.prepare({'main_path': self._main_path})
self._target, self._args, self._kwargs = pickle.loads(self._data)
return Process._bootstrap(self)
def get_identity(self):
return self._identity
HostManager.register('_RemoteProcess', RemoteProcess)
#
# A Pool class that uses a cluster
#
class DistributedPool(pool.Pool):
def __init__(self, cluster, processes=None, initializer=None, initargs=()):
self._cluster = cluster
self.Process = cluster.Process
pool.Pool.__init__(self, processes or len(cluster),
initializer, initargs)
def _setup_queues(self):
self._inqueue = self._cluster._SettableQueue()
self._outqueue = self._cluster._SettableQueue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
inqueue.set_contents([None] * size)
#
# Manager type which starts host managers on other machines
#
def LocalProcess(**kwds):
p = Process(**kwds)
p.set_name('localhost/' + p.get_name())
return p
class Cluster(managers.SyncManager):
'''
Represents collection of slots running on various hosts.
`Cluster` is a subclass of `SyncManager` so it allows creation of
various types of shared objects.
'''
def __init__(self, hostlist, modules):
managers.SyncManager.__init__(self, address=('localhost', 0))
self._hostlist = hostlist
self._modules = modules
if __name__ not in modules:
modules.append(__name__)
files = [sys.modules[name].__file__ for name in modules]
for i, file in enumerate(files):
if file.endswith('.pyc') or file.endswith('.pyo'):
files[i] = file[:-4] + '.py'
self._files = [os.path.abspath(file) for file in files]
def start(self):
managers.SyncManager.start(self)
l = connection.Listener(family='AF_INET', authkey=self._authkey)
for i, host in enumerate(self._hostlist):
host._start_manager(i, self._authkey, l.address, self._files)
for host in self._hostlist:
if host.hostname != 'localhost':
conn = l.accept()
i, address, cpus = conn.recv()
conn.close()
other_host = self._hostlist[i]
other_host.manager = HostManager.from_address(address,
self._authkey)
other_host.slots = other_host.slots or cpus
other_host.Process = other_host.manager.Process
else:
host.slots = host.slots or slot_count
host.Process = LocalProcess
self._slotlist = [
Slot(host) for host in self._hostlist for i in range(host.slots)
]
self._slot_iterator = itertools.cycle(self._slotlist)
self._base_shutdown = self.shutdown
del self.shutdown
def shutdown(self):
for host in self._hostlist:
if host.hostname != 'localhost':
host.manager.shutdown()
self._base_shutdown()
def Process(self, group=None, target=None, name=None, args=(), kwargs={}):
slot = self._slot_iterator.next()
return slot.Process(
group=group, target=target, name=name, args=args, kwargs=kwargs
)
def Pool(self, processes=None, initializer=None, initargs=()):
return DistributedPool(self, processes, initializer, initargs)
def __getitem__(self, i):
return self._slotlist[i]
def __len__(self):
return len(self._slotlist)
def __iter__(self):
return iter(self._slotlist)
#
# Queue subclass used by distributed pool
#
class SettableQueue(Queue.Queue):
def empty(self):
return not self.queue
def full(self):
return self.maxsize > 0 and len(self.queue) == self.maxsize
def set_contents(self, contents):
# length of contents must be at least as large as the number of
# threads which have potentially called get()
self.not_empty.acquire()
try:
self.queue.clear()
self.queue.extend(contents)
self.not_empty.notifyAll()
finally:
self.not_empty.release()
Cluster.register('_SettableQueue', SettableQueue)
#
# Class representing a notional cpu in the cluster
#
class Slot(object):
def __init__(self, host):
self.host = host
self.Process = host.Process
#
# Host
#
class Host(object):
'''
Represents a host to use as a node in a cluster.
`hostname` gives the name of the host. If hostname is not
"localhost" then ssh is used to log in to the host. To log in as
a different user use a host name of the form
"username@somewhere.org"
`slots` is used to specify the number of slots for processes on
the host. This affects how often processes will be allocated to
this host. Normally this should be equal to the number of cpus on
that host.
'''
def __init__(self, hostname, slots=None):
self.hostname = hostname
self.slots = slots
def _start_manager(self, index, authkey, address, files):
if self.hostname != 'localhost':
tempdir = copy_to_remote_temporary_directory(self.hostname, files)
debug('startup files copied to %s:%s', self.hostname, tempdir)
p = subprocess.Popen(
['ssh', self.hostname, 'python', '-c',
'"import os; os.chdir(%r); '
'from distributing import main; main()"' % tempdir],
stdin=subprocess.PIPE
)
data = dict(
name='BootstrappingHost', index=index,
dist_log_level=_logger.getEffectiveLevel(),
dir=tempdir, authkey=str(authkey), parent_address=address
)
pickle.dump(data, p.stdin, pickle.HIGHEST_PROTOCOL)
p.stdin.close()
#
# Copy files to remote directory, returning name of directory
#
unzip_code = '''"
import tempfile, os, sys, tarfile
tempdir = tempfile.mkdtemp(prefix='distrib-')
os.chdir(tempdir)
tf = tarfile.open(fileobj=sys.stdin, mode='r|gz')
for ti in tf:
tf.extract(ti)
print tempdir
"'''
def copy_to_remote_temporary_directory(host, files):
p = subprocess.Popen(
['ssh', host, 'python', '-c', unzip_code],
stdout=subprocess.PIPE, stdin=subprocess.PIPE
)
tf = tarfile.open(fileobj=p.stdin, mode='w|gz')
for name in files:
tf.add(name, os.path.basename(name))
tf.close()
p.stdin.close()
return p.stdout.read().rstrip()
#
# Code which runs a host manager
#
def main():
# get data from parent over stdin
data = pickle.load(sys.stdin)
sys.stdin.close()
# set some stuff
_logger.setLevel(data['dist_log_level'])
forking.prepare(data)
# create server for a `HostManager` object
server = managers.Server(HostManager._registry, ('', 0), data['authkey'])
current_process()._server = server
# report server address and number of cpus back to parent
conn = connection.Client(data['parent_address'], authkey=data['authkey'])
conn.send((data['index'], server.address, slot_count))
conn.close()
# set name etc
current_process().set_name('Host-%s:%s' % server.address)
util._run_after_forkers()
# register a cleanup function
def cleanup(directory):
debug('removing directory %s', directory)
shutil.rmtree(directory)
debug('shutting down host manager')
util.Finalize(None, cleanup, args=[data['dir']], exitpriority=0)
# start host manager
debug('remote host manager starting in %s', data['dir'])
server.serve_forever()
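Tying the pieces together: a cluster is assembled from `Host` objects and then used much like the ordinary `multiprocessing` API, with processes handed out round-robin over the slots. A hedged sketch of the intended usage (the host name `user@node1`, the slot count and the `square` function are all illustrative):

from distributing import Cluster, Host

def square(x):
    return x * x

if __name__ == '__main__':
    cluster = Cluster([Host('localhost'), Host('user@node1', slots=2)],
                      modules=['__main__'])
    cluster.start()
    p = cluster.Process(target=square, args=(7,))  # runs on the next free slot
    p.start()
    p.join()
    pool = cluster.Pool(3)              # a DistributedPool over the cluster
    print pool.map(square, range(5))    # [0, 1, 4, 9, 16]
    cluster.shutdown()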


@@ -0,0 +1,98 @@
#
# This module shows how to use arbitrary callables with a subclass of
# `BaseManager`.
#
from multiprocessing import freeze_support
from multiprocessing.managers import BaseManager, BaseProxy
import operator
##
class Foo(object):
def f(self):
print 'you called Foo.f()'
def g(self):
print 'you called Foo.g()'
def _h(self):
print 'you called Foo._h()'
# A simple generator function
def baz():
for i in xrange(10):
yield i*i
# Proxy type for generator objects
class GeneratorProxy(BaseProxy):
_exposed_ = ('next', '__next__')
def __iter__(self):
return self
def next(self):
return self._callmethod('next')
def __next__(self):
return self._callmethod('__next__')
# Function to return the operator module
def get_operator_module():
return operator
##
class MyManager(BaseManager):
pass
# register the Foo class; make `f()` and `g()` accessible via proxy
MyManager.register('Foo1', Foo)
# register the Foo class; make `g()` and `_h()` accessible via proxy
MyManager.register('Foo2', Foo, exposed=('g', '_h'))
# register the generator function baz; use `GeneratorProxy` to make proxies
MyManager.register('baz', baz, proxytype=GeneratorProxy)
# register get_operator_module(); make public functions accessible via proxy
MyManager.register('operator', get_operator_module)
##
def test():
manager = MyManager()
manager.start()
print '-' * 20
f1 = manager.Foo1()
f1.f()
f1.g()
assert not hasattr(f1, '_h')
assert sorted(f1._exposed_) == sorted(['f', 'g'])
print '-' * 20
f2 = manager.Foo2()
f2.g()
f2._h()
assert not hasattr(f2, 'f')
assert sorted(f2._exposed_) == sorted(['g', '_h'])
print '-' * 20
it = manager.baz()
for i in it:
print '<%d>' % i,
print
print '-' * 20
op = manager.operator()
print 'op.add(23, 45) =', op.add(23, 45)
print 'op.pow(2, 94) =', op.pow(2, 94)
print 'op.getslice(range(10), 2, 6) =', op.getslice(range(10), 2, 6)
print 'op.repeat(range(5), 3) =', op.repeat(range(5), 3)
print 'op._exposed_ =', op._exposed_
##
if __name__ == '__main__':
freeze_support()
test()

Doc/includes/mp_pool.py (new file, 311 lines)

@@ -0,0 +1,311 @@
#
# A test of `multiprocessing.Pool` class
#
import multiprocessing
import time
import random
import sys
#
# Functions used by test code
#
def calculate(func, args):
result = func(*args)
return '%s says that %s%s = %s' % (
multiprocessing.current_process().get_name(),
func.__name__, args, result
)
def calculatestar(args):
return calculate(*args)
def mul(a, b):
time.sleep(0.5*random.random())
return a * b
def plus(a, b):
time.sleep(0.5*random.random())
return a + b
def f(x):
return 1.0 / (x-5.0)
def pow3(x):
return x**3
def noop(x):
pass
#
# Test code
#
def test():
print 'cpu_count() = %d\n' % multiprocessing.cpu_count()
#
# Create pool
#
PROCESSES = 4
print 'Creating pool with %d processes\n' % PROCESSES
pool = multiprocessing.Pool(PROCESSES)
print 'pool = %s' % pool
print
#
# Tests
#
TASKS = [(mul, (i, 7)) for i in range(10)] + \
[(plus, (i, 8)) for i in range(10)]
results = [pool.apply_async(calculate, t) for t in TASKS]
imap_it = pool.imap(calculatestar, TASKS)
imap_unordered_it = pool.imap_unordered(calculatestar, TASKS)
print 'Ordered results using pool.apply_async():'
for r in results:
print '\t', r.get()
print
print 'Ordered results using pool.imap():'
for x in imap_it:
print '\t', x
print
print 'Unordered results using pool.imap_unordered():'
for x in imap_unordered_it:
print '\t', x
print
print 'Ordered results using pool.map() --- will block till complete:'
for x in pool.map(calculatestar, TASKS):
print '\t', x
print
#
# Simple benchmarks
#
N = 100000
print 'def pow3(x): return x**3'
t = time.time()
A = map(pow3, xrange(N))
print '\tmap(pow3, xrange(%d)):\n\t\t%s seconds' % \
(N, time.time() - t)
t = time.time()
B = pool.map(pow3, xrange(N))
print '\tpool.map(pow3, xrange(%d)):\n\t\t%s seconds' % \
(N, time.time() - t)
t = time.time()
C = list(pool.imap(pow3, xrange(N), chunksize=N//8))
print '\tlist(pool.imap(pow3, xrange(%d), chunksize=%d)):\n\t\t%s' \
' seconds' % (N, N//8, time.time() - t)
assert A == B == C, (len(A), len(B), len(C))
print
L = [None] * 1000000
print 'def noop(x): pass'
print 'L = [None] * 1000000'
t = time.time()
A = map(noop, L)
print '\tmap(noop, L):\n\t\t%s seconds' % \
(time.time() - t)
t = time.time()
B = pool.map(noop, L)
print '\tpool.map(noop, L):\n\t\t%s seconds' % \
(time.time() - t)
t = time.time()
C = list(pool.imap(noop, L, chunksize=len(L)//8))
print '\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \
(len(L)//8, time.time() - t)
assert A == B == C, (len(A), len(B), len(C))
print
del A, B, C, L
#
# Test error handling
#
print 'Testing error handling:'
try:
print pool.apply(f, (5,))
except ZeroDivisionError:
print '\tGot ZeroDivisionError as expected from pool.apply()'
else:
raise AssertionError, 'expected ZeroDivisionError'
try:
print pool.map(f, range(10))
except ZeroDivisionError:
print '\tGot ZeroDivisionError as expected from pool.map()'
else:
raise AssertionError, 'expected ZeroDivisionError'
try:
print list(pool.imap(f, range(10)))
except ZeroDivisionError:
print '\tGot ZeroDivisionError as expected from list(pool.imap())'
else:
raise AssertionError, 'expected ZeroDivisionError'
it = pool.imap(f, range(10))
for i in range(10):
try:
x = it.next()
except ZeroDivisionError:
if i == 5:
pass
except StopIteration:
break
else:
if i == 5:
raise AssertionError, 'expected ZeroDivisionError'
assert i == 9
print '\tGot ZeroDivisionError as expected from IMapIterator.next()'
print
#
# Testing timeouts
#
print 'Testing ApplyResult.get() with timeout:',
res = pool.apply_async(calculate, TASKS[0])
while 1:
sys.stdout.flush()
try:
sys.stdout.write('\n\t%s' % res.get(0.02))
break
except multiprocessing.TimeoutError:
sys.stdout.write('.')
print
print
print 'Testing IMapIterator.next() with timeout:',
it = pool.imap(calculatestar, TASKS)
while 1:
sys.stdout.flush()
try:
sys.stdout.write('\n\t%s' % it.next(0.02))
except StopIteration:
break
except multiprocessing.TimeoutError:
sys.stdout.write('.')
print
print
#
# Testing callback
#
print 'Testing callback:'
A = []
B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729]
r = pool.apply_async(mul, (7, 8), callback=A.append)
r.wait()
r = pool.map_async(pow3, range(10), callback=A.extend)
r.wait()
if A == B:
print '\tcallbacks succeeded\n'
else:
print '\t*** callbacks failed\n\t\t%s != %s\n' % (A, B)
#
# Check there are no outstanding tasks
#
assert not pool._cache, 'cache = %r' % pool._cache
#
# Check close() methods
#
print 'Testing close():'
for worker in pool._pool:
assert worker.is_alive()
result = pool.apply_async(time.sleep, [0.5])
pool.close()
pool.join()
assert result.get() is None
for worker in pool._pool:
assert not worker.is_alive()
print '\tclose() succeeded\n'
#
# Check terminate() method
#
print 'Testing terminate():'
pool = multiprocessing.Pool(2)
DELTA = 0.1
ignore = pool.apply(pow3, [2])
results = [pool.apply_async(time.sleep, [DELTA]) for i in range(100)]
pool.terminate()
pool.join()
for worker in pool._pool:
assert not worker.is_alive()
print '\tterminate() succeeded\n'
#
# Check garbage collection
#
print 'Testing garbage collection:'
pool = multiprocessing.Pool(2)
DELTA = 0.1
processes = pool._pool
ignore = pool.apply(pow3, [2])
results = [pool.apply_async(time.sleep, [DELTA]) for i in range(100)]
results = pool = None
time.sleep(DELTA * 2)
for worker in processes:
assert not worker.is_alive()
print '\tgarbage collection succeeded\n'
if __name__ == '__main__':
multiprocessing.freeze_support()
assert len(sys.argv) in (1, 2)
if len(sys.argv) == 1 or sys.argv[1] == 'processes':
print ' Using processes '.center(79, '-')
elif sys.argv[1] == 'threads':
print ' Using threads '.center(79, '-')
import multiprocessing.dummy as multiprocessing
else:
print 'Usage:\n\t%s [processes | threads]' % sys.argv[0]
raise SystemExit(2)
test()


@@ -0,0 +1,273 @@
#
# A test file for the `multiprocessing` package
#
import time, sys, random
from Queue import Empty
import multiprocessing # may get overwritten
#### TEST_VALUE
def value_func(running, mutex):
random.seed()
time.sleep(random.random()*4)
mutex.acquire()
print '\n\t\t\t' + str(multiprocessing.current_process()) + ' has finished'
running.value -= 1
mutex.release()
def test_value():
TASKS = 10
running = multiprocessing.Value('i', TASKS)
mutex = multiprocessing.Lock()
for i in range(TASKS):
p = multiprocessing.Process(target=value_func, args=(running, mutex))
p.start()
while running.value > 0:
time.sleep(0.08)
mutex.acquire()
print running.value,
sys.stdout.flush()
mutex.release()
print
print 'No more running processes'
#### TEST_QUEUE
def queue_func(queue):
for i in range(30):
time.sleep(0.5 * random.random())
queue.put(i*i)
queue.put('STOP')
def test_queue():
q = multiprocessing.Queue()
p = multiprocessing.Process(target=queue_func, args=(q,))
p.start()
o = None
while o != 'STOP':
try:
o = q.get(timeout=0.3)
print o,
sys.stdout.flush()
except Empty:
print 'TIMEOUT'
print
#### TEST_CONDITION
def condition_func(cond):
cond.acquire()
print '\t' + str(cond)
time.sleep(2)
print '\tchild is notifying'
print '\t' + str(cond)
cond.notify()
cond.release()
def test_condition():
cond = multiprocessing.Condition()
p = multiprocessing.Process(target=condition_func, args=(cond,))
print cond
cond.acquire()
print cond
cond.acquire()
print cond
p.start()
print 'main is waiting'
cond.wait()
print 'main has woken up'
print cond
cond.release()
print cond
cond.release()
p.join()
print cond
#### TEST_SEMAPHORE
def semaphore_func(sema, mutex, running):
sema.acquire()
mutex.acquire()
running.value += 1
print running.value, 'tasks are running'
mutex.release()
random.seed()
time.sleep(random.random()*2)
mutex.acquire()
running.value -= 1
print '%s has finished' % multiprocessing.current_process()
mutex.release()
sema.release()
def test_semaphore():
sema = multiprocessing.Semaphore(3)
mutex = multiprocessing.RLock()
running = multiprocessing.Value('i', 0)
processes = [
multiprocessing.Process(target=semaphore_func,
args=(sema, mutex, running))
for i in range(10)
]
for p in processes:
p.start()
for p in processes:
p.join()
#### TEST_JOIN_TIMEOUT
def join_timeout_func():
print '\tchild sleeping'
time.sleep(5.5)
print '\n\tchild terminating'
def test_join_timeout():
p = multiprocessing.Process(target=join_timeout_func)
p.start()
print 'waiting for process to finish'
while 1:
p.join(timeout=1)
if not p.is_alive():
break
print '.',
sys.stdout.flush()
#### TEST_EVENT
def event_func(event):
print '\t%r is waiting' % multiprocessing.current_process()
event.wait()
print '\t%r has woken up' % multiprocessing.current_process()
def test_event():
event = multiprocessing.Event()
processes = [multiprocessing.Process(target=event_func, args=(event,))
for i in range(5)]
for p in processes:
p.start()
print 'main is sleeping'
time.sleep(2)
print 'main is setting event'
event.set()
for p in processes:
p.join()
#### TEST_SHAREDVALUES
def sharedvalues_func(values, arrays, shared_values, shared_arrays):
for i in range(len(values)):
v = values[i][1]
sv = shared_values[i].value
assert v == sv
for i in range(len(values)):
a = arrays[i][1]
sa = list(shared_arrays[i][:])
assert a == sa
print 'Tests passed'
def test_sharedvalues():
values = [
('i', 10),
('h', -2),
('d', 1.25)
]
arrays = [
('i', range(100)),
('d', [0.25 * i for i in range(100)]),
('H', range(1000))
]
shared_values = [multiprocessing.Value(id, v) for id, v in values]
shared_arrays = [multiprocessing.Array(id, a) for id, a in arrays]
p = multiprocessing.Process(
target=sharedvalues_func,
args=(values, arrays, shared_values, shared_arrays)
)
p.start()
p.join()
assert p.get_exitcode() == 0
####
def test(namespace=multiprocessing):
global multiprocessing
multiprocessing = namespace
for func in [ test_value, test_queue, test_condition,
test_semaphore, test_join_timeout, test_event,
test_sharedvalues ]:
print '\n\t######## %s\n' % func.__name__
func()
ignore = multiprocessing.active_children() # cleanup any old processes
if hasattr(multiprocessing, '_debug_info'):
info = multiprocessing._debug_info()
if info:
print info
raise ValueError, 'there should be no positive refcounts left'
if __name__ == '__main__':
multiprocessing.freeze_support()
assert len(sys.argv) in (1, 2)
if len(sys.argv) == 1 or sys.argv[1] == 'processes':
print ' Using processes '.center(79, '-')
namespace = multiprocessing
elif sys.argv[1] == 'manager':
print ' Using processes and a manager '.center(79, '-')
namespace = multiprocessing.Manager()
namespace.Process = multiprocessing.Process
namespace.current_process = multiprocessing.current_process
namespace.active_children = multiprocessing.active_children
elif sys.argv[1] == 'threads':
print ' Using threads '.center(79, '-')
import multiprocessing.dummy as namespace
else:
print 'Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]
raise SystemExit, 2
test(namespace)


@@ -0,0 +1,67 @@
#
# Example where a pool of http servers share a single listening socket
#
# On Windows this module depends on the ability to pickle a socket
# object so that the worker processes can inherit a copy of the server
# object. (We import `multiprocessing.reduction` to enable this pickling.)
#
# Not sure if we should synchronize access to the `socket.accept()` method by
# using a process-shared lock -- does not seem to be necessary.
#
import os
import sys
from multiprocessing import Process, current_process, freeze_support
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
if sys.platform == 'win32':
import multiprocessing.reduction # make sockets picklable/inheritable
def note(format, *args):
sys.stderr.write('[%s]\t%s\n' % (current_process().get_name(),format%args))
class RequestHandler(SimpleHTTPRequestHandler):
# we override log_message() to show which process is handling the request
def log_message(self, format, *args):
note(format, *args)
def serve_forever(server):
note('starting server')
try:
server.serve_forever()
except KeyboardInterrupt:
pass
def runpool(address, number_of_processes):
# create a single server object -- children will each inherit a copy
server = HTTPServer(address, RequestHandler)
# create child processes to act as workers
for i in range(number_of_processes-1):
Process(target=serve_forever, args=(server,)).start()
# main process also acts as a worker
serve_forever(server)
def test():
DIR = os.path.join(os.path.dirname(__file__), '..')
ADDRESS = ('localhost', 8000)
NUMBER_OF_PROCESSES = 4
print 'Serving at http://%s:%d using %d worker processes' % \
(ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)
print 'To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']
os.chdir(DIR)
runpool(ADDRESS, NUMBER_OF_PROCESSES)
if __name__ == '__main__':
freeze_support()
test()


@@ -0,0 +1,87 @@
#
# Simple example which uses a pool of workers to carry out some tasks.
#
# Notice that the results will probably not come out of the output
# queue in the same order as the corresponding tasks were
# put on the input queue. If it is important to get the results back
# in the original order then consider using `Pool.map()` or
# `Pool.imap()` (which will save on the amount of code needed anyway).
#
import time
import random
from multiprocessing import Process, Queue, current_process, freeze_support
#
# Function run by worker processes
#
def worker(input, output):
for func, args in iter(input.get, 'STOP'):
result = calculate(func, args)
output.put(result)
#
# Function used to calculate result
#
def calculate(func, args):
result = func(*args)
return '%s says that %s%s = %s' % \
(current_process().get_name(), func.__name__, args, result)
#
# Functions referenced by tasks
#
def mul(a, b):
time.sleep(0.5*random.random())
return a * b
def plus(a, b):
time.sleep(0.5*random.random())
return a + b
#
#
#
def test():
NUMBER_OF_PROCESSES = 4
TASKS1 = [(mul, (i, 7)) for i in range(20)]
TASKS2 = [(plus, (i, 8)) for i in range(10)]
# Create queues
task_queue = Queue()
done_queue = Queue()
# Submit tasks
for task in TASKS1:
task_queue.put(task)
# Start worker processes
for i in range(NUMBER_OF_PROCESSES):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
print 'Unordered results:'
for i in range(len(TASKS1)):
print '\t', done_queue.get()
# Add more tasks using `put()`
for task in TASKS2:
task_queue.put(task)
# Get and print some more results
for i in range(len(TASKS2)):
print '\t', done_queue.get()
# Tell child processes to stop
for i in range(NUMBER_OF_PROCESSES):
task_queue.put('STOP')
if __name__ == '__main__':
freeze_support()
test()
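As the header comment suggests, `Pool.map()` collapses the queue-and-worker plumbing above into a few lines and hands results back in task order. A rough equivalent for the first batch of tasks, assuming it lives in the same module as `mul` and `calculatestar` (sketch only):

from multiprocessing import Pool, freeze_support

if __name__ == '__main__':
    freeze_support()
    pool = Pool(4)                  # replaces the hand-rolled worker processes
    TASKS1 = [(mul, (i, 7)) for i in range(20)]
    for line in pool.map(calculatestar, TASKS1):    # ordered results
        print '\t', line
    pool.close()
    pool.join()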

File diff suppressed because it is too large


@@ -18,6 +18,7 @@ some other systems as well (e.g. Windows or NT). Here's an overview:
threading.rst
dummy_thread.rst
dummy_threading.rst
multiprocessing.rst
mmap.rst
readline.rst
rlcompleter.rst


@@ -0,0 +1,269 @@
#
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a web browser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#
__version__ = '0.70a1'
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
'RawValue', 'RawArray'
]
__author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)'
#
# Imports
#
import os
import sys
import _multiprocessing
from multiprocessing.process import Process, current_process, active_children
#
# Exceptions
#
class ProcessError(Exception):
pass
class BufferTooShort(ProcessError):
pass
class TimeoutError(ProcessError):
pass
class AuthenticationError(ProcessError):
pass
#
# Definitions not depending on native semaphores
#
def Manager():
'''
Returns a manager associated with a running server process
The manager's methods such as `Lock()`, `Condition()` and `Queue()`
can be used to create shared objects.
'''
from multiprocessing.managers import SyncManager
m = SyncManager()
m.start()
return m
def Pipe(duplex=True):
'''
Returns two connection objects connected by a pipe
'''
from multiprocessing.connection import Pipe
return Pipe(duplex)
def cpu_count():
'''
Returns the number of CPUs in the system
'''
if sys.platform == 'win32':
try:
num = int(os.environ['NUMBER_OF_PROCESSORS'])
except (ValueError, KeyError):
num = 0
elif sys.platform == 'darwin':
try:
num = int(os.popen('sysctl -n hw.ncpu').read())
except ValueError:
num = 0
else:
try:
num = os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
num = 0
if num >= 1:
return num
else:
raise NotImplementedError('cannot determine number of cpus')
def freeze_support():
'''
Check whether this is a fake forked process in a frozen executable.
If so then run code specified by commandline and exit.
'''
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
from multiprocessing.forking import freeze_support
freeze_support()
def get_logger():
'''
Return package logger -- if it does not already exist then it is created
'''
from multiprocessing.util import get_logger
return get_logger()
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
from multiprocessing.util import log_to_stderr
return log_to_stderr(level)
def allow_connection_pickling():
'''
Install support for sending connections and sockets between processes
'''
from multiprocessing import reduction
#
# Definitions depending on native semaphores
#
def Lock():
'''
Returns a non-recursive lock object
'''
from multiprocessing.synchronize import Lock
return Lock()
def RLock():
'''
Returns a recursive lock object
'''
from multiprocessing.synchronize import RLock
return RLock()
def Condition(lock=None):
'''
Returns a condition object
'''
from multiprocessing.synchronize import Condition
return Condition(lock)
def Semaphore(value=1):
'''
Returns a semaphore object
'''
from multiprocessing.synchronize import Semaphore
return Semaphore(value)
def BoundedSemaphore(value=1):
'''
Returns a bounded semaphore object
'''
from multiprocessing.synchronize import BoundedSemaphore
return BoundedSemaphore(value)
def Event():
'''
Returns an event object
'''
from multiprocessing.synchronize import Event
return Event()
def Queue(maxsize=0):
'''
Returns a queue object
'''
from multiprocessing.queues import Queue
return Queue(maxsize)
def JoinableQueue(maxsize=0):
'''
Returns a queue object
'''
from multiprocessing.queues import JoinableQueue
return JoinableQueue(maxsize)
def Pool(processes=None, initializer=None, initargs=()):
'''
Returns a process pool object
'''
from multiprocessing.pool import Pool
return Pool(processes, initializer, initargs)
def RawValue(typecode_or_type, *args):
'''
Returns a shared object
'''
from multiprocessing.sharedctypes import RawValue
return RawValue(typecode_or_type, *args)
def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a shared array
'''
from multiprocessing.sharedctypes import RawArray
return RawArray(typecode_or_type, size_or_initializer)
def Value(typecode_or_type, *args, **kwds):
'''
Returns a synchronized shared object
'''
from multiprocessing.sharedctypes import Value
return Value(typecode_or_type, *args, **kwds)
def Array(typecode_or_type, size_or_initializer, **kwds):
'''
Returns a synchronized shared array
'''
from multiprocessing.sharedctypes import Array
return Array(typecode_or_type, size_or_initializer, **kwds)
#
#
#
if sys.platform == 'win32':
def set_executable(executable):
'''
Sets the path to a python.exe or pythonw.exe binary used to run
child processes on Windows instead of sys.executable.
Useful for people embedding Python.
'''
from multiprocessing.forking import set_executable
set_executable(executable)
__all__ += ['set_executable']
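The factory functions above are deliberately thin: each defers the real import to first use, so `import multiprocessing` stays cheap. A short hedged quickstart exercising the threading-like API the package comment promises (the `worker` function is illustrative):

from multiprocessing import Process, Queue, Lock, freeze_support

def worker(q, lock):
    lock.acquire()
    try:
        q.put('hello from the child')
    finally:
        lock.release()

if __name__ == '__main__':
    freeze_support()    # a no-op except inside a frozen Windows executable
    q, lock = Queue(), Lock()
    p = Process(target=worker, args=(q, lock))
    p.start()
    print q.get()       # 'hello from the child'
    p.join()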


@@ -0,0 +1,425 @@
#
# A higher level module for using sockets (or Windows named pipes)
#
# multiprocessing/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = [ 'Client', 'Listener', 'Pipe' ]
import os
import sys
import socket
import time
import tempfile
import itertools
import _multiprocessing
from multiprocessing import current_process
from multiprocessing.util import get_temp_dir, Finalize, sub_debug, debug
from multiprocessing.forking import duplicate, close
#
#
#
BUFSIZE = 8192
_mmap_counter = itertools.count()
default_family = 'AF_INET'
families = ['AF_INET']
if hasattr(socket, 'AF_UNIX'):
default_family = 'AF_UNIX'
families += ['AF_UNIX']
if sys.platform == 'win32':
default_family = 'AF_PIPE'
families += ['AF_PIPE']
#
#
#
def arbitrary_address(family):
'''
Return an arbitrary free address for the given family
'''
if family == 'AF_INET':
return ('localhost', 0)
elif family == 'AF_UNIX':
return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())
elif family == 'AF_PIPE':
return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
(os.getpid(), _mmap_counter.next()))
else:
raise ValueError('unrecognized family')
def address_type(address):
'''
Return the type of the address
This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
'''
if type(address) == tuple:
return 'AF_INET'
elif type(address) is str and address.startswith('\\\\'):
return 'AF_PIPE'
elif type(address) is str:
return 'AF_UNIX'
else:
raise ValueError('address type of %r unrecognized' % address)
#
# Public functions
#
class Listener(object):
'''
Returns a listener object.
This is a wrapper for a bound socket which is 'listening' for
connections, or for a Windows named pipe.
'''
def __init__(self, address=None, family=None, backlog=1, authkey=None):
family = family or (address and address_type(address)) \
or default_family
address = address or arbitrary_address(family)
if family == 'AF_PIPE':
self._listener = PipeListener(address, backlog)
else:
self._listener = SocketListener(address, family, backlog)
if authkey is not None and not isinstance(authkey, bytes):
raise TypeError, 'authkey should be a byte string'
self._authkey = authkey
def accept(self):
'''
Accept a connection on the bound socket or named pipe of `self`.
Returns a `Connection` object.
'''
c = self._listener.accept()
if self._authkey:
deliver_challenge(c, self._authkey)
answer_challenge(c, self._authkey)
return c
def close(self):
'''
Close the bound socket or named pipe of `self`.
'''
return self._listener.close()
address = property(lambda self: self._listener._address)
last_accepted = property(lambda self: self._listener._last_accepted)
def Client(address, family=None, authkey=None):
'''
Returns a connection to the address of a `Listener`
'''
family = family or address_type(address)
if family == 'AF_PIPE':
c = PipeClient(address)
else:
c = SocketClient(address)
if authkey is not None and not isinstance(authkey, bytes):
raise TypeError, 'authkey should be a byte string'
if authkey is not None:
answer_challenge(c, authkey)
deliver_challenge(c, authkey)
return c
if sys.platform != 'win32':
def Pipe(duplex=True):
'''
Returns pair of connection objects at either end of a pipe
'''
if duplex:
s1, s2 = socket.socketpair()
c1 = _multiprocessing.Connection(os.dup(s1.fileno()))
c2 = _multiprocessing.Connection(os.dup(s2.fileno()))
s1.close()
s2.close()
else:
fd1, fd2 = os.pipe()
c1 = _multiprocessing.Connection(fd1, writable=False)
c2 = _multiprocessing.Connection(fd2, readable=False)
return c1, c2
else:
from ._multiprocessing import win32
def Pipe(duplex=True):
'''
Returns pair of connection objects at either end of a pipe
'''
address = arbitrary_address('AF_PIPE')
if duplex:
openmode = win32.PIPE_ACCESS_DUPLEX
access = win32.GENERIC_READ | win32.GENERIC_WRITE
obsize, ibsize = BUFSIZE, BUFSIZE
else:
openmode = win32.PIPE_ACCESS_INBOUND
access = win32.GENERIC_WRITE
obsize, ibsize = 0, BUFSIZE
h1 = win32.CreateNamedPipe(
address, openmode,
win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
win32.PIPE_WAIT,
1, obsize, ibsize, win32.NMPWAIT_WAIT_FOREVER, win32.NULL
)
h2 = win32.CreateFile(
address, access, 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL
)
win32.SetNamedPipeHandleState(
h2, win32.PIPE_READMODE_MESSAGE, None, None
)
try:
win32.ConnectNamedPipe(h1, win32.NULL)
except WindowsError, e:
if e.args[0] != win32.ERROR_PIPE_CONNECTED:
raise
c1 = _multiprocessing.PipeConnection(h1, writable=duplex)
c2 = _multiprocessing.PipeConnection(h2, readable=duplex)
return c1, c2
#
# Definitions for connections based on sockets
#
class SocketListener(object):
'''
Representation of a socket which is bound to an address and listening
'''
def __init__(self, address, family, backlog=1):
self._socket = socket.socket(getattr(socket, family))
self._socket.bind(address)
self._socket.listen(backlog)
address = self._socket.getsockname()
if type(address) is tuple:
address = (socket.getfqdn(address[0]),) + address[1:]
self._address = address
self._family = family
self._last_accepted = None
sub_debug('listener bound to address %r', self._address)
if family == 'AF_UNIX':
self._unlink = Finalize(
self, os.unlink, args=(self._address,), exitpriority=0
)
else:
self._unlink = None
def accept(self):
s, self._last_accepted = self._socket.accept()
fd = duplicate(s.fileno())
conn = _multiprocessing.Connection(fd)
s.close()
return conn
def close(self):
self._socket.close()
if self._unlink is not None:
self._unlink()
def SocketClient(address):
'''
Return a connection object connected to the socket given by `address`
'''
family = address_type(address)
s = socket.socket( getattr(socket, family) )
while 1:
try:
s.connect(address)
except socket.error, e:
if e.args[0] != 10061: # 10061 => connection refused
debug('failed to connect to address %s', address)
raise
time.sleep(0.01)
else:
break
else:
raise
fd = duplicate(s.fileno())
conn = _multiprocessing.Connection(fd)
s.close()
return conn
#
# Definitions for connections based on named pipes
#
if sys.platform == 'win32':
class PipeListener(object):
'''
Representation of a named pipe
'''
def __init__(self, address, backlog=None):
self._address = address
handle = win32.CreateNamedPipe(
address, win32.PIPE_ACCESS_DUPLEX,
win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
win32.PIPE_WAIT,
win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
win32.NMPWAIT_WAIT_FOREVER, win32.NULL
)
self._handle_queue = [handle]
self._last_accepted = None
sub_debug('listener created with address=%r', self._address)
self.close = Finalize(
self, PipeListener._finalize_pipe_listener,
args=(self._handle_queue, self._address), exitpriority=0
)
def accept(self):
newhandle = win32.CreateNamedPipe(
self._address, win32.PIPE_ACCESS_DUPLEX,
win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
win32.PIPE_WAIT,
win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
win32.NMPWAIT_WAIT_FOREVER, win32.NULL
)
self._handle_queue.append(newhandle)
handle = self._handle_queue.pop(0)
try:
win32.ConnectNamedPipe(handle, win32.NULL)
except WindowsError, e:
if e.args[0] != win32.ERROR_PIPE_CONNECTED:
raise
return _multiprocessing.PipeConnection(handle)
@staticmethod
def _finalize_pipe_listener(queue, address):
sub_debug('closing listener with address=%r', address)
for handle in queue:
close(handle)
def PipeClient(address):
'''
Return a connection object connected to the pipe given by `address`
'''
while 1:
try:
win32.WaitNamedPipe(address, 1000)
h = win32.CreateFile(
address, win32.GENERIC_READ | win32.GENERIC_WRITE,
0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL
)
except WindowsError, e:
if e.args[0] not in (win32.ERROR_SEM_TIMEOUT,
win32.ERROR_PIPE_BUSY):
raise
else:
break
else:
raise
win32.SetNamedPipeHandleState(
h, win32.PIPE_READMODE_MESSAGE, None, None
)
return _multiprocessing.PipeConnection(h)
#
# Authentication stuff
#
MESSAGE_LENGTH = 20
CHALLENGE = '#CHALLENGE#'
WELCOME = '#WELCOME#'
FAILURE = '#FAILURE#'
if sys.version_info >= (3, 0): # XXX can use bytes literals in 2.6/3.0
CHALLENGE = CHALLENGE.encode('ascii')
WELCOME = WELCOME.encode('ascii')
FAILURE = FAILURE.encode('ascii')
def deliver_challenge(connection, authkey):
import hmac
assert isinstance(authkey, bytes)
message = os.urandom(MESSAGE_LENGTH)
connection.send_bytes(CHALLENGE + message)
digest = hmac.new(authkey, message).digest()
response = connection.recv_bytes(256) # reject large message
if response == digest:
connection.send_bytes(WELCOME)
else:
connection.send_bytes(FAILURE)
raise AuthenticationError('digest received was wrong')
def answer_challenge(connection, authkey):
import hmac
assert isinstance(authkey, bytes)
message = connection.recv_bytes(256) # reject large message
assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
message = message[len(CHALLENGE):]
digest = hmac.new(authkey, message).digest()
connection.send_bytes(digest)
response = connection.recv_bytes(256) # reject large message
if response != WELCOME:
raise AuthenticationError('digest sent was rejected')
#
# Support for using xmlrpclib for serialization
#
class ConnectionWrapper(object):
def __init__(self, conn, dumps, loads):
self._conn = conn
self._dumps = dumps
self._loads = loads
for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
obj = getattr(conn, attr)
setattr(self, attr, obj)
def send(self, obj):
s = self._dumps(obj)
self._conn.send_bytes(s)
def recv(self):
s = self._conn.recv_bytes()
return self._loads(s)
def _xml_dumps(obj):
return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf8')
def _xml_loads(s):
(obj,), method = xmlrpclib.loads(s.decode('utf8'))
return obj
class XmlListener(Listener):
def accept(self):
global xmlrpclib
import xmlrpclib
obj = Listener.accept(self)
return ConnectionWrapper(obj, _xml_dumps, _xml_loads)
def XmlClient(*args, **kwds):
global xmlrpclib
import xmlrpclib
return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)
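A minimal round trip through `Listener` and `Client`; supplying `authkey` triggers the HMAC challenge handshake defined above on both sides. The address and key are illustrative:

from multiprocessing.connection import Listener, Client

address = ('localhost', 6000)        # or an AF_UNIX path / AF_PIPE name
listener = Listener(address, authkey='secret')
# meanwhile, in another process:
#     conn = Client(('localhost', 6000), authkey='secret')
#     conn.send(['any', 'picklable', 'object'])
#     conn.close()
conn = listener.accept()             # runs deliver/answer_challenge
print conn.recv()
conn.close()
listener.close()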


@@ -0,0 +1,143 @@
#
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
import array
import itertools
from multiprocessing import TimeoutError, cpu_count
from multiprocessing.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event
from Queue import Queue
#
#
#
class DummyProcess(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
threading.Thread.__init__(self, group, target, name, args, kwargs)
self._pid = None
self._children = weakref.WeakKeyDictionary()
self._start_called = False
self._parent = current_process()
def start(self):
assert self._parent is current_process()
self._start_called = True
self._parent._children[self] = None
threading.Thread.start(self)
def get_exitcode(self):
if self._start_called and not self.isAlive():
return 0
else:
return None
# XXX
if sys.version_info < (3, 0):
is_alive = threading.Thread.isAlive.im_func
get_name = threading.Thread.getName.im_func
set_name = threading.Thread.setName.im_func
is_daemon = threading.Thread.isDaemon.im_func
set_daemon = threading.Thread.setDaemon.im_func
else:
is_alive = threading.Thread.isAlive
get_name = threading.Thread.getName
set_name = threading.Thread.setName
is_daemon = threading.Thread.isDaemon
set_daemon = threading.Thread.setDaemon
#
#
#
class Condition(threading._Condition):
# XXX
if sys.version_info < (3, 0):
notify_all = threading._Condition.notifyAll.im_func
else:
notify_all = threading._Condition.notifyAll
#
#
#
Process = DummyProcess
current_process = threading.currentThread
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
children = current_process()._children
for p in list(children):
if not p.isAlive():
children.pop(p, None)
return list(children)
def freeze_support():
pass
#
#
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = self.__dict__.items()
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
dict = dict
list = list
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def _get(self):
return self._value
def _set(self, value):
self._value = value
value = property(_get, _set)
def __repr__(self):
return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
def Manager():
return sys.modules[__name__]
def shutdown():
pass
def Pool(processes=None, initializer=None, initargs=()):
from multiprocessing.pool import ThreadPool
return ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue
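Because `multiprocessing.dummy` re-exports the same names backed by threads, it can be swapped in wholesale; the `threads` mode of the example scripts earlier in this commit does exactly that via an import alias. A hedged illustration:

import multiprocessing.dummy as multiprocessing  # threads behind the same API

def square(x):
    return x * x

pool = multiprocessing.Pool(4)       # actually a ThreadPool
print pool.map(square, range(10))    # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]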


@@ -0,0 +1,61 @@
#
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
#
# multiprocessing/dummy/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = [ 'Client', 'Listener', 'Pipe' ]
from Queue import Queue
families = [None]
class Listener(object):
def __init__(self, address=None, family=None, backlog=1):
self._backlog_queue = Queue(backlog)
def accept(self):
return Connection(*self._backlog_queue.get())
def close(self):
self._backlog_queue = None
address = property(lambda self: self._backlog_queue)
def Client(address):
_in, _out = Queue(), Queue()
address.put((_out, _in))
return Connection(_in, _out)
def Pipe(duplex=True):
a, b = Queue(), Queue()
return Connection(a, b), Connection(b, a)
class Connection(object):
def __init__(self, _in, _out):
self._out = _out
self._in = _in
self.send = self.send_bytes = _out.put
self.recv = self.recv_bytes = _in.get
def poll(self, timeout=0.0):
if self._in.qsize() > 0:
return True
if timeout <= 0.0:
return False
self._in.not_empty.acquire()
self._in.not_empty.wait(timeout)
self._in.not_empty.release()
return self._in.qsize() > 0
def close(self):
pass
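Here a `Pipe()` is just two cross-wired queues, so both ends live in the same process and `send`/`recv` are plain `put`/`get` calls. A tiny sketch:

from multiprocessing.dummy.connection import Pipe

a, b = Pipe()
a.send('ping')      # lands on the queue that b reads from
print b.recv()      # 'ping'
b.send('pong')
print a.recv()      # 'pong'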


@ -0,0 +1,429 @@
#
# Module for starting a process object using os.fork() or CreateProcess()
#
# multiprocessing/forking.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
import os
import sys
import signal
from multiprocessing import util, process
__all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close']
#
# Check that the current thread is spawning a child process
#
def assert_spawning(self):
if not Popen.thread_is_spawning():
raise RuntimeError(
'%s objects should only be shared between processes'
' through inheritance' % type(self).__name__
)
#
# Unix
#
if sys.platform != 'win32':
import time
exit = os._exit
duplicate = os.dup
close = os.close
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
def __init__(self, process_obj):
sys.stdout.flush()
sys.stderr.flush()
self.returncode = None
self.pid = os.fork()
if self.pid == 0:
if 'random' in sys.modules:
import random
random.seed()
code = process_obj._bootstrap()
sys.stdout.flush()
sys.stderr.flush()
os._exit(code)
def poll(self, flag=os.WNOHANG):
if self.returncode is None:
pid, sts = os.waitpid(self.pid, flag)
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
else:
assert os.WIFEXITED(sts)
self.returncode = os.WEXITSTATUS(sts)
return self.returncode
def wait(self, timeout=None):
if timeout is None:
return self.poll(0)
deadline = time.time() + timeout
delay = 0.0005
while 1:
res = self.poll()
if res is not None:
break
remaining = deadline - time.time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, 0.05)
time.sleep(delay)
return res
def terminate(self):
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except OSError, e:
if self.wait(timeout=0.1) is None:
raise
@staticmethod
def thread_is_spawning():
return False
#
# Windows
#
else:
import thread
import msvcrt
import _subprocess
import copy_reg
import time
from ._multiprocessing import win32, Connection, PipeConnection
from .util import Finalize
try:
from cPickle import dump, load, HIGHEST_PROTOCOL
except ImportError:
from pickle import dump, load, HIGHEST_PROTOCOL
#
#
#
TERMINATE = 0x10000
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
exit = win32.ExitProcess
close = win32.CloseHandle
#
# _python_exe is the assumed path to the python executable.
# People embedding Python want to modify it.
#
if sys.executable.lower().endswith('pythonservice.exe'):
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
_python_exe = sys.executable
def set_executable(exe):
global _python_exe
_python_exe = exe
#
#
#
def duplicate(handle, target_process=None, inheritable=False):
if target_process is None:
target_process = _subprocess.GetCurrentProcess()
return _subprocess.DuplicateHandle(
_subprocess.GetCurrentProcess(), handle, target_process,
0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS
).Detach()
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
'''
Start a subprocess to run the code of a process object
'''
_tls = thread._local()
def __init__(self, process_obj):
# create pipe for communication with child
rfd, wfd = os.pipe()
# get handle for read end of the pipe and make it inheritable
rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
os.close(rfd)
# start process
cmd = get_command_line() + [rhandle]
cmd = ' '.join('"%s"' % x for x in cmd)
hp, ht, pid, tid = _subprocess.CreateProcess(
_python_exe, cmd, None, None, 1, 0, None, None, None
)
ht.Close()
close(rhandle)
# set attributes of self
self.pid = pid
self.returncode = None
self._handle = hp
# send information to child
prep_data = get_preparation_data(process_obj._name)
to_child = os.fdopen(wfd, 'wb')
Popen._tls.process_handle = int(hp)
try:
dump(prep_data, to_child, HIGHEST_PROTOCOL)
dump(process_obj, to_child, HIGHEST_PROTOCOL)
finally:
del Popen._tls.process_handle
to_child.close()
@staticmethod
def thread_is_spawning():
return getattr(Popen._tls, 'process_handle', None) is not None
@staticmethod
def duplicate_for_child(handle):
return duplicate(handle, Popen._tls.process_handle)
def wait(self, timeout=None):
if self.returncode is None:
if timeout is None:
msecs = _subprocess.INFINITE
else:
msecs = max(0, int(timeout * 1000 + 0.5))
res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
if res == _subprocess.WAIT_OBJECT_0:
code = _subprocess.GetExitCodeProcess(self._handle)
if code == TERMINATE:
code = -signal.SIGTERM
self.returncode = code
return self.returncode
def poll(self):
return self.wait(timeout=0)
def terminate(self):
if self.returncode is None:
try:
_subprocess.TerminateProcess(int(self._handle), TERMINATE)
except WindowsError:
if self.wait(timeout=0.1) is None:
raise
#
#
#
def is_forking(argv):
'''
Return whether commandline indicates we are forking
'''
if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
assert len(argv) == 3
return True
else:
return False
def freeze_support():
'''
Run code for process object if this is not the main process
'''
if is_forking(sys.argv):
main()
sys.exit()
def get_command_line():
'''
Returns prefix of command line used for spawning a child process
'''
if process.current_process()._identity==() and is_forking(sys.argv):
raise RuntimeError('''
Attempt to start a new process before the current process
has finished its bootstrapping phase.
This probably means that you are on Windows and you have
forgotten to use the proper idiom in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce a Windows executable.''')
if getattr(sys, 'frozen', False):
return [sys.executable, '--multiprocessing-fork']
else:
prog = 'from multiprocessing.forking import main; main()'
return [_python_exe, '-c', prog, '--multiprocessing-fork']
def main():
'''
Run code specified by data received over pipe
'''
assert is_forking(sys.argv)
handle = int(sys.argv[-1])
fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
from_parent = os.fdopen(fd, 'rb')
process.current_process()._inheriting = True
preparation_data = load(from_parent)
prepare(preparation_data)
self = load(from_parent)
process.current_process()._inheriting = False
from_parent.close()
exitcode = self._bootstrap()
exit(exitcode)
def get_preparation_data(name):
'''
Return info about parent needed by child to unpickle process object
'''
from .util import _logger, _log_to_stderr
d = dict(
name=name,
sys_path=sys.path,
sys_argv=sys.argv,
log_to_stderr=_log_to_stderr,
orig_dir=process.ORIGINAL_DIR,
authkey=process.current_process().get_authkey(),
)
if _logger is not None:
d['log_level'] = _logger.getEffectiveLevel()
if not WINEXE:
main_path = getattr(sys.modules['__main__'], '__file__', None)
if not main_path and sys.argv[0] not in ('', '-c'):
main_path = sys.argv[0]
if main_path is not None:
if not os.path.isabs(main_path) and \
process.ORIGINAL_DIR is not None:
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
d['main_path'] = os.path.normpath(main_path)
return d
#
# Make (Pipe)Connection picklable
#
def reduce_connection(conn):
if not Popen.thread_is_spawning():
raise RuntimeError(
'By default %s objects can only be shared between processes\n'
'using inheritance' % type(conn).__name__
)
return type(conn), (Popen.duplicate_for_child(conn.fileno()),
conn.readable, conn.writable)
copy_reg.pickle(Connection, reduce_connection)
copy_reg.pickle(PipeConnection, reduce_connection)
#
# Prepare current process
#
old_main_modules = []
def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
old_main_modules.append(sys.modules['__main__'])
if 'name' in data:
process.current_process().set_name(data['name'])
if 'authkey' in data:
process.current_process()._authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'main_path' in data:
main_path = data['main_path']
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == '__init__':
main_name = os.path.basename(os.path.dirname(main_path))
if main_name != 'ipython':
import imp
if main_path is None:
dirs = None
elif os.path.basename(main_path).startswith('__init__.py'):
dirs = [os.path.dirname(os.path.dirname(main_path))]
else:
dirs = [os.path.dirname(main_path)]
assert main_name not in sys.modules, main_name
file, path_name, etc = imp.find_module(main_name, dirs)
try:
# We would like to do "imp.load_module('__main__', ...)"
# here. However, that would cause 'if __name__ ==
# "__main__"' clauses to be executed.
main_module = imp.load_module(
'__parents_main__', file, path_name, etc
)
finally:
if file:
file.close()
sys.modules['__main__'] = main_module
main_module.__name__ = '__main__'
# Try to make the potentially picklable objects in
# sys.modules['__main__'] realize they are in the main
# module -- somewhat ugly.
for obj in main_module.__dict__.values():
try:
if obj.__module__ == '__parents_main__':
obj.__module__ = '__main__'
except Exception:
pass
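#
# A minimal sketch (illustrative, not part of the original module) of the
# main-module idiom that the RuntimeError raised by get_command_line()
# above asks for; the function name `f` is hypothetical:
#
#     from multiprocessing import Process, freeze_support
#
#     def f():
#         print 'hello from child'
#
#     if __name__ == '__main__':
#         freeze_support()    # needed only for frozen Windows executables
#         p = Process(target=f)
#         p.start()
#         p.join()
#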

Lib/multiprocessing/heap.py
@@ -0,0 +1,201 @@
#
# Module which supports allocation of memory from an mmap
#
# multiprocessing/heap.py
#
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#
import bisect
import mmap
import tempfile
import os
import sys
import threading
import itertools
import _multiprocessing
from multiprocessing.util import Finalize, info
from multiprocessing.forking import assert_spawning
__all__ = ['BufferWrapper']
#
# Inheritable class which wraps an mmap, and from which blocks can be allocated
#
if sys.platform == 'win32':
from ._multiprocessing import win32
class Arena(object):
_counter = itertools.count()
def __init__(self, size):
self.size = size
self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next())
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
assert win32.GetLastError() == 0, 'tagname already in use'
self._state = (self.size, self.name)
def __getstate__(self):
assert_spawning(self)
return self._state
def __setstate__(self, state):
self.size, self.name = self._state = state
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS
else:
class Arena(object):
def __init__(self, size):
self.buffer = mmap.mmap(-1, size)
self.size = size
self.name = None
#
# Class allowing allocation of chunks of memory from arenas
#
class Heap(object):
_alignment = 8
def __init__(self, size=mmap.PAGESIZE):
self._lastpid = os.getpid()
self._lock = threading.Lock()
self._size = size
self._lengths = []
self._len_to_seq = {}
self._start_to_block = {}
self._stop_to_block = {}
self._allocated_blocks = set()
self._arenas = []
@staticmethod
def _roundup(n, alignment):
# alignment must be a power of 2
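# e.g. _roundup(13, 8): mask == 7 and (13 + 7) & ~7 == 16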
mask = alignment - 1
return (n + mask) & ~mask
def _malloc(self, size):
# returns a large enough block -- it might be much larger
i = bisect.bisect_left(self._lengths, size)
if i == len(self._lengths):
length = self._roundup(max(self._size, size), mmap.PAGESIZE)
self._size *= 2
info('allocating a new mmap of length %d', length)
arena = Arena(length)
self._arenas.append(arena)
return (arena, 0, length)
else:
length = self._lengths[i]
seq = self._len_to_seq[length]
block = seq.pop()
if not seq:
del self._len_to_seq[length], self._lengths[i]
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
return block
def _free(self, block):
# free location and try to merge with neighbours
(arena, start, stop) = block
try:
prev_block = self._stop_to_block[(arena, start)]
except KeyError:
pass
else:
start, _ = self._absorb(prev_block)
try:
next_block = self._start_to_block[(arena, stop)]
except KeyError:
pass
else:
_, stop = self._absorb(next_block)
block = (arena, start, stop)
length = stop - start
try:
self._len_to_seq[length].append(block)
except KeyError:
self._len_to_seq[length] = [block]
bisect.insort(self._lengths, length)
self._start_to_block[(arena, start)] = block
self._stop_to_block[(arena, stop)] = block
def _absorb(self, block):
# deregister this block so it can be merged with a neighbour
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
length = stop - start
seq = self._len_to_seq[length]
seq.remove(block)
if not seq:
del self._len_to_seq[length]
self._lengths.remove(length)
return start, stop
def free(self, block):
# free a block returned by malloc()
assert os.getpid() == self._lastpid
self._lock.acquire()
try:
self._allocated_blocks.remove(block)
self._free(block)
finally:
self._lock.release()
def malloc(self, size):
# return a block of right size (possibly rounded up)
assert 0 <= size < sys.maxint
if os.getpid() != self._lastpid:
self.__init__() # reinitialize after fork
self._lock.acquire()
try:
size = self._roundup(max(size,1), self._alignment)
(arena, start, stop) = self._malloc(size)
new_stop = start + size
if new_stop < stop:
self._free((arena, new_stop, stop))
block = (arena, start, new_stop)
self._allocated_blocks.add(block)
return block
finally:
self._lock.release()
#
# Class representing a chunk of an mmap -- can be inherited
#
class BufferWrapper(object):
_heap = Heap()
def __init__(self, size):
assert 0 <= size < sys.maxint
block = BufferWrapper._heap.malloc(size)
self._state = (block, size)
Finalize(self, BufferWrapper._heap.free, args=(block,))
def get_address(self):
(arena, start, stop), size = self._state
address, length = _multiprocessing.address_of_buffer(arena.buffer)
assert size <= length
return address + start
def get_size(self):
return self._state[1]
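#
# A minimal usage sketch (illustrative, not part of the original module):
# allocate a block from the shared heap and inspect it.  Requires the
# compiled _multiprocessing extension.
#
if __name__ == '__main__':
    bw = BufferWrapper(100)
    print 'allocated %d bytes at address %#x' % \
          (bw.get_size(), bw.get_address())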

File diff suppressed because it is too large

Lib/multiprocessing/pool.py
@@ -0,0 +1,596 @@
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#
__all__ = ['Pool']
#
# Imports
#
import threading
import Queue
import itertools
import collections
import time
from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing.util import Finalize, debug
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Miscellaneous
#
job_counter = itertools.count()
def mapstar(args):
return map(*args)
#
# Code run by worker processes
#
def worker(inqueue, outqueue, initializer=None, initargs=()):
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
initializer(*initargs)
while 1:
try:
task = get()
except (EOFError, IOError):
debug('worker got EOFError or IOError -- exiting')
break
if task is None:
debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
try:
result = (True, func(*args, **kwds))
except Exception, e:
result = (False, e)
put((job, i, result))
#
# Class representing a process pool
#
class Pool(object):
'''
Class which supports an async version of the `apply()` builtin
'''
Process = Process
def __init__(self, processes=None, initializer=None, initargs=()):
self._setup_queues()
self._taskqueue = Queue.Queue()
self._cache = {}
self._state = RUN
if processes is None:
try:
processes = cpu_count()
except NotImplementedError:
processes = 1
self._pool = []
for i in range(processes):
w = self.Process(
target=worker,
args=(self._inqueue, self._outqueue, initializer, initargs)
)
self._pool.append(w)
w.set_name(w.get_name().replace('Process', 'PoolWorker'))
w.set_daemon(True)
w.start()
self._task_handler = threading.Thread(
target=Pool._handle_tasks,
args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
)
self._task_handler.setDaemon(True)
self._task_handler._state = RUN
self._task_handler.start()
self._result_handler = threading.Thread(
target=Pool._handle_results,
args=(self._outqueue, self._quick_get, self._cache)
)
self._result_handler.setDaemon(True)
self._result_handler._state = RUN
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
self._task_handler, self._result_handler, self._cache),
exitpriority=15
)
def _setup_queues(self):
from .queues import SimpleQueue
self._inqueue = SimpleQueue()
self._outqueue = SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `apply()` builtin
'''
assert self._state == RUN
return self.apply_async(func, args, kwds).get()
def map(self, func, iterable, chunksize=None):
'''
Equivalent of `map()` builtin
'''
assert self._state == RUN
return self.map_async(func, iterable, chunksize).get()
def imap(self, func, iterable, chunksize=1):
'''
Equivalent of `itertools.imap()` -- can be MUCH slower than `Pool.map()`
'''
assert self._state == RUN
if chunksize == 1:
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1):
'''
Like `imap()` method but ordering of results is arbitrary
'''
assert self._state == RUN
if chunksize == 1:
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={}, callback=None):
'''
Asynchronous equivalent of `apply()` builtin
'''
assert self._state == RUN
result = ApplyResult(self._cache, callback)
self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
return result
def map_async(self, func, iterable, chunksize=None, callback=None):
'''
Asynchronous equivalent of `map()` builtin
'''
assert self._state == RUN
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), None))
return result
@staticmethod
def _handle_tasks(taskqueue, put, outqueue, pool):
thread = threading.currentThread()
for taskseq, set_length in iter(taskqueue.get, None):
i = -1
for i, task in enumerate(taskseq):
if thread._state:
debug('task handler found thread._state != RUN')
break
try:
put(task)
except IOError:
debug('could not put task on queue')
break
else:
if set_length:
debug('doing set_length()')
set_length(i+1)
continue
break
else:
debug('task handler got sentinel')
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
put(None)
except IOError:
debug('task handler got IOError when sending sentinels')
debug('task handler exiting')
@staticmethod
def _handle_results(outqueue, get, cache):
thread = threading.currentThread()
while 1:
try:
task = get()
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
if thread._state:
assert thread._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
break
if task is None:
debug('result handler got sentinel')
break
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
while cache and thread._state != TERMINATE:
try:
task = get()
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
if task is None:
debug('result handler ignoring extra sentinel')
continue
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (IOError, EOFError):
pass
debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), thread._state)
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
def __reduce__(self):
raise NotImplementedError(
'pool objects cannot be passed between processes or pickled'
)
def close(self):
debug('closing pool')
if self._state == RUN:
self._state = CLOSE
self._taskqueue.put(None)
def terminate(self):
debug('terminating pool')
self._state = TERMINATE
self._terminate()
def join(self):
debug('joining pool')
assert self._state in (CLOSE, TERMINATE)
self._task_handler.join()
self._result_handler.join()
for p in self._pool:
p.join()
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# task_handler may be blocked trying to put items on inqueue
debug('removing tasks from inqueue until task handler finished')
inqueue._rlock.acquire()
while task_handler.isAlive() and inqueue._reader.poll():
inqueue._reader.recv()
time.sleep(0)
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
task_handler, result_handler, cache):
# this is guaranteed to only be called once
debug('finalizing pool')
task_handler._state = TERMINATE
taskqueue.put(None) # sentinel
debug('helping task handler/workers to finish')
cls._help_stuff_finish(inqueue, task_handler, len(pool))
assert result_handler.isAlive() or len(cache) == 0
result_handler._state = TERMINATE
outqueue.put(None) # sentinel
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
p.terminate()
debug('joining task handler')
task_handler.join(1e100)
debug('joining result handler')
result_handler.join(1e100)
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
for p in pool:
p.join()
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
def __init__(self, cache, callback):
self._cond = threading.Condition(threading.Lock())
self._job = job_counter.next()
self._cache = cache
self._ready = False
self._callback = callback
cache[self._job] = self
def ready(self):
return self._ready
def successful(self):
assert self._ready
return self._success
def wait(self, timeout=None):
self._cond.acquire()
try:
if not self._ready:
self._cond.wait(timeout)
finally:
self._cond.release()
def get(self, timeout=None):
self.wait(timeout)
if not self._ready:
raise TimeoutError
if self._success:
return self._value
else:
raise self._value
def _set(self, i, obj):
self._success, self._value = obj
if self._callback and self._success:
self._callback(self._value)
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
del self._cache[self._job]
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
def __init__(self, cache, chunksize, length, callback):
ApplyResult.__init__(self, cache, callback)
self._success = True
self._value = [None] * length
self._chunksize = chunksize
if chunksize <= 0:
self._number_left = 0
self._ready = True
else:
self._number_left = length//chunksize + bool(length % chunksize)
def _set(self, i, success_result):
success, result = success_result
if success:
self._value[i*self._chunksize:(i+1)*self._chunksize] = result
self._number_left -= 1
if self._number_left == 0:
if self._callback:
self._callback(self._value)
del self._cache[self._job]
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
else:
self._success = False
self._value = result
del self._cache[self._job]
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
def __init__(self, cache):
self._cond = threading.Condition(threading.Lock())
self._job = job_counter.next()
self._cache = cache
self._items = collections.deque()
self._index = 0
self._length = None
self._unsorted = {}
cache[self._job] = self
def __iter__(self):
return self
def next(self, timeout=None):
self._cond.acquire()
try:
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
raise StopIteration
self._cond.wait(timeout)
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
raise StopIteration
raise TimeoutError
finally:
self._cond.release()
success, value = item
if success:
return value
raise value
__next__ = next # XXX
def _set(self, i, obj):
self._cond.acquire()
try:
if self._index == i:
self._items.append(obj)
self._index += 1
while self._index in self._unsorted:
obj = self._unsorted.pop(self._index)
self._items.append(obj)
self._index += 1
self._cond.notify()
else:
self._unsorted[i] = obj
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
def _set_length(self, length):
self._cond.acquire()
try:
self._length = length
if self._index == self._length:
self._cond.notify()
del self._cache[self._job]
finally:
self._cond.release()
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
def _set(self, i, obj):
self._cond.acquire()
try:
self._items.append(obj)
self._index += 1
self._cond.notify()
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
#
#
#
class ThreadPool(Pool):
from .dummy import Process
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
def _setup_queues(self):
self._inqueue = Queue.Queue()
self._outqueue = Queue.Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# put sentinels at head of inqueue to make workers finish
inqueue.not_empty.acquire()
try:
inqueue.queue.clear()
inqueue.queue.extend([None] * size)
inqueue.not_empty.notifyAll()
finally:
inqueue.not_empty.release()
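#
# A minimal usage sketch (illustrative, not part of the original module);
# `_square` is a hypothetical helper defined at module level so the
# worker processes can find it.  Pool is imported through the package so
# the demo also works when this file is run directly.
#
def _square(x):
    return x*x

if __name__ == '__main__':
    from multiprocessing import Pool, freeze_support
    freeze_support()
    pool = Pool(processes=2)
    print pool.map(_square, range(10))      # [0, 1, 4, ..., 81]
    pool.close()
    pool.join()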

Lib/multiprocessing/process.py
@@ -0,0 +1,302 @@
#
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = ['Process', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
#
#
#
try:
ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
ORIGINAL_DIR = None
try:
bytes
except NameError:
bytes = str # XXX not needed in Py2.6 and Py3.0
#
# Public functions
#
def current_process():
'''
Return process object representing the current process
'''
return _current_process
def active_children():
'''
Return list of process objects corresponding to live child processes
'''
_cleanup()
return list(_current_process._children)
#
#
#
def _cleanup():
# check for processes which have finished
for p in list(_current_process._children):
if p._popen.poll() is not None:
_current_process._children.discard(p)
#
# The `Process` class
#
class Process(object):
'''
Process objects represent activity that is run in a separate process
The class is analogous to `threading.Thread`
'''
_Popen = None
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
assert group is None, 'group argument must be None for now'
count = _current_process._counter.next()
self._identity = _current_process._identity + (count,)
self._authkey = _current_process._authkey
self._daemonic = _current_process._daemonic
self._tempdir = _current_process._tempdir
self._parent_pid = os.getpid()
self._popen = None
self._target = target
self._args = tuple(args)
self._kwargs = dict(kwargs)
self._name = name or type(self).__name__ + '-' + \
':'.join(str(i) for i in self._identity)
def run(self):
'''
Method to be run in sub-process; can be overridden in sub-class
'''
if self._target:
self._target(*self._args, **self._kwargs)
def start(self):
'''
Start child process
'''
assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process'
assert not _current_process._daemonic, \
'daemonic processes are not allowed to have children'
_cleanup()
if self._Popen is not None:
Popen = self._Popen
else:
from .forking import Popen
self._popen = Popen(self)
_current_process._children.add(self)
def terminate(self):
'''
Terminate process; sends SIGTERM signal or uses TerminateProcess()
'''
self._popen.terminate()
def join(self, timeout=None):
'''
Wait until child process terminates
'''
assert self._parent_pid == os.getpid(), 'can only join a child process'
assert self._popen is not None, 'can only join a started process'
res = self._popen.wait(timeout)
if res is not None:
_current_process._children.discard(self)
def is_alive(self):
'''
Return whether process is alive
'''
if self is _current_process:
return True
assert self._parent_pid == os.getpid(), 'can only test a child process'
if self._popen is None:
return False
self._popen.poll()
return self._popen.returncode is None
def get_name(self):
'''
Return name of process
'''
return self._name
def set_name(self, name):
'''
Set name of process
'''
assert isinstance(name, str), 'name must be a string'
self._name = name
def is_daemon(self):
'''
Return whether process is a daemon
'''
return self._daemonic
def set_daemon(self, daemonic):
'''
Set whether process is a daemon
'''
assert self._popen is None, 'process has already started'
self._daemonic = daemonic
def get_authkey(self):
'''
Return authorization key of process
'''
return self._authkey
def set_authkey(self, authkey):
'''
Set authorization key of process
'''
self._authkey = AuthenticationString(authkey)
def get_exitcode(self):
'''
Return exit code of process or `None` if it has yet to stop
'''
if self._popen is None:
return self._popen
return self._popen.poll()
def get_ident(self):
'''
Return identifier (PID) of process or `None` if it has yet to start
'''
if self is _current_process:
return os.getpid()
else:
return self._popen and self._popen.pid
pid = property(get_ident)
def __repr__(self):
if self is _current_process:
status = 'started'
elif self._parent_pid != os.getpid():
status = 'unknown'
elif self._popen is None:
status = 'initial'
else:
if self._popen.poll() is not None:
status = self.get_exitcode()
else:
status = 'started'
if type(status) is int:
if status == 0:
status = 'stopped'
else:
status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
status, self._daemonic and ' daemon' or '')
##
def _bootstrap(self):
from . import util
global _current_process
try:
self._children = set()
self._counter = itertools.count(1)
try:
os.close(sys.stdin.fileno())
except (OSError, ValueError):
pass
_current_process = self
util._finalizer_registry.clear()
util._run_after_forkers()
util.info('child process calling self.run()')
try:
self.run()
exitcode = 0
finally:
util._exit_function()
except SystemExit, e:
if not e.args:
exitcode = 1
elif type(e.args[0]) is int:
exitcode = e.args[0]
else:
sys.stderr.write(e.args[0] + '\n')
sys.stderr.flush()
exitcode = 1
except:
exitcode = 1
import traceback
sys.stderr.write('Process %s:\n' % self.get_name())
sys.stderr.flush()
traceback.print_exc()
util.info('process exiting with exitcode %d' % exitcode)
return exitcode
#
# We subclass bytes to avoid accidental transmission of auth keys over the network
#
class AuthenticationString(bytes):
def __reduce__(self):
from .forking import Popen
if not Popen.thread_is_spawning():
raise TypeError(
'Pickling an AuthenticationString object is '
'disallowed for security reasons'
)
return AuthenticationString, (bytes(self),)
#
# Create object representing the main process
#
class _MainProcess(Process):
def __init__(self):
self._identity = ()
self._daemonic = False
self._name = 'MainProcess'
self._parent_pid = None
self._popen = None
self._counter = itertools.count(1)
self._children = set()
self._authkey = AuthenticationString(os.urandom(32))
self._tempdir = None
_current_process = _MainProcess()
del _MainProcess
#
# Give names to some return codes
#
_exitcode_to_name = {}
for name, signum in signal.__dict__.items():
if name[:3]=='SIG' and '_' not in name:
_exitcode_to_name[-signum] = name
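#
# A minimal usage sketch (illustrative, not part of the original module);
# `_hello` is a hypothetical demo function.  Process is imported through
# the package so that its lazy relative imports resolve.
#
def _hello(name):
    print 'hello,', name

if __name__ == '__main__':
    from multiprocessing import Process, freeze_support
    freeze_support()
    p = Process(target=_hello, args=('world',))
    p.start()
    p.join()
    print 'exit code:', p.get_exitcode()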

Lib/multiprocessing/queues.py
@@ -0,0 +1,356 @@
#
# Module implementing queues
#
# multiprocessing/queues.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = ['Queue', 'SimpleQueue']
import sys
import os
import threading
import collections
import time
import atexit
import weakref
from Queue import Empty, Full
import _multiprocessing
from multiprocessing import Pipe
from multiprocessing.synchronize import Lock, BoundedSemaphore, Semaphore, Condition
from multiprocessing.util import debug, info, Finalize, register_after_fork
from multiprocessing.forking import assert_spawning
#
# Queue type using a pipe, buffer and thread
#
class Queue(object):
def __init__(self, maxsize=0):
if maxsize <= 0:
maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
self._maxsize = maxsize
self._reader, self._writer = Pipe(duplex=False)
self._rlock = Lock()
self._opid = os.getpid()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = Lock()
self._sem = BoundedSemaphore(maxsize)
self._after_fork()
if sys.platform != 'win32':
register_after_fork(self, Queue._after_fork)
def __getstate__(self):
assert_spawning(self)
return (self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid)
def __setstate__(self, state):
(self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid) = state
self._after_fork()
def _after_fork(self):
debug('Queue._after_fork()')
self._notempty = threading.Condition(threading.Lock())
self._buffer = collections.deque()
self._thread = None
self._jointhread = None
self._joincancelled = False
self._closed = False
self._close = None
self._send = self._writer.send
self._recv = self._reader.recv
self._poll = self._reader.poll
def put(self, obj, block=True, timeout=None):
assert not self._closed
if not self._sem.acquire(block, timeout):
raise Full
self._notempty.acquire()
try:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._notempty.notify()
finally:
self._notempty.release()
def get(self, block=True, timeout=None):
if block and timeout is None:
self._rlock.acquire()
try:
res = self._recv()
self._sem.release()
return res
finally:
self._rlock.release()
else:
if block:
deadline = time.time() + timeout
if not self._rlock.acquire(block, timeout):
raise Empty
try:
if not self._poll(block and (deadline-time.time()) or 0.0):
raise Empty
res = self._recv()
self._sem.release()
return res
finally:
self._rlock.release()
def qsize(self):
# Raises NotImplementedError on Mac OS X because of broken sem_getvalue()
return self._maxsize - self._sem._semlock._get_value()
def empty(self):
return not self._poll()
def full(self):
return self._sem._semlock._is_zero()
def get_nowait(self):
return self.get(False)
def put_nowait(self, obj):
return self.put(obj, False)
def close(self):
self._closed = True
self._reader.close()
if self._close:
self._close()
def join_thread(self):
debug('Queue.join_thread()')
assert self._closed
if self._jointhread:
self._jointhread()
def cancel_join_thread(self):
debug('Queue.cancel_join_thread()')
self._joincancelled = True
try:
self._jointhread.cancel()
except AttributeError:
pass
def _start_thread(self):
debug('Queue._start_thread()')
# Start thread which transfers data from buffer to pipe
self._buffer.clear()
self._thread = threading.Thread(
target=Queue._feed,
args=(self._buffer, self._notempty, self._send,
self._wlock, self._writer.close),
name='QueueFeederThread'
)
self._thread.setDaemon(True)
debug('doing self._thread.start()')
self._thread.start()
debug('... done self._thread.start()')
# On process exit we will wait for data to be flushed to pipe.
#
# However, if this process created the queue then all
# processes which use the queue will be descendants of this
# process. Therefore waiting for the queue to be flushed
# is pointless once all the child processes have been joined.
created_by_this_process = (self._opid == os.getpid())
if not self._joincancelled and not created_by_this_process:
self._jointhread = Finalize(
self._thread, Queue._finalize_join,
[weakref.ref(self._thread)],
exitpriority=-5
)
# Send sentinel to the thread queue object when garbage collected
self._close = Finalize(
self, Queue._finalize_close,
[self._buffer, self._notempty],
exitpriority=10
)
@staticmethod
def _finalize_join(twr):
debug('joining queue thread')
thread = twr()
if thread is not None:
thread.join()
debug('... queue thread joined')
else:
debug('... queue thread already dead')
@staticmethod
def _finalize_close(buffer, notempty):
debug('telling queue thread to quit')
notempty.acquire()
try:
buffer.append(_sentinel)
notempty.notify()
finally:
notempty.release()
@staticmethod
def _feed(buffer, notempty, send, writelock, close):
debug('starting thread to feed data to pipe')
from .util import is_exiting
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
bpopleft = buffer.popleft
sentinel = _sentinel
if sys.platform != 'win32':
wacquire = writelock.acquire
wrelease = writelock.release
else:
wacquire = None
try:
while 1:
nacquire()
try:
if not buffer:
nwait()
finally:
nrelease()
try:
while 1:
obj = bpopleft()
if obj is sentinel:
debug('feeder thread got sentinel -- exiting')
close()
return
if wacquire is None:
send(obj)
else:
wacquire()
try:
send(obj)
finally:
wrelease()
except IndexError:
pass
except Exception, e:
# Since this runs in a daemon thread the resources it uses
# may become unusable while the process is cleaning up.
# We ignore errors which happen after the process has
# started to clean up.
try:
if is_exiting():
info('error in queue thread: %s', e)
else:
import traceback
traceback.print_exc()
except Exception:
pass
_sentinel = object()
#
# A queue type which also supports join() and task_done() methods
#
# Note that if you do not call task_done() for each finished task then
# eventually the counter's semaphore may overflow causing Bad Things
# to happen.
#
class JoinableQueue(Queue):
def __init__(self, maxsize=0):
Queue.__init__(self, maxsize)
self._unfinished_tasks = Semaphore(0)
self._cond = Condition()
def __getstate__(self):
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
def __setstate__(self, state):
Queue.__setstate__(self, state[:-2])
self._cond, self._unfinished_tasks = state[-2:]
def put(self, item, block=True, timeout=None):
Queue.put(self, item, block, timeout)
self._unfinished_tasks.release()
def task_done(self):
self._cond.acquire()
try:
if not self._unfinished_tasks.acquire(False):
raise ValueError('task_done() called too many times')
if self._unfinished_tasks._semlock._is_zero():
self._cond.notify_all()
finally:
self._cond.release()
def join(self):
self._cond.acquire()
try:
if not self._unfinished_tasks._semlock._is_zero():
self._cond.wait()
finally:
self._cond.release()
#
# Simplified Queue type -- really just a locked pipe
#
class SimpleQueue(object):
def __init__(self):
self._reader, self._writer = Pipe(duplex=False)
self._rlock = Lock()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = Lock()
self._make_methods()
def empty(self):
return not self._reader.poll()
def __getstate__(self):
assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock)
def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock) = state
self._make_methods()
def _make_methods(self):
recv = self._reader.recv
racquire, rrelease = self._rlock.acquire, self._rlock.release
def get():
racquire()
try:
return recv()
finally:
rrelease()
self.get = get
if self._wlock is None:
# writes to a message-oriented win32 pipe are atomic
self.put = self._writer.send
else:
send = self._writer.send
wacquire, wrelease = self._wlock.acquire, self._wlock.release
def put(obj):
wacquire()
try:
return send(obj)
finally:
wrelease()
self.put = put
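#
# A minimal usage sketch (illustrative, not part of the original module):
# a producer/consumer pair sharing a Queue; `_producer` is a hypothetical
# demo function.
#
def _producer(q):
    for i in range(5):
        q.put(i)
    q.put(None)                 # sentinel telling the consumer to stop

if __name__ == '__main__':
    import multiprocessing
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=_producer, args=(q,))
    p.start()
    for item in iter(q.get, None):
        print item
    p.join()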

Lib/multiprocessing/reduction.py
@@ -0,0 +1,190 @@
#
# Module to allow connection and socket objects to be transferred
# between processes
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = []
import os
import sys
import socket
import threading
import copy_reg
import _multiprocessing
from multiprocessing import current_process
from multiprocessing.forking import Popen, duplicate, close
from multiprocessing.util import register_after_fork, debug, sub_debug
from multiprocessing.connection import Client, Listener
#
#
#
if not(sys.platform == 'win32' or hasattr(_multiprocessing, 'recvfd')):
raise ImportError('pickling of connections not supported')
#
# Platform specific definitions
#
if sys.platform == 'win32':
import _subprocess
from ._multiprocessing import win32
def send_handle(conn, handle, destination_pid):
process_handle = win32.OpenProcess(
win32.PROCESS_ALL_ACCESS, False, destination_pid
)
try:
new_handle = duplicate(handle, process_handle)
conn.send(new_handle)
finally:
close(process_handle)
def recv_handle(conn):
return conn.recv()
else:
def send_handle(conn, handle, destination_pid):
_multiprocessing.sendfd(conn.fileno(), handle)
def recv_handle(conn):
return _multiprocessing.recvfd(conn.fileno())
#
# Support for a per-process server thread which caches pickled handles
#
_cache = set()
def _reset(obj):
global _lock, _listener, _cache
for h in _cache:
close(h)
_cache.clear()
_lock = threading.Lock()
_listener = None
_reset(None)
register_after_fork(_reset, _reset)
def _get_listener():
global _listener
if _listener is None:
_lock.acquire()
try:
if _listener is None:
debug('starting listener and thread for sending handles')
_listener = Listener(authkey=current_process().get_authkey())
t = threading.Thread(target=_serve)
t.setDaemon(True)
t.start()
finally:
_lock.release()
return _listener
def _serve():
from .util import is_exiting, sub_warning
while 1:
try:
conn = _listener.accept()
handle_wanted, destination_pid = conn.recv()
_cache.remove(handle_wanted)
send_handle(conn, handle_wanted, destination_pid)
close(handle_wanted)
conn.close()
except:
if not is_exiting():
import traceback
sub_warning(
'thread for sharing handles raised exception:\n' +
'-'*79 + '\n' + traceback.format_exc() + '-'*79
)
#
# Functions to be used for pickling/unpickling objects with handles
#
def reduce_handle(handle):
if Popen.thread_is_spawning():
return (None, Popen.duplicate_for_child(handle), True)
dup_handle = duplicate(handle)
_cache.add(dup_handle)
sub_debug('reducing handle %d', handle)
return (_get_listener().address, dup_handle, False)
def rebuild_handle(pickled_data):
address, handle, inherited = pickled_data
if inherited:
return handle
sub_debug('rebuilding handle %d', handle)
conn = Client(address, authkey=current_process().get_authkey())
conn.send((handle, os.getpid()))
new_handle = recv_handle(conn)
conn.close()
return new_handle
#
# Register `_multiprocessing.Connection` with `copy_reg`
#
def reduce_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_connection, (rh, conn.readable, conn.writable)
def rebuild_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _multiprocessing.Connection(
handle, readable=readable, writable=writable
)
copy_reg.pickle(_multiprocessing.Connection, reduce_connection)
#
# Register `socket.socket` with `copy_reg`
#
def fromfd(fd, family, type_, proto=0):
s = socket.fromfd(fd, family, type_, proto)
if s.__class__ is not socket.socket:
s = socket.socket(_sock=s)
return s
def reduce_socket(s):
reduced_handle = reduce_handle(s.fileno())
return rebuild_socket, (reduced_handle, s.family, s.type, s.proto)
def rebuild_socket(reduced_handle, family, type_, proto):
fd = rebuild_handle(reduced_handle)
_sock = fromfd(fd, family, type_, proto)
close(fd)
return _sock
copy_reg.pickle(socket.socket, reduce_socket)
#
# Register `_multiprocessing.PipeConnection` with `copy_reg`
#
if sys.platform == 'win32':
def reduce_pipe_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
def rebuild_pipe_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _multiprocessing.PipeConnection(
handle, readable=readable, writable=writable
)
copy_reg.pickle(_multiprocessing.PipeConnection, reduce_pipe_connection)
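#
# A minimal sketch (illustrative, not part of the original module): once
# this module has been imported, one end of a pipe can itself be sent
# over another connection, e.g.:
#
#     import multiprocessing, multiprocessing.reduction
#     a, b = multiprocessing.Pipe()
#     c, d = multiprocessing.Pipe()
#     a.send(d)     # `d` is pickled via reduce_connection() above
#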

Lib/multiprocessing/sharedctypes.py
@@ -0,0 +1,234 @@
#
# Module which supports allocation of ctypes objects from shared memory
#
# multiprocessing/sharedctypes.py
#
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#
import sys
import ctypes
import weakref
import copy_reg
from multiprocessing import heap, RLock
from multiprocessing.forking import assert_spawning
__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
#
#
#
typecode_to_type = {
'c': ctypes.c_char, 'u': ctypes.c_wchar,
'b': ctypes.c_byte, 'B': ctypes.c_ubyte,
'h': ctypes.c_short, 'H': ctypes.c_ushort,
'i': ctypes.c_int, 'I': ctypes.c_uint,
'l': ctypes.c_long, 'L': ctypes.c_ulong,
'f': ctypes.c_float, 'd': ctypes.c_double
}
#
#
#
def _new_value(type_):
size = ctypes.sizeof(type_)
wrapper = heap.BufferWrapper(size)
return rebuild_ctype(type_, wrapper, None)
def RawValue(typecode_or_type, *args):
'''
Returns a ctypes object allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
obj.__init__(*args)
return obj
def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a ctypes array allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
if isinstance(size_or_initializer, int):
type_ = type_ * size_or_initializer
return _new_value(type_)
else:
type_ = type_ * len(size_or_initializer)
result = _new_value(type_)
result.__init__(*size_or_initializer)
return result
def Value(typecode_or_type, *args, **kwds):
'''
Return a synchronization wrapper for a Value
'''
lock = kwds.pop('lock', None)
if kwds:
raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
obj = RawValue(typecode_or_type, *args)
if lock is None:
lock = RLock()
assert hasattr(lock, 'acquire')
return synchronized(obj, lock)
def Array(typecode_or_type, size_or_initializer, **kwds):
'''
Return a synchronization wrapper for a RawArray
'''
lock = kwds.pop('lock', None)
if kwds:
raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
obj = RawArray(typecode_or_type, size_or_initializer)
if lock is None:
lock = RLock()
assert hasattr(lock, 'acquire')
return synchronized(obj, lock)
def copy(obj):
new_obj = _new_value(type(obj))
ctypes.pointer(new_obj)[0] = obj
return new_obj
def synchronized(obj, lock=None):
assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
if isinstance(obj, ctypes._SimpleCData):
return Synchronized(obj, lock)
elif isinstance(obj, ctypes.Array):
if obj._type_ is ctypes.c_char:
return SynchronizedString(obj, lock)
return SynchronizedArray(obj, lock)
else:
cls = type(obj)
try:
scls = class_cache[cls]
except KeyError:
names = [field[0] for field in cls._fields_]
d = dict((name, make_property(name)) for name in names)
classname = 'Synchronized' + cls.__name__
scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
return scls(obj, lock)
#
# Functions for pickling/unpickling
#
def reduce_ctype(obj):
assert_spawning(obj)
if isinstance(obj, ctypes.Array):
return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
else:
return rebuild_ctype, (type(obj), obj._wrapper, None)
def rebuild_ctype(type_, wrapper, length):
if length is not None:
type_ = type_ * length
if sys.platform == 'win32' and type_ not in copy_reg.dispatch_table:
copy_reg.pickle(type_, reduce_ctype)
obj = type_.from_address(wrapper.get_address())
obj._wrapper = wrapper
return obj
#
# Function to create properties
#
def make_property(name):
try:
return prop_cache[name]
except KeyError:
d = {}
exec template % ((name,)*7) in d
prop_cache[name] = d[name]
return d[name]
template = '''
def get%s(self):
self.acquire()
try:
return self._obj.%s
finally:
self.release()
def set%s(self, value):
self.acquire()
try:
self._obj.%s = value
finally:
self.release()
%s = property(get%s, set%s)
'''
prop_cache = {}
class_cache = weakref.WeakKeyDictionary()
#
# Synchronized wrappers
#
class SynchronizedBase(object):
def __init__(self, obj, lock=None):
self._obj = obj
self._lock = lock or RLock()
self.acquire = self._lock.acquire
self.release = self._lock.release
def __reduce__(self):
assert_spawning(self)
return synchronized, (self._obj, self._lock)
def get_obj(self):
return self._obj
def get_lock(self):
return self._lock
def __repr__(self):
return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
class Synchronized(SynchronizedBase):
value = make_property('value')
class SynchronizedArray(SynchronizedBase):
def __len__(self):
return len(self._obj)
def __getitem__(self, i):
self.acquire()
try:
return self._obj[i]
finally:
self.release()
def __setitem__(self, i, value):
self.acquire()
try:
self._obj[i] = value
finally:
self.release()
def __getslice__(self, start, stop):
self.acquire()
try:
return self._obj[start:stop]
finally:
self.release()
def __setslice__(self, start, stop, values):
self.acquire()
try:
self._obj[start:stop] = values
finally:
self.release()
class SynchronizedString(SynchronizedArray):
value = make_property('value')
raw = make_property('raw')
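#
# A minimal usage sketch (illustrative, not part of the original module):
# shared ctypes values together with their synchronization wrappers.
#
if __name__ == '__main__':
    v = Value('i', 7)               # synchronized c_int
    a = Array('d', range(5))        # synchronized array of c_double
    lock = v.get_lock()
    lock.acquire()
    try:
        v.value += 1
    finally:
        lock.release()
    print v.value, a[:]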

Lib/multiprocessing/synchronize.py
@@ -0,0 +1,294 @@
#
# Module implementing synchronization primitives
#
# multiprocessing/synchronize.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = [
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
]
import threading
import os
import sys
from time import time as _time, sleep as _sleep
import _multiprocessing
from multiprocessing.process import current_process
from multiprocessing.util import Finalize, register_after_fork, debug
from multiprocessing.forking import assert_spawning, Popen
#
# Constants
#
RECURSIVE_MUTEX, SEMAPHORE = range(2)
SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
#
# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
#
class SemLock(object):
def __init__(self, kind, value, maxvalue):
sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue)
debug('created semlock with handle %s' % sl.handle)
self._make_methods()
if sys.platform != 'win32':
def _after_fork(obj):
obj._semlock._after_fork()
register_after_fork(self, _after_fork)
def _make_methods(self):
self.acquire = self._semlock.acquire
self.release = self._semlock.release
self.__enter__ = self._semlock.__enter__
self.__exit__ = self._semlock.__exit__
def __getstate__(self):
assert_spawning(self)
sl = self._semlock
return (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue)
def __setstate__(self, state):
self._semlock = _multiprocessing.SemLock._rebuild(*state)
debug('recreated blocker with handle %r' % state[0])
self._make_methods()
#
# Semaphore
#
class Semaphore(SemLock):
def __init__(self, value=1):
SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)
def get_value(self):
return self._semlock._get_value()
def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<Semaphore(value=%s)>' % value
#
# Bounded semaphore
#
class BoundedSemaphore(Semaphore):
def __init__(self, value=1):
SemLock.__init__(self, SEMAPHORE, value, value)
def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<BoundedSemaphore(value=%s, maxvalue=%s)>' % \
(value, self._semlock.maxvalue)
#
# Non-recursive lock
#
class Lock(SemLock):
def __init__(self):
SemLock.__init__(self, SEMAPHORE, 1, 1)
def __repr__(self):
try:
if self._semlock._is_mine():
name = current_process().get_name()
if threading.currentThread().getName() != 'MainThread':
name += '|' + threading.currentThread().getName()
elif self._semlock._get_value() == 1:
name = 'None'
elif self._semlock._count() > 0:
name = 'SomeOtherThread'
else:
name = 'SomeOtherProcess'
except Exception:
name = 'unknown'
return '<Lock(owner=%s)>' % name
#
# Recursive lock
#
class RLock(SemLock):
def __init__(self):
SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1)
def __repr__(self):
try:
if self._semlock._is_mine():
name = current_process().get_name()
if threading.currentThread().getName() != 'MainThread':
name += '|' + threading.currentThread().getName()
count = self._semlock._count()
elif self._semlock._get_value() == 1:
name, count = 'None', 0
elif self._semlock._count() > 0:
name, count = 'SomeOtherThread', 'nonzero'
else:
name, count = 'SomeOtherProcess', 'nonzero'
except Exception:
name, count = 'unknown', 'unknown'
return '<RLock(%s, %s)>' % (name, count)
#
# Condition variable
#
class Condition(object):
def __init__(self, lock=None):
self._lock = lock or RLock()
self._sleeping_count = Semaphore(0)
self._woken_count = Semaphore(0)
self._wait_semaphore = Semaphore(0)
self._make_methods()
def __getstate__(self):
assert_spawning(self)
return (self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore)
def __setstate__(self, state):
(self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore) = state
self._make_methods()
def _make_methods(self):
self.acquire = self._lock.acquire
self.release = self._lock.release
self.__enter__ = self._lock.__enter__
self.__exit__ = self._lock.__exit__
def __repr__(self):
try:
num_waiters = (self._sleeping_count._semlock._get_value() -
self._woken_count._semlock._get_value())
except Exception:
num_waiters = 'unknown'
return '<Condition(%s, %s)>' % (self._lock, num_waiters)
def wait(self, timeout=None):
assert self._lock._semlock._is_mine(), \
'must acquire() condition before using wait()'
# indicate that this thread is going to sleep
self._sleeping_count.release()
# release lock
count = self._lock._semlock._count()
for i in xrange(count):
self._lock.release()
try:
# wait for notification or timeout
self._wait_semaphore.acquire(True, timeout)
finally:
# indicate that this thread has woken
self._woken_count.release()
# reacquire lock
for i in xrange(count):
self._lock.acquire()
def notify(self):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)
# to take account of timeouts since last notify() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
if self._sleeping_count.acquire(False): # try grabbing a sleeper
self._wait_semaphore.release() # wake up one sleeper
self._woken_count.acquire() # wait for the sleeper to wake
# rezero _wait_semaphore in case a timeout just happened
self._wait_semaphore.acquire(False)
def notify_all(self):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)
# to take account of timeouts since last notify*() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
sleepers = 0
while self._sleeping_count.acquire(False):
self._wait_semaphore.release() # wake up one sleeper
sleepers += 1
if sleepers:
for i in xrange(sleepers):
self._woken_count.acquire() # wait for a sleeper to wake
# rezero wait_semaphore in case some timeouts just happened
while self._wait_semaphore.acquire(False):
pass
#
# Event
#
class Event(object):
def __init__(self):
self._cond = Condition(Lock())
self._flag = Semaphore(0)
def is_set(self):
self._cond.acquire()
try:
if self._flag.acquire(False):
self._flag.release()
return True
return False
finally:
self._cond.release()
def set(self):
self._cond.acquire()
try:
self._flag.acquire(False)
self._flag.release()
self._cond.notify_all()
finally:
self._cond.release()
def clear(self):
self._cond.acquire()
try:
self._flag.acquire(False)
finally:
self._cond.release()
def wait(self, timeout=None):
self._cond.acquire()
try:
if self._flag.acquire(False):
self._flag.release()
else:
self._cond.wait(timeout)
finally:
self._cond.release()
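#
# A minimal usage sketch (illustrative, not part of the original module):
# one process waits on an Event which another one sets; `_waiter` is a
# hypothetical demo function.
#
def _waiter(ev):
    ev.wait()
    print 'event was set'

if __name__ == '__main__':
    import multiprocessing
    ev = multiprocessing.Event()
    p = multiprocessing.Process(target=_waiter, args=(ev,))
    p.start()
    ev.set()
    p.join()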

Lib/multiprocessing/util.py
@@ -0,0 +1,336 @@
#
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
import itertools
import weakref
import copy_reg
import atexit
import threading # we want threading to install its
# cleanup function before multiprocessing does
from multiprocessing.process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal'
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
if not _logger:
import logging, atexit
# XXX multiprocessing should clean up before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
_check_logger_class()
_logger = logging.getLogger(LOGGER_NAME)
return _logger
def _check_logger_class():
'''
Make sure process name is recorded when loggers are used
'''
# XXX This function is unnecessary once logging is patched
import logging
if hasattr(logging, 'multiprocessing'):
return
logging._acquireLock()
try:
OldLoggerClass = logging.getLoggerClass()
if not getattr(OldLoggerClass, '_process_aware', False):
class ProcessAwareLogger(OldLoggerClass):
_process_aware = True
def makeRecord(self, *args, **kwds):
record = OldLoggerClass.makeRecord(self, *args, **kwds)
record.processName = current_process()._name
return record
logging.setLoggerClass(ProcessAwareLogger)
finally:
logging._releaseLock()
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level is not None:
logger.setLevel(level)
_log_to_stderr = True
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
if current_process()._tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
current_process()._tempdir = tempdir
return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception, e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(_afterfork_counter.next(), id(obj), func)] = obj
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
if obj is not None:
self._weakref = weakref.ref(obj, self)
else:
assert exitpriority is not None
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, _finalizer_counter.next())
_finalizer_registry[self._key] = self
def __call__(self, wr=None):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
x += ', exitpriority=' + str(self._key[0])
return x + '>'
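#
# A hedged usage sketch: a Finalize instance is its own weakref callback,
# so cleanup runs when the object is collected or, at the latest, from
# _exit_function() below.
#
#     >>> import shutil, tempfile
#     >>> class Workspace(object):
#     ...     def __init__(self):
#     ...         self.path = tempfile.mkdtemp()
#     ...         self._finalizer = Finalize(self, shutil.rmtree,
#     ...                                    args=[self.path], exitpriority=10)
#     >>> w = Workspace()
#     >>> w._finalizer.still_active()
#     True
#     >>> del w       # rmtree() runs now via the weakref callback
#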
def _run_finalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if minpriority is None:
f = lambda p : p[0][0] is not None
else:
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
items = [x for x in _finalizer_registry.items() if f(x)]
items.sort(reverse=True)
for key, finalizer in items:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function():
global _exiting
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
for p in active_children():
if p._daemonic:
info('calling terminate() for daemon %s', p.get_name())
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.get_name())
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
register_after_fork(self, ForkAwareThreadLock.__init__)
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
#
# Try making some callable types picklable
#
def _reduce_method(m):
if m.im_self is None:
return getattr, (m.im_class, m.im_func.func_name)
else:
return getattr, (m.im_self, m.im_func.func_name)
copy_reg.pickle(type(Finalize.__init__), _reduce_method)
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
copy_reg.pickle(type(list.append), _reduce_method_descriptor)
copy_reg.pickle(type(int.__add__), _reduce_method_descriptor)
def _reduce_builtin_function_or_method(m):
return getattr, (m.__self__, m.__name__)
copy_reg.pickle(type(list().append), _reduce_builtin_function_or_method)
copy_reg.pickle(type(int().__add__), _reduce_builtin_function_or_method)
try:
from functools import partial
except ImportError:
pass
else:
def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
return partial(func, *args, **keywords)
copy_reg.pickle(partial, _reduce_partial)
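#
# A hedged sketch of what the reducers above enable: bound methods
# round-trip through pickle by being reduced to getattr() calls (run
# interactively, where classes defined in __main__ are picklable).
#
#     >>> import pickle
#     >>> class Greeter(object):
#     ...     def hello(self):
#     ...         return 'hello'
#     >>> pickle.loads(pickle.dumps(Greeter().hello))()
#     'hello'
#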

File diff suppressed because it is too large

View File

@ -0,0 +1,515 @@
/*
* Definition of a `Connection` type.
* Used by `socket_connection.c` and `pipe_connection.c`.
*
* connection.h
*
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#ifndef CONNECTION_H
#define CONNECTION_H
/*
* Read/write flags
*/
#define READABLE 1
#define WRITABLE 2
#define CHECK_READABLE(self) \
if (!(self->flags & READABLE)) { \
PyErr_SetString(PyExc_IOError, "connection is write-only"); \
return NULL; \
}
#define CHECK_WRITABLE(self) \
if (!(self->flags & WRITABLE)) { \
PyErr_SetString(PyExc_IOError, "connection is read-only"); \
return NULL; \
}
/*
* Allocation and deallocation
*/
static PyObject *
connection_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
ConnectionObject *self;
HANDLE handle;
BOOL readable = TRUE, writable = TRUE;
static char *kwlist[] = {"handle", "readable", "writable", NULL};
if (!PyArg_ParseTupleAndKeywords(args, kwds, F_HANDLE "|ii", kwlist,
&handle, &readable, &writable))
return NULL;
if (handle == INVALID_HANDLE_VALUE || (Py_ssize_t)handle < 0) {
PyErr_Format(PyExc_IOError, "invalid handle %"
PY_FORMAT_SIZE_T "d", (Py_ssize_t)handle);
return NULL;
}
if (!readable && !writable) {
PyErr_SetString(PyExc_ValueError,
"either readable or writable must be true");
return NULL;
}
self = PyObject_New(ConnectionObject, type);
if (self == NULL)
return NULL;
self->weakreflist = NULL;
self->handle = handle;
self->flags = 0;
if (readable)
self->flags |= READABLE;
if (writable)
self->flags |= WRITABLE;
assert(self->flags >= 1 && self->flags <= 3);
return (PyObject*)self;
}
static void
connection_dealloc(ConnectionObject* self)
{
if (self->weakreflist != NULL)
PyObject_ClearWeakRefs((PyObject*)self);
if (self->handle != INVALID_HANDLE_VALUE) {
Py_BEGIN_ALLOW_THREADS
CLOSE(self->handle);
Py_END_ALLOW_THREADS
}
PyObject_Del(self);
}
/*
* Functions for transferring buffers
*/
static PyObject *
connection_sendbytes(ConnectionObject *self, PyObject *args)
{
char *buffer;
Py_ssize_t length, offset=0, size=PY_SSIZE_T_MIN;
int res;
if (!PyArg_ParseTuple(args, F_RBUFFER "#|" F_PY_SSIZE_T F_PY_SSIZE_T,
&buffer, &length, &offset, &size))
return NULL;
CHECK_WRITABLE(self);
if (offset < 0) {
PyErr_SetString(PyExc_ValueError, "offset is negative");
return NULL;
}
if (length < offset) {
PyErr_SetString(PyExc_ValueError, "buffer length < offset");
return NULL;
}
if (size == PY_SSIZE_T_MIN) {
size = length - offset;
} else {
if (size < 0) {
PyErr_SetString(PyExc_ValueError, "size is negative");
return NULL;
}
if (offset + size > length) {
PyErr_SetString(PyExc_ValueError,
"buffer length < offset + size");
return NULL;
}
}
Py_BEGIN_ALLOW_THREADS
res = conn_send_string(self, buffer + offset, size);
Py_END_ALLOW_THREADS
if (res < 0)
return mp_SetError(PyExc_IOError, res);
Py_RETURN_NONE;
}
static PyObject *
connection_recvbytes(ConnectionObject *self, PyObject *args)
{
char *freeme = NULL;
Py_ssize_t res, maxlength = PY_SSIZE_T_MAX;
PyObject *result = NULL;
if (!PyArg_ParseTuple(args, "|" F_PY_SSIZE_T, &maxlength))
return NULL;
CHECK_READABLE(self);
if (maxlength < 0) {
PyErr_SetString(PyExc_ValueError, "maxlength < 0");
return NULL;
}
Py_BEGIN_ALLOW_THREADS
res = conn_recv_string(self, self->buffer, CONNECTION_BUFFER_SIZE,
&freeme, maxlength);
Py_END_ALLOW_THREADS
if (res < 0) {
if (res == MP_BAD_MESSAGE_LENGTH) {
if ((self->flags & WRITABLE) == 0) {
Py_BEGIN_ALLOW_THREADS
CLOSE(self->handle);
Py_END_ALLOW_THREADS
self->handle = INVALID_HANDLE_VALUE;
} else {
self->flags = WRITABLE;
}
}
mp_SetError(PyExc_IOError, res);
} else {
if (freeme == NULL) {
result = PyString_FromStringAndSize(self->buffer, res);
} else {
result = PyString_FromStringAndSize(freeme, res);
PyMem_Free(freeme);
}
}
return result;
}
static PyObject *
connection_recvbytes_into(ConnectionObject *self, PyObject *args)
{
char *freeme = NULL, *buffer = NULL;
Py_ssize_t res, length, offset = 0;
PyObject *result = NULL;
if (!PyArg_ParseTuple(args, "w#|" F_PY_SSIZE_T,
&buffer, &length, &offset))
return NULL;
CHECK_READABLE(self);
if (offset < 0) {
PyErr_SetString(PyExc_ValueError, "negative offset");
return NULL;
}
if (offset > length) {
PyErr_SetString(PyExc_ValueError, "offset too large");
return NULL;
}
Py_BEGIN_ALLOW_THREADS
res = conn_recv_string(self, buffer+offset, length-offset,
&freeme, PY_SSIZE_T_MAX);
Py_END_ALLOW_THREADS
if (res < 0) {
if (res == MP_BAD_MESSAGE_LENGTH) {
if ((self->flags & WRITABLE) == 0) {
Py_BEGIN_ALLOW_THREADS
CLOSE(self->handle);
Py_END_ALLOW_THREADS
self->handle = INVALID_HANDLE_VALUE;
} else {
self->flags = WRITABLE;
}
}
mp_SetError(PyExc_IOError, res);
} else {
if (freeme == NULL) {
result = PyInt_FromSsize_t(res);
} else {
result = PyObject_CallFunction(BufferTooShort,
F_RBUFFER "#",
freeme, res);
PyMem_Free(freeme);
if (result) {
PyErr_SetObject(BufferTooShort, result);
Py_DECREF(result);
}
return NULL;
}
}
return result;
}
/*
* Functions for transferring objects
*/
static PyObject *
connection_send_obj(ConnectionObject *self, PyObject *obj)
{
char *buffer;
int res;
Py_ssize_t length;
PyObject *pickled_string = NULL;
CHECK_WRITABLE(self);
pickled_string = PyObject_CallFunctionObjArgs(pickle_dumps, obj,
pickle_protocol, NULL);
if (!pickled_string)
goto failure;
if (PyString_AsStringAndSize(pickled_string, &buffer, &length) < 0)
goto failure;
Py_BEGIN_ALLOW_THREADS
res = conn_send_string(self, buffer, (int)length);
Py_END_ALLOW_THREADS
if (res < 0) {
mp_SetError(PyExc_IOError, res);
goto failure;
}
Py_XDECREF(pickled_string);
Py_RETURN_NONE;
failure:
Py_XDECREF(pickled_string);
return NULL;
}
static PyObject *
connection_recv_obj(ConnectionObject *self)
{
char *freeme = NULL;
Py_ssize_t res;
PyObject *temp = NULL, *result = NULL;
CHECK_READABLE(self);
Py_BEGIN_ALLOW_THREADS
res = conn_recv_string(self, self->buffer, CONNECTION_BUFFER_SIZE,
&freeme, PY_SSIZE_T_MAX);
Py_END_ALLOW_THREADS
if (res < 0) {
if (res == MP_BAD_MESSAGE_LENGTH) {
if ((self->flags & WRITABLE) == 0) {
Py_BEGIN_ALLOW_THREADS
CLOSE(self->handle);
Py_END_ALLOW_THREADS
self->handle = INVALID_HANDLE_VALUE;
} else {
self->flags = WRITABLE;
}
}
mp_SetError(PyExc_IOError, res);
} else {
if (freeme == NULL) {
temp = PyString_FromStringAndSize(self->buffer, res);
} else {
temp = PyString_FromStringAndSize(freeme, res);
PyMem_Free(freeme);
}
}
if (temp)
result = PyObject_CallFunctionObjArgs(pickle_loads,
temp, NULL);
Py_XDECREF(temp);
return result;
}
/*
* Other functions
*/
static PyObject *
connection_poll(ConnectionObject *self, PyObject *args)
{
PyObject *timeout_obj = NULL;
double timeout = 0.0;
int res;
CHECK_READABLE(self);
if (!PyArg_ParseTuple(args, "|O", &timeout_obj))
return NULL;
if (timeout_obj == NULL) {
timeout = 0.0;
} else if (timeout_obj == Py_None) {
timeout = -1.0; /* block forever */
} else {
timeout = PyFloat_AsDouble(timeout_obj);
if (PyErr_Occurred())
return NULL;
if (timeout < 0.0)
timeout = 0.0;
}
Py_BEGIN_ALLOW_THREADS
res = conn_poll(self, timeout);
Py_END_ALLOW_THREADS
switch (res) {
case TRUE:
Py_RETURN_TRUE;
case FALSE:
Py_RETURN_FALSE;
default:
return mp_SetError(PyExc_IOError, res);
}
}
static PyObject *
connection_fileno(ConnectionObject* self)
{
if (self->handle == INVALID_HANDLE_VALUE) {
PyErr_SetString(PyExc_IOError, "handle is invalid");
return NULL;
}
return PyInt_FromLong((long)self->handle);
}
static PyObject *
connection_close(ConnectionObject *self)
{
if (self->handle != INVALID_HANDLE_VALUE) {
Py_BEGIN_ALLOW_THREADS
CLOSE(self->handle);
Py_END_ALLOW_THREADS
self->handle = INVALID_HANDLE_VALUE;
}
Py_RETURN_NONE;
}
static PyObject *
connection_repr(ConnectionObject *self)
{
static char *conn_type[] = {"read-only", "write-only", "read-write"};
assert(self->flags >= 1 && self->flags <= 3);
return FROM_FORMAT("<%s %s, handle %" PY_FORMAT_SIZE_T "d>",
conn_type[self->flags - 1],
CONNECTION_NAME, (Py_ssize_t)self->handle);
}
/*
* Getters and setters
*/
static PyObject *
connection_closed(ConnectionObject *self, void *closure)
{
return PyBool_FromLong((long)(self->handle == INVALID_HANDLE_VALUE));
}
static PyObject *
connection_readable(ConnectionObject *self, void *closure)
{
return PyBool_FromLong((long)(self->flags & READABLE));
}
static PyObject *
connection_writable(ConnectionObject *self, void *closure)
{
return PyBool_FromLong((long)(self->flags & WRITABLE));
}
/*
* Tables
*/
static PyMethodDef connection_methods[] = {
{"send_bytes", (PyCFunction)connection_sendbytes, METH_VARARGS,
"send the byte data from a readable buffer-like object"},
{"recv_bytes", (PyCFunction)connection_recvbytes, METH_VARARGS,
"receive byte data as a string"},
{"recv_bytes_into",(PyCFunction)connection_recvbytes_into,METH_VARARGS,
"receive byte data into a writeable buffer-like object\n"
"returns the number of bytes read"},
{"send", (PyCFunction)connection_send_obj, METH_O,
"send a (picklable) object"},
{"recv", (PyCFunction)connection_recv_obj, METH_NOARGS,
"receive a (picklable) object"},
{"poll", (PyCFunction)connection_poll, METH_VARARGS,
"whether there is any input available to be read"},
{"fileno", (PyCFunction)connection_fileno, METH_NOARGS,
"file descriptor or handle of the connection"},
{"close", (PyCFunction)connection_close, METH_NOARGS,
"close the connection"},
{NULL} /* Sentinel */
};
static PyGetSetDef connection_getset[] = {
{"closed", (getter)connection_closed, NULL,
"True if the connection is closed", NULL},
{"readable", (getter)connection_readable, NULL,
"True if the connection is readable", NULL},
{"writable", (getter)connection_writable, NULL,
"True if the connection is writable", NULL},
{NULL}
};
/*
* Connection type
*/
PyDoc_STRVAR(connection_doc,
"Connection type whose constructor signature is\n\n"
" Connection(handle, readable=True, writable=True).\n\n"
"The constructor does *not* duplicate the handle.");
PyTypeObject CONNECTION_TYPE = {
PyVarObject_HEAD_INIT(NULL, 0)
/* tp_name */ "_multiprocessing." CONNECTION_NAME,
/* tp_basicsize */ sizeof(ConnectionObject),
/* tp_itemsize */ 0,
/* tp_dealloc */ (destructor)connection_dealloc,
/* tp_print */ 0,
/* tp_getattr */ 0,
/* tp_setattr */ 0,
/* tp_compare */ 0,
/* tp_repr */ (reprfunc)connection_repr,
/* tp_as_number */ 0,
/* tp_as_sequence */ 0,
/* tp_as_mapping */ 0,
/* tp_hash */ 0,
/* tp_call */ 0,
/* tp_str */ 0,
/* tp_getattro */ 0,
/* tp_setattro */ 0,
/* tp_as_buffer */ 0,
/* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE |
Py_TPFLAGS_HAVE_WEAKREFS,
/* tp_doc */ connection_doc,
/* tp_traverse */ 0,
/* tp_clear */ 0,
/* tp_richcompare */ 0,
/* tp_weaklistoffset */ offsetof(ConnectionObject, weakreflist),
/* tp_iter */ 0,
/* tp_iternext */ 0,
/* tp_methods */ connection_methods,
/* tp_members */ 0,
/* tp_getset */ connection_getset,
/* tp_base */ 0,
/* tp_dict */ 0,
/* tp_descr_get */ 0,
/* tp_descr_set */ 0,
/* tp_dictoffset */ 0,
/* tp_init */ 0,
/* tp_alloc */ 0,
/* tp_new */ connection_new,
};
#endif /* CONNECTION_H */
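The type above is compiled twice, as Connection (socket_connection.c) and
PipeConnection (pipe_connection.c).  A hedged, Unix-only sketch of driving
_multiprocessing.Connection directly over a socketpair -- ordinarily one
would go through multiprocessing.Pipe(), and note that the constructor
does *not* duplicate the handle:

    import socket, _multiprocessing

    sa, sb = socket.socketpair()            # keep both sockets referenced
    reader = _multiprocessing.Connection(sa.fileno(), writable=False)
    writer = _multiprocessing.Connection(sb.fileno(), readable=False)

    writer.send_bytes('ping')               # framed with a 4 byte length prefix
    print reader.recv_bytes()               # -> 'ping'
    writer.send({'answer': 42})             # send()/recv() add a pickle layer
    print reader.recv()                     # -> {'answer': 42}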

View File

@ -0,0 +1,308 @@
/*
* Extension module used by the multiprocessing package
*
* multiprocessing.c
*
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#include "multiprocessing.h"
PyObject *create_win32_namespace(void);
PyObject *pickle_dumps, *pickle_loads, *pickle_protocol;
PyObject *ProcessError, *BufferTooShort;
/*
* Function which raises exceptions based on error codes
*/
PyObject *
mp_SetError(PyObject *Type, int num)
{
switch (num) {
#ifdef MS_WINDOWS
case MP_STANDARD_ERROR:
if (Type == NULL)
Type = PyExc_WindowsError;
PyErr_SetExcFromWindowsErr(Type, 0);
break;
case MP_SOCKET_ERROR:
if (Type == NULL)
Type = PyExc_WindowsError;
PyErr_SetExcFromWindowsErr(Type, WSAGetLastError());
break;
#else /* !MS_WINDOWS */
case MP_STANDARD_ERROR:
case MP_SOCKET_ERROR:
if (Type == NULL)
Type = PyExc_OSError;
PyErr_SetFromErrno(Type);
break;
#endif /* !MS_WINDOWS */
case MP_MEMORY_ERROR:
PyErr_NoMemory();
break;
case MP_END_OF_FILE:
PyErr_SetNone(PyExc_EOFError);
break;
case MP_EARLY_END_OF_FILE:
PyErr_SetString(PyExc_IOError,
"got end of file during message");
break;
case MP_BAD_MESSAGE_LENGTH:
PyErr_SetString(PyExc_IOError, "bad message length");
break;
case MP_EXCEPTION_HAS_BEEN_SET:
break;
default:
PyErr_Format(PyExc_RuntimeError,
"unkown error number %d", num);
}
return NULL;
}
/*
* Windows only
*/
#ifdef MS_WINDOWS
/* On Windows we set an event to signal Ctrl-C; compare with timemodule.c */
HANDLE sigint_event = NULL;
static BOOL WINAPI
ProcessingCtrlHandler(DWORD dwCtrlType)
{
SetEvent(sigint_event);
return FALSE;
}
/*
* Unix only
*/
#else /* !MS_WINDOWS */
#if HAVE_FD_TRANSFER
/* Functions for transferring file descriptors between processes.
Reimplements some of the functionality of the fdcred
module at http://www.mca-ltd.com/resources/fdcred_1.tgz. */
static PyObject *
multiprocessing_sendfd(PyObject *self, PyObject *args)
{
int conn, fd, res;
char dummy_char;
char buf[CMSG_SPACE(sizeof(int))];
struct msghdr msg = {0};
struct iovec dummy_iov;
struct cmsghdr *cmsg;
if (!PyArg_ParseTuple(args, "ii", &conn, &fd))
return NULL;
dummy_iov.iov_base = &dummy_char;
dummy_iov.iov_len = 1;
msg.msg_control = buf;
msg.msg_controllen = sizeof(buf);
msg.msg_iov = &dummy_iov;
msg.msg_iovlen = 1;
cmsg = CMSG_FIRSTHDR(&msg);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
cmsg->cmsg_len = CMSG_LEN(sizeof(int));
msg.msg_controllen = cmsg->cmsg_len;
*(int*)CMSG_DATA(cmsg) = fd;
Py_BEGIN_ALLOW_THREADS
res = sendmsg(conn, &msg, 0);
Py_END_ALLOW_THREADS
if (res < 0)
return PyErr_SetFromErrno(PyExc_OSError);
Py_RETURN_NONE;
}
static PyObject *
multiprocessing_recvfd(PyObject *self, PyObject *args)
{
int conn, fd, res;
char dummy_char;
char buf[CMSG_SPACE(sizeof(int))];
struct msghdr msg = {0};
struct iovec dummy_iov;
struct cmsghdr *cmsg;
if (!PyArg_ParseTuple(args, "i", &conn))
return NULL;
dummy_iov.iov_base = &dummy_char;
dummy_iov.iov_len = 1;
msg.msg_control = buf;
msg.msg_controllen = sizeof(buf);
msg.msg_iov = &dummy_iov;
msg.msg_iovlen = 1;
cmsg = CMSG_FIRSTHDR(&msg);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
cmsg->cmsg_len = CMSG_LEN(sizeof(int));
msg.msg_controllen = cmsg->cmsg_len;
Py_BEGIN_ALLOW_THREADS
res = recvmsg(conn, &msg, 0);
Py_END_ALLOW_THREADS
if (res < 0)
return PyErr_SetFromErrno(PyExc_OSError);
fd = *(int*)CMSG_DATA(cmsg);
return Py_BuildValue("i", fd);
}
#endif /* HAVE_FD_TRANSFER */
#endif /* !MS_WINDOWS */
/*
* All platforms
*/
static PyObject*
multiprocessing_address_of_buffer(PyObject *self, PyObject *obj)
{
void *buffer;
Py_ssize_t buffer_len;
if (PyObject_AsWriteBuffer(obj, &buffer, &buffer_len) < 0)
return NULL;
return Py_BuildValue("N" F_PY_SSIZE_T,
PyLong_FromVoidPtr(buffer), buffer_len);
}
/*
* Function table
*/
static PyMethodDef module_methods[] = {
{"address_of_buffer", multiprocessing_address_of_buffer, METH_O,
"address_of_buffer(obj) -> int\n"
"Return address of obj assuming obj supports buffer inteface"},
#if HAVE_FD_TRANSFER
{"sendfd", multiprocessing_sendfd, METH_VARARGS,
"sendfd(sockfd, fd) -> None\n"
"Send file descriptor given by fd over the unix domain socket\n"
"whose file decriptor is sockfd"},
{"recvfd", multiprocessing_recvfd, METH_VARARGS,
"recvfd(sockfd) -> fd\n"
"Receive a file descriptor over a unix domain socket\n"
"whose file decriptor is sockfd"},
#endif
{NULL}
};
/*
* Initialize
*/
PyMODINIT_FUNC
init_multiprocessing(void)
{
PyObject *module, *temp;
/* Initialize module */
module = Py_InitModule("_multiprocessing", module_methods);
if (!module)
return;
/* Get copy of objects from pickle */
temp = PyImport_ImportModule(PICKLE_MODULE);
if (!temp)
return;
pickle_dumps = PyObject_GetAttrString(temp, "dumps");
pickle_loads = PyObject_GetAttrString(temp, "loads");
pickle_protocol = PyObject_GetAttrString(temp, "HIGHEST_PROTOCOL");
Py_XDECREF(temp);
/* Get copy of BufferTooShort */
temp = PyImport_ImportModule("multiprocessing");
if (!temp)
return;
BufferTooShort = PyObject_GetAttrString(temp, "BufferTooShort");
Py_XDECREF(temp);
/* Add connection type to module */
if (PyType_Ready(&ConnectionType) < 0)
return;
Py_INCREF(&ConnectionType);
PyModule_AddObject(module, "Connection", (PyObject*)&ConnectionType);
#if defined(MS_WINDOWS) || HAVE_SEM_OPEN
/* Add SemLock type to module */
if (PyType_Ready(&SemLockType) < 0)
return;
Py_INCREF(&SemLockType);
PyDict_SetItemString(SemLockType.tp_dict, "SEM_VALUE_MAX",
Py_BuildValue("i", SEM_VALUE_MAX));
PyModule_AddObject(module, "SemLock", (PyObject*)&SemLockType);
#endif
#ifdef MS_WINDOWS
/* Add PipeConnection to module */
if (PyType_Ready(&PipeConnectionType) < 0)
return;
Py_INCREF(&PipeConnectionType);
PyModule_AddObject(module, "PipeConnection",
(PyObject*)&PipeConnectionType);
/* Initialize win32 class and add to multiprocessing */
temp = create_win32_namespace();
if (!temp)
return;
PyModule_AddObject(module, "win32", temp);
/* Initialize the event handle used to signal Ctrl-C */
sigint_event = CreateEvent(NULL, TRUE, FALSE, NULL);
if (!sigint_event) {
PyErr_SetFromWindowsErr(0);
return;
}
if (!SetConsoleCtrlHandler(ProcessingCtrlHandler, TRUE)) {
PyErr_SetFromWindowsErr(0);
return;
}
#endif
/* Add configuration macros */
temp = PyDict_New();
if (!temp)
return;
if (PyModule_AddObject(module, "flags", temp) < 0)
return;
#define ADD_FLAG(name) \
if (PyDict_SetItemString(temp, #name, Py_BuildValue("i", name)) < 0) return
#ifdef HAVE_SEM_OPEN
ADD_FLAG(HAVE_SEM_OPEN);
#endif
#ifdef HAVE_SEM_TIMEDWAIT
ADD_FLAG(HAVE_SEM_TIMEDWAIT);
#endif
#ifdef HAVE_FD_TRANSFER
ADD_FLAG(HAVE_FD_TRANSFER);
#endif
#ifdef HAVE_BROKEN_SEM_GETVALUE
ADD_FLAG(HAVE_BROKEN_SEM_GETVALUE);
#endif
#ifdef HAVE_BROKEN_SEM_UNLINK
ADD_FLAG(HAVE_BROKEN_SEM_UNLINK);
#endif
}
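A hedged, Unix-only sketch of the descriptor-passing helpers compiled in
when HAVE_FD_TRANSFER is set (the file name below is illustrative):

    import os, socket, _multiprocessing

    if _multiprocessing.flags.get('HAVE_FD_TRANSFER'):
        parent, child = socket.socketpair()
        fd = os.open('/etc/hostname', os.O_RDONLY)
        _multiprocessing.sendfd(parent.fileno(), fd)    # ships fd via SCM_RIGHTS
        dup = _multiprocessing.recvfd(child.fileno())   # arrives as a fresh fd
        print os.read(dup, 64)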

View File

@ -0,0 +1,163 @@
#ifndef MULTIPROCESSING_H
#define MULTIPROCESSING_H
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#include "structmember.h"
#include "pythread.h"
/*
* Platform includes and definitions
*/
#ifdef MS_WINDOWS
# define WIN32_LEAN_AND_MEAN
# include <windows.h>
# include <winsock2.h>
# include <process.h> /* getpid() */
# define SEM_HANDLE HANDLE
# define SEM_VALUE_MAX LONG_MAX
#else
# include <fcntl.h> /* O_CREAT and O_EXCL */
# include <sys/socket.h>
# include <arpa/inet.h> /* htonl() and ntohl() */
# if HAVE_SEM_OPEN
# include <semaphore.h>
typedef sem_t *SEM_HANDLE;
# endif
# define HANDLE int
# define SOCKET int
# define BOOL int
# define UINT32 uint32_t
# define INT32 int32_t
# define TRUE 1
# define FALSE 0
# define INVALID_HANDLE_VALUE (-1)
#endif
/*
* Make sure Py_ssize_t is available
*/
#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
typedef int Py_ssize_t;
# define PY_SSIZE_T_MAX INT_MAX
# define PY_SSIZE_T_MIN INT_MIN
# define F_PY_SSIZE_T "i"
# define PY_FORMAT_SIZE_T ""
# define PyInt_FromSsize_t(n) PyInt_FromLong((long)n)
#else
# define F_PY_SSIZE_T "n"
#endif
/*
* Format codes
*/
#if SIZEOF_VOID_P == SIZEOF_LONG
# define F_POINTER "k"
# define T_POINTER T_ULONG
#elif defined(HAVE_LONG_LONG) && (SIZEOF_VOID_P == SIZEOF_LONG_LONG)
# define F_POINTER "K"
# define T_POINTER T_ULONGLONG
#else
# error "can't find format code for unsigned integer of same size as void*"
#endif
#ifdef MS_WINDOWS
# define F_HANDLE F_POINTER
# define T_HANDLE T_POINTER
# define F_SEM_HANDLE F_HANDLE
# define T_SEM_HANDLE T_HANDLE
# define F_DWORD "k"
# define T_DWORD T_ULONG
#else
# define F_HANDLE "i"
# define T_HANDLE T_INT
# define F_SEM_HANDLE F_POINTER
# define T_SEM_HANDLE T_POINTER
#endif
#if PY_VERSION_HEX >= 0x03000000
# define F_RBUFFER "y"
#else
# define F_RBUFFER "s"
#endif
/*
* Error codes which can be returned by functions called without GIL
*/
#define MP_SUCCESS (0)
#define MP_STANDARD_ERROR (-1)
#define MP_MEMORY_ERROR (-1001)
#define MP_END_OF_FILE (-1002)
#define MP_EARLY_END_OF_FILE (-1003)
#define MP_BAD_MESSAGE_LENGTH (-1004)
#define MP_SOCKET_ERROR (-1005)
#define MP_EXCEPTION_HAS_BEEN_SET (-1006)
PyObject *mp_SetError(PyObject *Type, int num);
/*
* Externs - not all will really exist on all platforms
*/
extern PyObject *pickle_dumps;
extern PyObject *pickle_loads;
extern PyObject *pickle_protocol;
extern PyObject *BufferTooShort;
extern PyTypeObject SemLockType;
extern PyTypeObject ConnectionType;
extern PyTypeObject PipeConnectionType;
extern HANDLE sigint_event;
/*
* Py3k compatibility
*/
#if PY_VERSION_HEX >= 0x03000000
# define PICKLE_MODULE "pickle"
# define FROM_FORMAT PyUnicode_FromFormat
# define PyInt_FromLong PyLong_FromLong
# define PyInt_FromSsize_t PyLong_FromSsize_t
#else
# define PICKLE_MODULE "cPickle"
# define FROM_FORMAT PyString_FromFormat
#endif
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
#endif
#ifndef Py_TPFLAGS_HAVE_WEAKREFS
# define Py_TPFLAGS_HAVE_WEAKREFS 0
#endif
/*
* Connection definition
*/
#define CONNECTION_BUFFER_SIZE 1024
typedef struct {
PyObject_HEAD
HANDLE handle;
int flags;
PyObject *weakreflist;
char buffer[CONNECTION_BUFFER_SIZE];
} ConnectionObject;
/*
* Miscellaneous
*/
#define MAX_MESSAGE_LENGTH 0x7fffffff
#ifndef MIN
# define MIN(x, y) ((x) < (y) ? x : y)
# define MAX(x, y) ((x) > (y) ? x : y)
#endif
#endif /* MULTIPROCESSING_H */
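The HAVE_* configuration macros tested in this header are also copied into
the module-level dict created by init_multiprocessing(), so the build can
be introspected from Python; a hedged sketch:

    import _multiprocessing
    print _multiprocessing.flags    # e.g. {'HAVE_SEM_OPEN': 1, 'HAVE_SEM_TIMEDWAIT': 1}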

View File

@ -0,0 +1,136 @@
/*
* A type which wraps a pipe handle in message oriented mode
*
* pipe_connection.c
*
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#include "multiprocessing.h"
#define CLOSE(h) CloseHandle(h)
/*
* Send a string to the pipe; assumes the pipe is in message oriented mode
*/
static Py_ssize_t
conn_send_string(ConnectionObject *conn, char *string, size_t length)
{
DWORD amount_written;
return WriteFile(conn->handle, string, length, &amount_written, NULL)
? MP_SUCCESS : MP_STANDARD_ERROR;
}
/*
* Attempts to read into buffer, or, if the buffer is too small, into
* *newbuffer.
*
* Returns the number of bytes read.  Assumes the pipe is in message
* oriented mode.
*/
static Py_ssize_t
conn_recv_string(ConnectionObject *conn, char *buffer,
size_t buflength, char **newbuffer, size_t maxlength)
{
DWORD left, length, full_length, err;
*newbuffer = NULL;
if (ReadFile(conn->handle, buffer, MIN(buflength, maxlength),
&length, NULL))
return length;
err = GetLastError();
if (err != ERROR_MORE_DATA) {
if (err == ERROR_BROKEN_PIPE)
return MP_END_OF_FILE;
return MP_STANDARD_ERROR;
}
if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, NULL, &left))
return MP_STANDARD_ERROR;
full_length = length + left;
if (full_length > maxlength)
return MP_BAD_MESSAGE_LENGTH;
*newbuffer = PyMem_Malloc(full_length);
if (*newbuffer == NULL)
return MP_MEMORY_ERROR;
memcpy(*newbuffer, buffer, length);
if (ReadFile(conn->handle, *newbuffer+length, left, &length, NULL)) {
assert(length == left);
return full_length;
} else {
PyMem_Free(*newbuffer);
return MP_STANDARD_ERROR;
}
}
/*
* Check whether any data is available for reading
*/
#define conn_poll(conn, timeout) conn_poll_save(conn, timeout, _save)
static int
conn_poll_save(ConnectionObject *conn, double timeout, PyThreadState *_save)
{
DWORD bytes, deadline, delay;
int difference, res;
BOOL block = FALSE;
if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, &bytes, NULL))
return MP_STANDARD_ERROR;
if (timeout == 0.0)
return bytes > 0;
if (timeout < 0.0)
block = TRUE;
else
/* XXX does not check for overflow */
deadline = GetTickCount() + (DWORD)(1000 * timeout + 0.5);
Sleep(0);
for (delay = 1 ; ; delay += 1) {
if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, &bytes, NULL))
return MP_STANDARD_ERROR;
else if (bytes > 0)
return TRUE;
if (!block) {
difference = deadline - GetTickCount();
if (difference < 0)
return FALSE;
if ((int)delay > difference)
delay = difference;
}
if (delay > 20)
delay = 20;
Sleep(delay);
/* check for signals */
Py_BLOCK_THREADS
res = PyErr_CheckSignals();
Py_UNBLOCK_THREADS
if (res)
return MP_EXCEPTION_HAS_BEEN_SET;
}
}
/*
* "connection.h" defines the PipeConnection type using the definitions above
*/
#define CONNECTION_NAME "PipeConnection"
#define CONNECTION_TYPE PipeConnectionType
#include "connection.h"

View File

@ -0,0 +1,625 @@
/*
* A type which wraps a semaphore
*
* semaphore.c
*
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#include "multiprocessing.h"
enum { RECURSIVE_MUTEX, SEMAPHORE };
typedef struct {
PyObject_HEAD
SEM_HANDLE handle;
long last_tid;
int count;
int maxvalue;
int kind;
} SemLockObject;
#define ISMINE(o) (o->count > 0 && PyThread_get_thread_ident() == o->last_tid)
#ifdef MS_WINDOWS
/*
* Windows definitions
*/
#define SEM_FAILED NULL
#define SEM_CLEAR_ERROR() SetLastError(0)
#define SEM_GET_LAST_ERROR() GetLastError()
#define SEM_CREATE(name, val, max) CreateSemaphore(NULL, val, max, NULL)
#define SEM_CLOSE(sem) (CloseHandle(sem) ? 0 : -1)
#define SEM_GETVALUE(sem, pval) _GetSemaphoreValue(sem, pval)
#define SEM_UNLINK(name) 0
static int
_GetSemaphoreValue(HANDLE handle, long *value)
{
long previous;
switch (WaitForSingleObject(handle, 0)) {
case WAIT_OBJECT_0:
if (!ReleaseSemaphore(handle, 1, &previous))
return MP_STANDARD_ERROR;
*value = previous + 1;
return 0;
case WAIT_TIMEOUT:
*value = 0;
return 0;
default:
return MP_STANDARD_ERROR;
}
}
static PyObject *
semlock_acquire(SemLockObject *self, PyObject *args, PyObject *kwds)
{
int blocking = 1;
double timeout;
PyObject *timeout_obj = Py_None;
DWORD res, full_msecs, msecs, start, ticks;
static char *kwlist[] = {"block", "timeout", NULL};
if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist,
&blocking, &timeout_obj))
return NULL;
/* calculate timeout */
if (!blocking) {
full_msecs = 0;
} else if (timeout_obj == Py_None) {
full_msecs = INFINITE;
} else {
timeout = PyFloat_AsDouble(timeout_obj);
if (PyErr_Occurred())
return NULL;
timeout *= 1000.0; /* convert to millisecs */
if (timeout < 0.0) {
timeout = 0.0;
} else if (timeout >= 0.5 * INFINITE) { /* 25 days */
PyErr_SetString(PyExc_OverflowError,
"timeout is too large");
return NULL;
}
full_msecs = (DWORD)(timeout + 0.5);
}
/* check whether we already own the lock */
if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) {
++self->count;
Py_RETURN_TRUE;
}
/* check whether we can acquire without blocking */
if (WaitForSingleObject(self->handle, 0) == WAIT_OBJECT_0) {
self->last_tid = GetCurrentThreadId();
++self->count;
Py_RETURN_TRUE;
}
msecs = full_msecs;
start = GetTickCount();
for ( ; ; ) {
HANDLE handles[2] = {self->handle, sigint_event};
/* do the wait */
Py_BEGIN_ALLOW_THREADS
ResetEvent(sigint_event);
res = WaitForMultipleObjects(2, handles, FALSE, msecs);
Py_END_ALLOW_THREADS
/* handle result */
if (res != WAIT_OBJECT_0 + 1)
break;
/* got SIGINT so give signal handler a chance to run */
Sleep(1);
/* if this is main thread let KeyboardInterrupt be raised */
if (PyErr_CheckSignals())
return NULL;
/* recalculate timeout */
if (msecs != INFINITE) {
ticks = GetTickCount();
if ((DWORD)(ticks - start) >= full_msecs)
Py_RETURN_FALSE;
msecs = full_msecs - (ticks - start);
}
}
/* handle result */
switch (res) {
case WAIT_TIMEOUT:
Py_RETURN_FALSE;
case WAIT_OBJECT_0:
self->last_tid = GetCurrentThreadId();
++self->count;
Py_RETURN_TRUE;
case WAIT_FAILED:
return PyErr_SetFromWindowsErr(0);
default:
PyErr_Format(PyExc_RuntimeError, "WaitForSingleObject() or "
"WaitForMultipleObjects() gave unrecognized "
"value %d", res);
return NULL;
}
}
static PyObject *
semlock_release(SemLockObject *self, PyObject *args)
{
if (self->kind == RECURSIVE_MUTEX) {
if (!ISMINE(self)) {
PyErr_SetString(PyExc_AssertionError, "attempt to "
"release recursive lock not owned "
"by thread");
return NULL;
}
if (self->count > 1) {
--self->count;
Py_RETURN_NONE;
}
assert(self->count == 1);
}
if (!ReleaseSemaphore(self->handle, 1, NULL)) {
if (GetLastError() == ERROR_TOO_MANY_POSTS) {
PyErr_SetString(PyExc_ValueError, "semaphore or lock "
"released too many times");
return NULL;
} else {
return PyErr_SetFromWindowsErr(0);
}
}
--self->count;
Py_RETURN_NONE;
}
#else /* !MS_WINDOWS */
/*
* Unix definitions
*/
#define SEM_CLEAR_ERROR()
#define SEM_GET_LAST_ERROR() 0
#define SEM_CREATE(name, val, max) sem_open(name, O_CREAT | O_EXCL, 0600, val)
#define SEM_CLOSE(sem) sem_close(sem)
#define SEM_GETVALUE(sem, pval) sem_getvalue(sem, pval)
#define SEM_UNLINK(name) sem_unlink(name)
#if HAVE_BROKEN_SEM_UNLINK
# define sem_unlink(name) 0
#endif
#if !HAVE_SEM_TIMEDWAIT
# define sem_timedwait(sem,deadline) sem_timedwait_save(sem,deadline,_save)
int
sem_timedwait_save(sem_t *sem, struct timespec *deadline, PyThreadState *_save)
{
int res;
unsigned long delay, difference;
struct timeval now, tvdeadline, tvdelay;
errno = 0;
tvdeadline.tv_sec = deadline->tv_sec;
tvdeadline.tv_usec = deadline->tv_nsec / 1000;
for (delay = 0 ; ; delay += 1000) {
/* poll */
if (sem_trywait(sem) == 0)
return 0;
else if (errno != EAGAIN)
return MP_STANDARD_ERROR;
/* get current time */
if (gettimeofday(&now, NULL) < 0)
return MP_STANDARD_ERROR;
/* check for timeout */
if (tvdeadline.tv_sec < now.tv_sec ||
(tvdeadline.tv_sec == now.tv_sec &&
tvdeadline.tv_usec <= now.tv_usec)) {
errno = ETIMEDOUT;
return MP_STANDARD_ERROR;
}
/* calculate how much time is left */
difference = (tvdeadline.tv_sec - now.tv_sec) * 1000000 +
(tvdeadline.tv_usec - now.tv_usec);
/* check delay not too long -- maximum is 20 msecs */
if (delay > 20000)
delay = 20000;
if (delay > difference)
delay = difference;
/* sleep */
tvdelay.tv_sec = delay / 1000000;
tvdelay.tv_usec = delay % 1000000;
if (select(0, NULL, NULL, NULL, &tvdelay) < 0)
return MP_STANDARD_ERROR;
/* check for signals */
Py_BLOCK_THREADS
res = PyErr_CheckSignals();
Py_UNBLOCK_THREADS
if (res) {
errno = EINTR;
return MP_EXCEPTION_HAS_BEEN_SET;
}
}
}
#endif /* !HAVE_SEM_TIMEDWAIT */
static PyObject *
semlock_acquire(SemLockObject *self, PyObject *args, PyObject *kwds)
{
int blocking = 1, res;
double timeout;
PyObject *timeout_obj = Py_None;
struct timespec deadline = {0};
struct timeval now;
long sec, nsec;
static char *kwlist[] = {"block", "timeout", NULL};
if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist,
&blocking, &timeout_obj))
return NULL;
if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) {
++self->count;
Py_RETURN_TRUE;
}
if (timeout_obj != Py_None) {
timeout = PyFloat_AsDouble(timeout_obj);
if (PyErr_Occurred())
return NULL;
if (timeout < 0.0)
timeout = 0.0;
if (gettimeofday(&now, NULL) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
sec = (long) timeout;
nsec = (long) (1e9 * (timeout - sec) + 0.5);
deadline.tv_sec = now.tv_sec + sec;
deadline.tv_nsec = now.tv_usec * 1000 + nsec;
deadline.tv_sec += (deadline.tv_nsec / 1000000000);
deadline.tv_nsec %= 1000000000;
}
do {
Py_BEGIN_ALLOW_THREADS
if (blocking && timeout_obj == Py_None)
res = sem_wait(self->handle);
else if (!blocking)
res = sem_trywait(self->handle);
else
res = sem_timedwait(self->handle, &deadline);
Py_END_ALLOW_THREADS
if (res == MP_EXCEPTION_HAS_BEEN_SET)
break;
} while (res < 0 && errno == EINTR && !PyErr_CheckSignals());
if (res < 0) {
if (errno == EAGAIN || errno == ETIMEDOUT)
Py_RETURN_FALSE;
else if (errno == EINTR)
return NULL;
else
return PyErr_SetFromErrno(PyExc_OSError);
}
++self->count;
self->last_tid = PyThread_get_thread_ident();
Py_RETURN_TRUE;
}
static PyObject *
semlock_release(SemLockObject *self, PyObject *args)
{
if (self->kind == RECURSIVE_MUTEX) {
if (!ISMINE(self)) {
PyErr_SetString(PyExc_AssertionError, "attempt to "
"release recursive lock not owned "
"by thread");
return NULL;
}
if (self->count > 1) {
--self->count;
Py_RETURN_NONE;
}
assert(self->count == 1);
} else {
#if HAVE_BROKEN_SEM_GETVALUE
/* We only do a proper check for the maxvalue == 1 case */
if (self->maxvalue == 1) {
/* make sure that it is already locked */
if (sem_trywait(self->handle) < 0) {
if (errno != EAGAIN) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
/* it is already locked as expected */
} else {
/* it was not locked so undo wait and raise */
if (sem_post(self->handle) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
PyErr_SetString(PyExc_ValueError, "semaphore "
"or lock released too many "
"times");
return NULL;
}
}
#else
int sval;
/* This check is not an absolute guarantee that the semaphore
does not rise above maxvalue. */
if (sem_getvalue(self->handle, &sval) < 0) {
return PyErr_SetFromErrno(PyExc_OSError);
} else if (sval >= self->maxvalue) {
PyErr_SetString(PyExc_ValueError, "semaphore or lock "
"released too many times");
return NULL;
}
#endif
}
if (sem_post(self->handle) < 0)
return PyErr_SetFromErrno(PyExc_OSError);
--self->count;
Py_RETURN_NONE;
}
#endif /* !MS_WINDOWS */
/*
* All platforms
*/
static PyObject *
newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue)
{
SemLockObject *self;
self = PyObject_New(SemLockObject, type);
if (!self)
return NULL;
self->handle = handle;
self->kind = kind;
self->count = 0;
self->last_tid = 0;
self->maxvalue = maxvalue;
return (PyObject*)self;
}
static PyObject *
semlock_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
char buffer[256];
SEM_HANDLE handle = SEM_FAILED;
int kind, maxvalue, value;
PyObject *result;
static char *kwlist[] = {"kind", "value", "maxvalue", NULL};
static int counter = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwds, "iii", kwlist,
&kind, &value, &maxvalue))
return NULL;
if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) {
PyErr_SetString(PyExc_ValueError, "unrecognized kind");
return NULL;
}
PyOS_snprintf(buffer, sizeof(buffer), "/mp%d-%d", getpid(), counter++);
SEM_CLEAR_ERROR();
handle = SEM_CREATE(buffer, value, maxvalue);
/* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */
if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0)
goto failure;
if (SEM_UNLINK(buffer) < 0)
goto failure;
result = newsemlockobject(type, handle, kind, maxvalue);
if (!result)
goto failure;
return result;
failure:
if (handle != SEM_FAILED)
SEM_CLOSE(handle);
mp_SetError(NULL, MP_STANDARD_ERROR);
return NULL;
}
static PyObject *
semlock_rebuild(PyTypeObject *type, PyObject *args)
{
SEM_HANDLE handle;
int kind, maxvalue;
if (!PyArg_ParseTuple(args, F_SEM_HANDLE "ii",
&handle, &kind, &maxvalue))
return NULL;
return newsemlockobject(type, handle, kind, maxvalue);
}
static void
semlock_dealloc(SemLockObject* self)
{
if (self->handle != SEM_FAILED)
SEM_CLOSE(self->handle);
PyObject_Del(self);
}
static PyObject *
semlock_count(SemLockObject *self)
{
return PyInt_FromLong((long)self->count);
}
static PyObject *
semlock_ismine(SemLockObject *self)
{
/* only makes sense for a lock */
return PyBool_FromLong(ISMINE(self));
}
static PyObject *
semlock_getvalue(SemLockObject *self)
{
#if HAVE_BROKEN_SEM_GETVALUE
PyErr_SetNone(PyExc_NotImplementedError);
return NULL;
#else
int sval;
if (SEM_GETVALUE(self->handle, &sval) < 0)
return mp_SetError(NULL, MP_STANDARD_ERROR);
/* some posix implementations use negative numbers to indicate
the number of waiting threads */
if (sval < 0)
sval = 0;
return PyInt_FromLong((long)sval);
#endif
}
static PyObject *
semlock_iszero(SemLockObject *self)
{
int sval;
#if HAVE_BROKEN_SEM_GETVALUE
if (sem_trywait(self->handle) < 0) {
if (errno == EAGAIN)
Py_RETURN_TRUE;
return mp_SetError(NULL, MP_STANDARD_ERROR);
} else {
if (sem_post(self->handle) < 0)
return mp_SetError(NULL, MP_STANDARD_ERROR);
Py_RETURN_FALSE;
}
#else
if (SEM_GETVALUE(self->handle, &sval) < 0)
return mp_SetError(NULL, MP_STANDARD_ERROR);
return PyBool_FromLong((long)sval == 0);
#endif
}
static PyObject *
semlock_afterfork(SemLockObject *self)
{
self->count = 0;
Py_RETURN_NONE;
}
/*
* Semaphore methods
*/
static PyMethodDef semlock_methods[] = {
{"acquire", (PyCFunction)semlock_acquire, METH_VARARGS | METH_KEYWORDS,
"acquire the semaphore/lock"},
{"release", (PyCFunction)semlock_release, METH_NOARGS,
"release the semaphore/lock"},
{"__enter__", (PyCFunction)semlock_acquire, METH_VARARGS,
"enter the semaphore/lock"},
{"__exit__", (PyCFunction)semlock_release, METH_VARARGS,
"exit the semaphore/lock"},
{"_count", (PyCFunction)semlock_count, METH_NOARGS,
"num of `acquire()`s minus num of `release()`s for this process"},
{"_is_mine", (PyCFunction)semlock_ismine, METH_NOARGS,
"whether the lock is owned by this thread"},
{"_get_value", (PyCFunction)semlock_getvalue, METH_NOARGS,
"get the value of the semaphore"},
{"_is_zero", (PyCFunction)semlock_iszero, METH_NOARGS,
"returns whether semaphore has value zero"},
{"_rebuild", (PyCFunction)semlock_rebuild, METH_VARARGS | METH_CLASS,
""},
{"_after_fork", (PyCFunction)semlock_afterfork, METH_NOARGS,
"rezero the net acquisition count after fork()"},
{NULL}
};
/*
* Member table
*/
static PyMemberDef semlock_members[] = {
{"handle", T_SEM_HANDLE, offsetof(SemLockObject, handle), READONLY,
""},
{"kind", T_INT, offsetof(SemLockObject, kind), READONLY,
""},
{"maxvalue", T_INT, offsetof(SemLockObject, maxvalue), READONLY,
""},
{NULL}
};
/*
* Semaphore type
*/
PyTypeObject SemLockType = {
PyVarObject_HEAD_INIT(NULL, 0)
/* tp_name */ "_multiprocessing.SemLock",
/* tp_basicsize */ sizeof(SemLockObject),
/* tp_itemsize */ 0,
/* tp_dealloc */ (destructor)semlock_dealloc,
/* tp_print */ 0,
/* tp_getattr */ 0,
/* tp_setattr */ 0,
/* tp_compare */ 0,
/* tp_repr */ 0,
/* tp_as_number */ 0,
/* tp_as_sequence */ 0,
/* tp_as_mapping */ 0,
/* tp_hash */ 0,
/* tp_call */ 0,
/* tp_str */ 0,
/* tp_getattro */ 0,
/* tp_setattro */ 0,
/* tp_as_buffer */ 0,
/* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
/* tp_doc */ "Semaphore/Mutex type",
/* tp_traverse */ 0,
/* tp_clear */ 0,
/* tp_richcompare */ 0,
/* tp_weaklistoffset */ 0,
/* tp_iter */ 0,
/* tp_iternext */ 0,
/* tp_methods */ semlock_methods,
/* tp_members */ semlock_members,
/* tp_getset */ 0,
/* tp_base */ 0,
/* tp_dict */ 0,
/* tp_descr_get */ 0,
/* tp_descr_set */ 0,
/* tp_dictoffset */ 0,
/* tp_init */ 0,
/* tp_alloc */ 0,
/* tp_new */ semlock_new,
};
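A hedged sketch of driving the SemLock type directly (it is only compiled
in where semaphores are available; kind 1 is SEMAPHORE in the enum at the
top of this file):

    import _multiprocessing

    sem = _multiprocessing.SemLock(1, 2, 2)   # kind=SEMAPHORE, value=2, maxvalue=2
    print sem.acquire()                       # True
    print sem.acquire(False)                  # True  -- second slot taken
    print sem.acquire(False)                  # False -- trywait would block
    sem.release()
    print sem._get_value()                    # 1, or NotImplementedError where
                                              # sem_getvalue() is broken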

View File

@ -0,0 +1,180 @@
/*
* A type which wraps a socket
*
* socket_connection.c
*
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#include "multiprocessing.h"
#ifdef MS_WINDOWS
# define WRITE(h, buffer, length) send((SOCKET)h, buffer, length, 0)
# define READ(h, buffer, length) recv((SOCKET)h, buffer, length, 0)
# define CLOSE(h) closesocket((SOCKET)h)
#else
# define WRITE(h, buffer, length) write(h, buffer, length)
# define READ(h, buffer, length) read(h, buffer, length)
# define CLOSE(h) close(h)
#endif
/*
* Send string to file descriptor
*/
static Py_ssize_t
_conn_sendall(HANDLE h, char *string, size_t length)
{
char *p = string;
Py_ssize_t res;
while (length > 0) {
res = WRITE(h, p, length);
if (res < 0)
return MP_SOCKET_ERROR;
length -= res;
p += res;
}
return MP_SUCCESS;
}
/*
* Receive string of exact length from file descriptor
*/
static Py_ssize_t
_conn_recvall(HANDLE h, char *buffer, size_t length)
{
size_t remaining = length;
Py_ssize_t temp;
char *p = buffer;
while (remaining > 0) {
temp = READ(h, p, remaining);
if (temp <= 0) {
if (temp == 0)
return remaining == length ?
MP_END_OF_FILE : MP_EARLY_END_OF_FILE;
else
return temp;
}
remaining -= temp;
p += temp;
}
return MP_SUCCESS;
}
/*
* Send a string prepended by the string length in network byte order
*/
static Py_ssize_t
conn_send_string(ConnectionObject *conn, char *string, size_t length)
{
/* The "header" of the message is a 32 bit unsigned number (in
network order) which specifies the length of the "body". If
the message is shorter than about 16kb then it is quicker to
combine the "header" and the "body" of the message and send
them at once. */
if (length < (16*1024)) {
char *message;
int res;
message = PyMem_Malloc(length+4);
if (message == NULL)
return MP_MEMORY_ERROR;
*(UINT32*)message = htonl((UINT32)length);
memcpy(message+4, string, length);
res = _conn_sendall(conn->handle, message, length+4);
PyMem_Free(message);
return res;
} else {
UINT32 lenbuff;
if (length > MAX_MESSAGE_LENGTH)
return MP_BAD_MESSAGE_LENGTH;
lenbuff = htonl((UINT32)length);
return _conn_sendall(conn->handle, (char*)&lenbuff, 4) ||
_conn_sendall(conn->handle, string, length);
}
}
/*
* Attempts to read into buffer, or failing that into *newbuffer
*
* Returns number of bytes read.
*/
static Py_ssize_t
conn_recv_string(ConnectionObject *conn, char *buffer,
size_t buflength, char **newbuffer, size_t maxlength)
{
int res;
UINT32 ulength;
*newbuffer = NULL;
res = _conn_recvall(conn->handle, (char*)&ulength, 4);
if (res < 0)
return res;
ulength = ntohl(ulength);
if (ulength > maxlength)
return MP_BAD_MESSAGE_LENGTH;
if (ulength <= buflength) {
res = _conn_recvall(conn->handle, buffer, (size_t)ulength);
return res < 0 ? res : ulength;
} else {
*newbuffer = PyMem_Malloc((size_t)ulength);
if (*newbuffer == NULL)
return MP_MEMORY_ERROR;
res = _conn_recvall(conn->handle, *newbuffer, (size_t)ulength);
return res < 0 ? (Py_ssize_t)res : (Py_ssize_t)ulength;
}
}
/*
* Check whether any data is available for reading -- a negative timeout blocks
*/
static int
conn_poll(ConnectionObject *conn, double timeout)
{
int res;
fd_set rfds;
FD_ZERO(&rfds);
FD_SET((SOCKET)conn->handle, &rfds);
if (timeout < 0.0) {
res = select((int)conn->handle+1, &rfds, NULL, NULL, NULL);
} else {
struct timeval tv;
tv.tv_sec = (long)timeout;
tv.tv_usec = (long)((timeout - tv.tv_sec) * 1e6 + 0.5);
res = select((int)conn->handle+1, &rfds, NULL, NULL, &tv);
}
if (res < 0) {
return MP_SOCKET_ERROR;
} else if (FD_ISSET(conn->handle, &rfds)) {
return TRUE;
} else {
assert(res == 0);
return FALSE;
}
}
/*
* "connection.h" defines the Connection type using defs above
*/
#define CONNECTION_NAME "Connection"
#define CONNECTION_TYPE ConnectionType
#include "connection.h"

View File

@ -0,0 +1,260 @@
/*
* Win32 functions used by multiprocessing package
*
* win32_functions.c
*
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#include "multiprocessing.h"
#define WIN32_FUNCTION(func) \
{#func, (PyCFunction)win32_ ## func, METH_VARARGS | METH_STATIC, ""}
#define WIN32_CONSTANT(fmt, con) \
PyDict_SetItemString(Win32Type.tp_dict, #con, Py_BuildValue(fmt, con))
static PyObject *
win32_CloseHandle(PyObject *self, PyObject *args)
{
HANDLE hObject;
BOOL success;
if (!PyArg_ParseTuple(args, F_HANDLE, &hObject))
return NULL;
Py_BEGIN_ALLOW_THREADS
success = CloseHandle(hObject);
Py_END_ALLOW_THREADS
if (!success)
return PyErr_SetFromWindowsErr(0);
Py_RETURN_NONE;
}
static PyObject *
win32_ConnectNamedPipe(PyObject *self, PyObject *args)
{
HANDLE hNamedPipe;
LPOVERLAPPED lpOverlapped;
BOOL success;
if (!PyArg_ParseTuple(args, F_HANDLE F_POINTER,
&hNamedPipe, &lpOverlapped))
return NULL;
Py_BEGIN_ALLOW_THREADS
success = ConnectNamedPipe(hNamedPipe, lpOverlapped);
Py_END_ALLOW_THREADS
if (!success)
return PyErr_SetFromWindowsErr(0);
Py_RETURN_NONE;
}
static PyObject *
win32_CreateFile(PyObject *self, PyObject *args)
{
LPCTSTR lpFileName;
DWORD dwDesiredAccess;
DWORD dwShareMode;
LPSECURITY_ATTRIBUTES lpSecurityAttributes;
DWORD dwCreationDisposition;
DWORD dwFlagsAndAttributes;
HANDLE hTemplateFile;
HANDLE handle;
if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_POINTER
F_DWORD F_DWORD F_HANDLE,
&lpFileName, &dwDesiredAccess, &dwShareMode,
&lpSecurityAttributes, &dwCreationDisposition,
&dwFlagsAndAttributes, &hTemplateFile))
return NULL;
Py_BEGIN_ALLOW_THREADS
handle = CreateFile(lpFileName, dwDesiredAccess,
dwShareMode, lpSecurityAttributes,
dwCreationDisposition,
dwFlagsAndAttributes, hTemplateFile);
Py_END_ALLOW_THREADS
if (handle == INVALID_HANDLE_VALUE)
return PyErr_SetFromWindowsErr(0);
return Py_BuildValue(F_HANDLE, handle);
}
static PyObject *
win32_CreateNamedPipe(PyObject *self, PyObject *args)
{
LPCTSTR lpName;
DWORD dwOpenMode;
DWORD dwPipeMode;
DWORD nMaxInstances;
DWORD nOutBufferSize;
DWORD nInBufferSize;
DWORD nDefaultTimeOut;
LPSECURITY_ATTRIBUTES lpSecurityAttributes;
HANDLE handle;
if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_DWORD
F_DWORD F_DWORD F_DWORD F_POINTER,
&lpName, &dwOpenMode, &dwPipeMode,
&nMaxInstances, &nOutBufferSize,
&nInBufferSize, &nDefaultTimeOut,
&lpSecurityAttributes))
return NULL;
Py_BEGIN_ALLOW_THREADS
handle = CreateNamedPipe(lpName, dwOpenMode, dwPipeMode,
nMaxInstances, nOutBufferSize,
nInBufferSize, nDefaultTimeOut,
lpSecurityAttributes);
Py_END_ALLOW_THREADS
if (handle == INVALID_HANDLE_VALUE)
return PyErr_SetFromWindowsErr(0);
return Py_BuildValue(F_HANDLE, handle);
}
static PyObject *
win32_ExitProcess(PyObject *self, PyObject *args)
{
UINT uExitCode;
if (!PyArg_ParseTuple(args, "I", &uExitCode))
return NULL;
ExitProcess(uExitCode);
return NULL;
}
static PyObject *
win32_GetLastError(PyObject *self, PyObject *args)
{
return Py_BuildValue(F_DWORD, GetLastError());
}
static PyObject *
win32_OpenProcess(PyObject *self, PyObject *args)
{
DWORD dwDesiredAccess;
BOOL bInheritHandle;
DWORD dwProcessId;
HANDLE handle;
if (!PyArg_ParseTuple(args, F_DWORD "i" F_DWORD,
&dwDesiredAccess, &bInheritHandle, &dwProcessId))
return NULL;
handle = OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId);
if (handle == NULL)
return PyErr_SetFromWindowsErr(0);
return Py_BuildValue(F_HANDLE, handle);
}
static PyObject *
win32_SetNamedPipeHandleState(PyObject *self, PyObject *args)
{
HANDLE hNamedPipe;
PyObject *oArgs[3];
DWORD dwArgs[3], *pArgs[3] = {NULL, NULL, NULL};
int i;
if (!PyArg_ParseTuple(args, F_HANDLE "OOO",
&hNamedPipe, &oArgs[0], &oArgs[1], &oArgs[2]))
return NULL;
PyErr_Clear();
for (i = 0 ; i < 3 ; i++) {
if (oArgs[i] != Py_None) {
dwArgs[i] = PyInt_AsUnsignedLongMask(oArgs[i]);
if (PyErr_Occurred())
return NULL;
pArgs[i] = &dwArgs[i];
}
}
if (!SetNamedPipeHandleState(hNamedPipe, pArgs[0], pArgs[1], pArgs[2]))
return PyErr_SetFromWindowsErr(0);
Py_RETURN_NONE;
}
static PyObject *
win32_WaitNamedPipe(PyObject *self, PyObject *args)
{
LPCTSTR lpNamedPipeName;
DWORD nTimeOut;
BOOL success;
if (!PyArg_ParseTuple(args, "s" F_DWORD, &lpNamedPipeName, &nTimeOut))
return NULL;
Py_BEGIN_ALLOW_THREADS
success = WaitNamedPipe(lpNamedPipeName, nTimeOut);
Py_END_ALLOW_THREADS
if (!success)
return PyErr_SetFromWindowsErr(0);
Py_RETURN_NONE;
}
static PyMethodDef win32_methods[] = {
WIN32_FUNCTION(CloseHandle),
WIN32_FUNCTION(GetLastError),
WIN32_FUNCTION(OpenProcess),
WIN32_FUNCTION(ExitProcess),
WIN32_FUNCTION(ConnectNamedPipe),
WIN32_FUNCTION(CreateFile),
WIN32_FUNCTION(CreateNamedPipe),
WIN32_FUNCTION(SetNamedPipeHandleState),
WIN32_FUNCTION(WaitNamedPipe),
{NULL}
};
PyTypeObject Win32Type = {
PyVarObject_HEAD_INIT(NULL, 0)
};
PyObject *
create_win32_namespace(void)
{
Win32Type.tp_name = "_multiprocessing.win32";
Win32Type.tp_methods = win32_methods;
if (PyType_Ready(&Win32Type) < 0)
return NULL;
Py_INCREF(&Win32Type);
WIN32_CONSTANT(F_DWORD, ERROR_ALREADY_EXISTS);
WIN32_CONSTANT(F_DWORD, ERROR_PIPE_BUSY);
WIN32_CONSTANT(F_DWORD, ERROR_PIPE_CONNECTED);
WIN32_CONSTANT(F_DWORD, ERROR_SEM_TIMEOUT);
WIN32_CONSTANT(F_DWORD, GENERIC_READ);
WIN32_CONSTANT(F_DWORD, GENERIC_WRITE);
WIN32_CONSTANT(F_DWORD, INFINITE);
WIN32_CONSTANT(F_DWORD, NMPWAIT_WAIT_FOREVER);
WIN32_CONSTANT(F_DWORD, OPEN_EXISTING);
WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_DUPLEX);
WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_INBOUND);
WIN32_CONSTANT(F_DWORD, PIPE_READMODE_MESSAGE);
WIN32_CONSTANT(F_DWORD, PIPE_TYPE_MESSAGE);
WIN32_CONSTANT(F_DWORD, PIPE_UNLIMITED_INSTANCES);
WIN32_CONSTANT(F_DWORD, PIPE_WAIT);
WIN32_CONSTANT(F_DWORD, PROCESS_ALL_ACCESS);
WIN32_CONSTANT("i", NULL);
return (PyObject*)&Win32Type;
}
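A hedged, Windows-only sketch of the namespace built above (the pipe name
is illustrative; win32.NULL is the constant registered last):

    from _multiprocessing import win32

    handle = win32.CreateNamedPipe(
        r'\\.\pipe\mp-demo', win32.PIPE_ACCESS_DUPLEX,
        win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE | win32.PIPE_WAIT,
        win32.PIPE_UNLIMITED_INSTANCES, 4096, 4096, 1000, win32.NULL)
    win32.ConnectNamedPipe(handle, win32.NULL)   # blocks until a client connects
    win32.CloseHandle(handle)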

View File

@ -1236,6 +1236,58 @@ class PyBuildExt(build_ext):
# Thomas Heller's _ctypes module
self.detect_ctypes(inc_dirs, lib_dirs)
# Richard Oudkerk's multiprocessing module
if platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
elif platform == 'darwin': # Mac OSX
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=0,
HAVE_FD_TRANSFER=1,
HAVE_BROKEN_SEM_GETVALUE=1
)
libraries = []
elif platform == 'cygwin': # Cygwin
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=0,
HAVE_BROKEN_SEM_UNLINK=1
)
libraries = []
else: # Linux and other unices
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=1
)
libraries = ['rt']
if platform == 'win32':
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/semaphore.c',
'_multiprocessing/pipe_connection.c',
'_multiprocessing/socket_connection.c',
'_multiprocessing/win32_functions.c'
]
else:
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/socket_connection.c'
]
if macros.get('HAVE_SEM_OPEN', False):
multiprocessing_srcs.append('_multiprocessing/semaphore.c')
exts.append ( Extension('_multiprocessing', multiprocessing_srcs,
define_macros=macros.items(),
include_dirs=["Modules/_multiprocessing"]))
# End multiprocessing
# Platform-specific libraries
if platform == 'linux2':
# Linux-specific modules