2010-09-18 19:35:02 -03:00
|
|
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
|
|
|
|
# Licensed to PSF under a Contributor Agreement.
|
|
|
|
|
|
|
|
"""Implements ThreadPoolExecutor."""
|
|
|
|
|
|
|
|
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
|
|
|
|
|
|
|
|
import atexit
|
|
|
|
from concurrent.futures import _base
|
|
|
|
import queue
|
|
|
|
import threading
|
|
|
|
import weakref
|
2014-09-02 14:39:18 -03:00
|
|
|
import os
|
2010-09-18 19:35:02 -03:00
|
|
|
|
|
|
|
# Workers are created as daemon threads. This is done to allow the interpreter
|
|
|
|
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
|
|
|
|
# pool (i.e. shutdown() was not called). However, allowing workers to die with
|
|
|
|
# the interpreter has two undesirable properties:
|
2016-08-30 14:47:49 -03:00
|
|
|
# - The workers would still be running during interpreter shutdown,
|
2010-09-18 19:35:02 -03:00
|
|
|
# meaning that they would fail in unpredictable ways.
|
|
|
|
# - The workers could be killed while evaluating a work item, which could
|
|
|
|
# be bad if the callable being evaluated has external side-effects e.g.
|
|
|
|
# writing to a file.
|
|
|
|
#
|
|
|
|
# To work around this problem, an exit handler is installed which tells the
|
|
|
|
# workers to exit when their work queues are empty and then waits until the
|
|
|
|
# threads finish.
|
|
|
|
|
2011-03-26 15:29:44 -03:00
|
|
|
# Maps each live worker Thread to its work queue.  Weak keys mean a dead
# thread's entry disappears automatically and the map never keeps a thread
# object alive on its own.  Read by _python_exit() to wake and join workers.
_threads_queues = weakref.WeakKeyDictionary()
# Flipped to True by the atexit hook (_python_exit) so workers know the
# interpreter is shutting down and should exit instead of blocking again.
_shutdown = False
|
|
|
|
|
|
|
|
def _python_exit():
    """Atexit hook: signal every worker thread to exit, then wait for them.

    Sets the module-level _shutdown flag, wakes each worker by enqueuing the
    None sentinel on its work queue, and joins every worker thread so the
    interpreter does not tear down while workers are still running.
    """
    global _shutdown
    _shutdown = True
    # Snapshot the weak dict so its size cannot change while we iterate.
    threads_and_queues = list(_threads_queues.items())
    # First wake all workers (None is the "exit" sentinel)...
    for _thread, work_queue in threads_and_queues:
        work_queue.put(None)
    # ...then wait for each of them to finish.
    for thread, _queue in threads_and_queues:
        thread.join()
|
2010-09-18 19:35:02 -03:00
|
|
|
|
|
|
|
# Run _python_exit at interpreter shutdown so the daemon worker threads are
# woken and joined before the runtime is torn down (see comment block above).
atexit.register(_python_exit)
|
|
|
|
|
|
|
|
class _WorkItem(object):
|
|
|
|
def __init__(self, future, fn, args, kwargs):
|
|
|
|
self.future = future
|
|
|
|
self.fn = fn
|
|
|
|
self.args = args
|
|
|
|
self.kwargs = kwargs
|
|
|
|
|
|
|
|
def run(self):
|
|
|
|
if not self.future.set_running_or_notify_cancel():
|
|
|
|
return
|
|
|
|
|
|
|
|
try:
|
|
|
|
result = self.fn(*self.args, **self.kwargs)
|
|
|
|
except BaseException as e:
|
|
|
|
self.future.set_exception(e)
|
|
|
|
else:
|
|
|
|
self.future.set_result(result)
|
|
|
|
|
|
|
|
def _worker(executor_reference, work_queue):
    """Thread target: pull work items off *work_queue* and run them forever.

    Args:
        executor_reference: weakref.ref to the owning ThreadPoolExecutor.
            Held weakly so an otherwise-unreferenced executor can be
            garbage collected while its workers block on the queue.
        work_queue: queue.Queue of _WorkItem objects; a None item is the
            sentinel that wakes a worker so it can re-check exit conditions.
    """
    try:
        while True:
            work_item = work_queue.get(block=True)
            if work_item is not None:
                work_item.run()
                # Delete references to object. See issue16284
                del work_item
                continue
            # Got the None sentinel: take a (strong) reference to the
            # executor only long enough to check whether we should exit.
            executor = executor_reference()
            # Exit if:
            #   - The interpreter is shutting down OR
            #   - The executor that owns the worker has been collected OR
            #   - The executor that owns the worker has been shutdown.
            if _shutdown or executor is None or executor._shutdown:
                # Notice other workers
                work_queue.put(None)
                return
            # Drop the strong reference before blocking again so the
            # executor remains collectable while this worker is idle.
            del executor
    except BaseException:
        # A worker must never die silently: log the failure, then let the
        # thread terminate.
        _base.LOGGER.critical('Exception in worker', exc_info=True)
|
|
|
|
|
|
|
|
class ThreadPoolExecutor(_base.Executor):
    """Executor that runs submitted callables in a pool of daemon threads."""

    def __init__(self, max_workers=None, thread_name_prefix=''):
        """Initializes a new ThreadPoolExecutor instance.

        Args:
            max_workers: The maximum number of threads that can be used to
                execute the given calls.  Defaults to 5 * os.cpu_count()
                (falling back to 5 when the CPU count cannot be determined),
                since this executor is typically used to overlap I/O rather
                than CPU work.
            thread_name_prefix: An optional name prefix to give our threads.

        Raises:
            ValueError: If max_workers is not strictly positive.
        """
        if max_workers is None:
            # Use this number because ThreadPoolExecutor is often
            # used to overlap I/O instead of CPU work.
            max_workers = (os.cpu_count() or 1) * 5
        if max_workers <= 0:
            raise ValueError("max_workers must be greater than 0")

        self._max_workers = max_workers
        # Unbounded FIFO of _WorkItem objects; None is the worker wake-up
        # sentinel (see _worker).
        self._work_queue = queue.Queue()
        # All worker threads started for this executor (threads are added,
        # never removed, while the executor is alive).
        self._threads = set()
        self._shutdown = False
        # Guards _shutdown so submit() and shutdown() cannot race.
        self._shutdown_lock = threading.Lock()
        self._thread_name_prefix = thread_name_prefix

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            # Enqueue first, then (possibly) grow the pool so a worker
            # exists to pick the item up.
            self._work_queue.put(w)
            self._adjust_thread_count()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def _adjust_thread_count(self):
        # Start one more worker thread unless the pool is already at
        # max_workers capacity.
        # When the executor gets lost, the weakref callback will wake up
        # the worker threads.  The queue is bound as a default argument so
        # the callback does not itself keep the executor alive.
        def weakref_cb(_, q=self._work_queue):
            q.put(None)
        # TODO(bquinlan): Should avoid creating new threads if there are more
        # idle threads than items in the work queue.
        num_threads = len(self._threads)
        if num_threads < self._max_workers:
            thread_name = '%s_%d' % (self._thread_name_prefix or self,
                                     num_threads)
            t = threading.Thread(name=thread_name, target=_worker,
                                 args=(weakref.ref(self, weakref_cb),
                                       self._work_queue))
            # Daemon threads don't block interpreter exit; the atexit hook
            # (_python_exit) still wakes and joins them for a clean shutdown.
            t.daemon = True
            t.start()
            self._threads.add(t)
            # Register in the module-level map so _python_exit can signal
            # this thread at interpreter shutdown.
            _threads_queues[t] = self._work_queue

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown = True
            # Wake one worker; exiting workers re-broadcast the None
            # sentinel (see _worker), so every worker eventually sees it.
            self._work_queue.put(None)
        if wait:
            for t in self._threads:
                t.join()
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
|