diff --git a/Lib/asyncio/coroutines.py b/Lib/asyncio/coroutines.py
index 3e305f90abb..b6f81a4b0f5 100644
--- a/Lib/asyncio/coroutines.py
+++ b/Lib/asyncio/coroutines.py
@@ -9,16 +9,14 @@ import sys
 import traceback
 import types
 
+from collections.abc import Awaitable, Coroutine
+
 from . import constants
 from . import events
 from . import base_futures
 from .log import logger
 
 
-# Opcode of "yield from" instruction
-_YIELD_FROM = opcode.opmap['YIELD_FROM']
-
-
 def _is_debug_mode():
     # If you set _DEBUG to true, @coroutine will wrap the resulting
     # generator objects in a CoroWrapper instance (defined below). That
@@ -39,51 +37,6 @@ def _is_debug_mode():
 _DEBUG = _is_debug_mode()
 
 
-try:
-    _types_coroutine = types.coroutine
-    _types_CoroutineType = types.CoroutineType
-except AttributeError:
-    # Python 3.4
-    _types_coroutine = None
-    _types_CoroutineType = None
-
-try:
-    _inspect_iscoroutinefunction = inspect.iscoroutinefunction
-except AttributeError:
-    # Python 3.4
-    _inspect_iscoroutinefunction = lambda func: False
-
-try:
-    from collections.abc import Coroutine as _CoroutineABC, \
-                                Awaitable as _AwaitableABC
-except ImportError:
-    _CoroutineABC = _AwaitableABC = None
-
-
-# Check for CPython issue #21209
-def has_yield_from_bug():
-    class MyGen:
-        def __init__(self):
-            self.send_args = None
-        def __iter__(self):
-            return self
-        def __next__(self):
-            return 42
-        def send(self, *what):
-            self.send_args = what
-            return None
-    def yield_from_gen(gen):
-        yield from gen
-    value = (1, 2, 3)
-    gen = MyGen()
-    coro = yield_from_gen(gen)
-    next(coro)
-    coro.send(value)
-    return gen.send_args != (value,)
-_YIELD_FROM_BUG = has_yield_from_bug()
-del has_yield_from_bug
-
-
 def debug_wrapper(gen):
     # This function is called from 'sys.set_coroutine_wrapper'.
     # We only wrap here coroutines defined via 'async def' syntax.
@@ -116,21 +69,8 @@ class CoroWrapper:
     def __next__(self):
         return self.gen.send(None)
 
-    if _YIELD_FROM_BUG:
-        # For for CPython issue #21209: using "yield from" and a custom
-        # generator, generator.send(tuple) unpacks the tuple instead of passing
-        # the tuple unchanged. Check if the caller is a generator using "yield
-        # from" to decide if the parameter should be unpacked or not.
-        def send(self, *value):
-            frame = sys._getframe()
-            caller = frame.f_back
-            assert caller.f_lasti >= 0
-            if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
-                value = value[0]
-            return self.gen.send(value)
-    else:
-        def send(self, value):
-            return self.gen.send(value)
+    def send(self, value):
+        return self.gen.send(value)
 
     def throw(self, type, value=None, traceback=None):
         return self.gen.throw(type, value, traceback)
@@ -202,7 +142,7 @@ def coroutine(func):
     If the coroutine is not yielded from before it is destroyed, an
     error message is logged.
     """
-    if _inspect_iscoroutinefunction(func):
+    if inspect.iscoroutinefunction(func):
         # In Python 3.5 that's all we need to do for coroutines
         # defined with "async def".
         # Wrapping in CoroWrapper will happen via
@@ -218,7 +158,7 @@ def coroutine(func):
             if (base_futures.isfuture(res) or inspect.isgenerator(res) or
                     isinstance(res, CoroWrapper)):
                 res = yield from res
-            elif _AwaitableABC is not None:
+            else:
                 # If 'func' returns an Awaitable (new in 3.5) we
                 # want to run it.
                 try:
@@ -226,15 +166,12 @@ def coroutine(func):
                 except AttributeError:
                     pass
                 else:
-                    if isinstance(res, _AwaitableABC):
+                    if isinstance(res, Awaitable):
                         res = yield from await_meth()
             return res
 
     if not _DEBUG:
-        if _types_coroutine is None:
-            wrapper = coro
-        else:
-            wrapper = _types_coroutine(coro)
+        wrapper = types.coroutine(coro)
     else:
         @functools.wraps(func)
         def wrapper(*args, **kwds):
@@ -259,17 +196,14 @@ _is_coroutine = object()
 
 def iscoroutinefunction(func):
     """Return True if func is a decorated coroutine function."""
-    return (getattr(func, '_is_coroutine', None) is _is_coroutine or
-            _inspect_iscoroutinefunction(func))
+    return (inspect.iscoroutinefunction(func) or
+            getattr(func, '_is_coroutine', None) is _is_coroutine)
 
 
-_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
-if _CoroutineABC is not None:
-    _COROUTINE_TYPES += (_CoroutineABC,)
-if _types_CoroutineType is not None:
-    # Prioritize native coroutine check to speed-up
-    # asyncio.iscoroutine.
-    _COROUTINE_TYPES = (_types_CoroutineType,) + _COROUTINE_TYPES
+# Prioritize native coroutine check to speed-up
+# asyncio.iscoroutine.
+_COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType,
+                    Coroutine, CoroWrapper)
 
 
 def iscoroutine(obj):
diff --git a/Lib/asyncio/unix_events.py b/Lib/asyncio/unix_events.py
index 06bcdcc5ed1..ab818da1dfa 100644
--- a/Lib/asyncio/unix_events.py
+++ b/Lib/asyncio/unix_events.py
@@ -38,13 +38,6 @@ def _sighandler_noop(signum, frame):
     pass
 
 
-try:
-    _fspath = os.fspath
-except AttributeError:
-    # Python 3.5 or earlier
-    _fspath = lambda path: path
-
-
 class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
     """Unix event loop.
 
@@ -74,7 +67,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
         Raise RuntimeError if there is a problem setting up the handler.
         """
         if (coroutines.iscoroutine(callback)
-            or coroutines.iscoroutinefunction(callback)):
+                or coroutines.iscoroutinefunction(callback)):
             raise TypeError("coroutines cannot be used "
                             "with add_signal_handler()")
         self._check_signal(sig)
@@ -226,7 +219,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
                 raise ValueError(
                     'path and sock can not be specified at the same time')
 
-            path = _fspath(path)
+            path = os.fspath(path)
             sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
             try:
                 sock.setblocking(False)
@@ -260,7 +253,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
                 raise ValueError(
                     'path and sock can not be specified at the same time')
 
-            path = _fspath(path)
+            path = os.fspath(path)
             sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
 
             # Check for abstract socket. `str` and `bytes` paths are supported.
@@ -272,7 +265,8 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
                     pass
                 except OSError as err:
                     # Directory may have permissions only to create socket.
-                    logger.error('Unable to check or remove stale UNIX socket %r: %r', path, err)
+                    logger.error('Unable to check or remove stale UNIX socket '
+                                 '%r: %r', path, err)
 
             try:
                 sock.bind(path)
@@ -306,18 +300,6 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
         return server
 
 
-if hasattr(os, 'set_blocking'):
-    def _set_nonblocking(fd):
-        os.set_blocking(fd, False)
-else:
-    import fcntl
-
-    def _set_nonblocking(fd):
-        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
-        flags = flags | os.O_NONBLOCK
-        fcntl.fcntl(fd, fcntl.F_SETFL, flags)
-
-
 class _UnixReadPipeTransport(transports.ReadTransport):
 
     max_size = 256 * 1024  # max bytes we read in one event loop iteration
@@ -340,7 +322,7 @@ class _UnixReadPipeTransport(transports.ReadTransport):
             self._protocol = None
            raise ValueError("Pipe transport is for pipes/sockets only.")
 
-        _set_nonblocking(self._fileno)
+        os.set_blocking(self._fileno, False)
 
         self._loop.call_soon(self._protocol.connection_made, self)
         # only start reading when connection_made() has been called
@@ -469,7 +451,7 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
             raise ValueError("Pipe transport is only for "
                              "pipes, sockets and character devices")
 
-        _set_nonblocking(self._fileno)
+        os.set_blocking(self._fileno, False)
         self._loop.call_soon(self._protocol.connection_made, self)
 
         # On AIX, the reader trick (to be notified when the read end of the
@@ -648,22 +630,6 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
         self._loop = None
 
 
-if hasattr(os, 'set_inheritable'):
-    # Python 3.4 and newer
-    _set_inheritable = os.set_inheritable
-else:
-    import fcntl
-
-    def _set_inheritable(fd, inheritable):
-        cloexec_flag = getattr(fcntl, 'FD_CLOEXEC', 1)
-
-        old = fcntl.fcntl(fd, fcntl.F_GETFD)
-        if not inheritable:
-            fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
-        else:
-            fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
-
-
 class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
 
     def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
@@ -675,12 +641,6 @@ class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
                 # other end). Notably this is needed on AIX, and works
                 # just fine on other platforms.
                 stdin, stdin_w = socket.socketpair()
-
-                # Mark the write end of the stdin pipe as non-inheritable,
-                # needed by close_fds=False on Python 3.3 and older
-                # (Python 3.4 implements the PEP 446, socketpair returns
-                # non-inheritable sockets)
-                _set_inheritable(stdin_w.fileno(), False)
         self._proc = subprocess.Popen(
             args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
             universal_newlines=False, bufsize=bufsize, **kwargs)
@@ -1035,8 +995,8 @@ class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
 
         super().set_event_loop(loop)
 
-        if self._watcher is not None and \
-            isinstance(threading.current_thread(), threading._MainThread):
+        if (self._watcher is not None and
+                isinstance(threading.current_thread(), threading._MainThread)):
             self._watcher.attach_loop(loop)
 
     def get_child_watcher(self):
diff --git a/Lib/test/test_asyncio/test_unix_events.py b/Lib/test/test_asyncio/test_unix_events.py
index 04284fa57d1..284c73d8daf 100644
--- a/Lib/test/test_asyncio/test_unix_events.py
+++ b/Lib/test/test_asyncio/test_unix_events.py
@@ -394,7 +394,7 @@ class UnixReadPipeTransportTests(test_utils.TestCase):
         self.pipe = mock.Mock(spec_set=io.RawIOBase)
         self.pipe.fileno.return_value = 5
 
-        blocking_patcher = mock.patch('asyncio.unix_events._set_nonblocking')
+        blocking_patcher = mock.patch('os.set_blocking')
         blocking_patcher.start()
         self.addCleanup(blocking_patcher.stop)
@@ -544,7 +544,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
         self.pipe = mock.Mock(spec_set=io.RawIOBase)
         self.pipe.fileno.return_value = 5
 
-        blocking_patcher = mock.patch('asyncio.unix_events._set_nonblocking')
+        blocking_patcher = mock.patch('os.set_blocking')
        blocking_patcher.start()
         self.addCleanup(blocking_patcher.stop)
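
For reviewers: every removal above assumes that the oldest Python this code still runs on already ships the modern replacement, so the try/except and hasattr() fallbacks can simply go away. The short standalone sketch below is not part of the patch (the helper names gen_coro and native_coro are made up for illustration); it exercises the stdlib APIs the code now calls directly: types.coroutine, inspect.iscoroutinefunction, collections.abc.Coroutine/Awaitable, os.fspath and os.set_blocking.

    # Illustrative only, not part of the patch.
    import inspect
    import os
    import pathlib
    import types
    from collections.abc import Awaitable, Coroutine


    @types.coroutine          # stands in for the removed _types_coroutine fallback
    def gen_coro():
        yield


    async def native_coro():
        pass


    if __name__ == '__main__':
        # inspect.iscoroutinefunction() replaces _inspect_iscoroutinefunction:
        # it is True only for 'async def' functions.
        assert inspect.iscoroutinefunction(native_coro)
        assert not inspect.iscoroutinefunction(gen_coro)

        # Native coroutine objects are registered with collections.abc.Coroutine,
        # so iscoroutine() can include the ABC in _COROUTINE_TYPES unconditionally.
        coro = native_coro()
        assert isinstance(coro, (types.CoroutineType, Coroutine, Awaitable))
        coro.close()

        # os.fspath() (3.6+) replaces the _fspath() shim for path-like objects.
        assert os.fspath(pathlib.PurePosixPath('/tmp/sock')) == '/tmp/sock'

        # os.set_blocking() (3.5+, Unix) replaces the fcntl-based _set_nonblocking().
        r, w = os.pipe()
        os.set_blocking(r, False)
        assert os.get_blocking(r) is False
        os.close(r)
        os.close(w)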