bpo-32166: Drop Python 3.4 code from asyncio (#4612)

* Drop Python 3.4 code from asyncio

* Fix notes

* Add missing imports

* Restore comment

* Resort imports

* Drop Python 3.4-3.5 specific code

* Drop redundant check

* Fix tests

* Restore _COROUTINE_TYPES order

* Remove useless code
Andrew Svetlov 2017-11-29 18:23:43 +02:00 committed by GitHub
parent 5d39e04290
commit cc83920ad2
3 changed files with 25 additions and 131 deletions
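
Note (not part of the commit): every API that the deleted fallbacks below guarded is guaranteed to exist on the Python versions asyncio now targets (3.6 and newer). A minimal sanity check, runnable on any CPython 3.6+ interpreter:

    import inspect
    import os
    import sys
    import types
    from collections.abc import Awaitable, Coroutine

    assert callable(types.coroutine)              # replaced _types_coroutine
    assert isinstance(types.CoroutineType, type)  # replaced _types_CoroutineType
    assert callable(inspect.iscoroutinefunction)  # replaced _inspect_iscoroutinefunction
    assert callable(os.fspath)                    # replaced the _fspath() shim (os.fspath is 3.6+)
    assert issubclass(Coroutine, Awaitable)       # replaced _CoroutineABC / _AwaitableABC
    if sys.platform != 'win32':
        # The unix_events.py helpers only ever ran on POSIX.
        assert callable(os.set_blocking)          # replaced _set_nonblocking()
        assert callable(os.set_inheritable)       # replaced _set_inheritable()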

Lib/asyncio/coroutines.py

@@ -9,16 +9,14 @@ import sys
 import traceback
 import types
 
+from collections.abc import Awaitable, Coroutine
+
 from . import constants
 from . import events
 from . import base_futures
 from .log import logger
 
 
-# Opcode of "yield from" instruction
-_YIELD_FROM = opcode.opmap['YIELD_FROM']
-
-
 def _is_debug_mode():
     # If you set _DEBUG to true, @coroutine will wrap the resulting
     # generator objects in a CoroWrapper instance (defined below). That
@@ -39,51 +37,6 @@ def _is_debug_mode():
 
 _DEBUG = _is_debug_mode()
 
-try:
-    _types_coroutine = types.coroutine
-    _types_CoroutineType = types.CoroutineType
-except AttributeError:
-    # Python 3.4
-    _types_coroutine = None
-    _types_CoroutineType = None
-
-try:
-    _inspect_iscoroutinefunction = inspect.iscoroutinefunction
-except AttributeError:
-    # Python 3.4
-    _inspect_iscoroutinefunction = lambda func: False
-
-try:
-    from collections.abc import Coroutine as _CoroutineABC, \
-                                Awaitable as _AwaitableABC
-except ImportError:
-    _CoroutineABC = _AwaitableABC = None
-
-
-# Check for CPython issue #21209
-def has_yield_from_bug():
-    class MyGen:
-        def __init__(self):
-            self.send_args = None
-        def __iter__(self):
-            return self
-        def __next__(self):
-            return 42
-        def send(self, *what):
-            self.send_args = what
-            return None
-    def yield_from_gen(gen):
-        yield from gen
-    value = (1, 2, 3)
-    gen = MyGen()
-    coro = yield_from_gen(gen)
-    next(coro)
-    coro.send(value)
-    return gen.send_args != (value,)
-
-_YIELD_FROM_BUG = has_yield_from_bug()
-del has_yield_from_bug
-
 
 def debug_wrapper(gen):
     # This function is called from 'sys.set_coroutine_wrapper'.
     # We only wrap here coroutines defined via 'async def' syntax.
@@ -116,19 +69,6 @@ class CoroWrapper:
     def __next__(self):
         return self.gen.send(None)
 
-    if _YIELD_FROM_BUG:
-        # For for CPython issue #21209: using "yield from" and a custom
-        # generator, generator.send(tuple) unpacks the tuple instead of passing
-        # the tuple unchanged. Check if the caller is a generator using "yield
-        # from" to decide if the parameter should be unpacked or not.
-        def send(self, *value):
-            frame = sys._getframe()
-            caller = frame.f_back
-            assert caller.f_lasti >= 0
-            if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
-                value = value[0]
-            return self.gen.send(value)
-    else:
-        def send(self, value):
-            return self.gen.send(value)
+    def send(self, value):
+        return self.gen.send(value)
@@ -202,7 +142,7 @@ def coroutine(func):
     If the coroutine is not yielded from before it is destroyed,
     an error message is logged.
     """
-    if _inspect_iscoroutinefunction(func):
+    if inspect.iscoroutinefunction(func):
         # In Python 3.5 that's all we need to do for coroutines
         # defined with "async def".
         # Wrapping in CoroWrapper will happen via
@@ -218,7 +158,7 @@ def coroutine(func):
         if (base_futures.isfuture(res) or inspect.isgenerator(res) or
                 isinstance(res, CoroWrapper)):
             res = yield from res
-        elif _AwaitableABC is not None:
+        else:
             # If 'func' returns an Awaitable (new in 3.5) we
             # want to run it.
             try:
@@ -226,15 +166,12 @@ def coroutine(func):
             except AttributeError:
                 pass
             else:
-                if isinstance(res, _AwaitableABC):
+                if isinstance(res, Awaitable):
                     res = yield from await_meth()
         return res
 
     if not _DEBUG:
-        if _types_coroutine is None:
-            wrapper = coro
-        else:
-            wrapper = _types_coroutine(coro)
+        wrapper = types.coroutine(coro)
     else:
         @functools.wraps(func)
         def wrapper(*args, **kwds):
@@ -259,17 +196,14 @@ _is_coroutine = object()
 
 def iscoroutinefunction(func):
     """Return True if func is a decorated coroutine function."""
-    return (getattr(func, '_is_coroutine', None) is _is_coroutine or
-            _inspect_iscoroutinefunction(func))
+    return (inspect.iscoroutinefunction(func) or
+            getattr(func, '_is_coroutine', None) is _is_coroutine)
 
 
-_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
-if _CoroutineABC is not None:
-    _COROUTINE_TYPES += (_CoroutineABC,)
-if _types_CoroutineType is not None:
-    # Prioritize native coroutine check to speed-up
-    # asyncio.iscoroutine.
-    _COROUTINE_TYPES = (_types_CoroutineType,) + _COROUTINE_TYPES
+# Prioritize native coroutine check to speed-up
+# asyncio.iscoroutine.
+_COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType,
+                    Coroutine, CoroWrapper)
 
 
 def iscoroutine(obj):
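
Note (illustration only, not part of the commit): the flattened _COROUTINE_TYPES tuple still has to recognize all three kinds of coroutine objects asyncio accepts. A quick sketch, assuming a Python version where the legacy asyncio.coroutine decorator still exists (it was removed in 3.11):

    import asyncio
    import collections.abc

    async def native():                 # native coroutine -> types.CoroutineType
        pass

    @asyncio.coroutine                  # legacy generator-based coroutine (warns on 3.8+)
    def legacy():
        yield

    class AbcBased(collections.abc.Coroutine):   # duck-typed via the Coroutine ABC
        def __await__(self):
            return iter(())
        def send(self, value):
            raise StopIteration
        def throw(self, typ, val=None, tb=None):
            raise typ

    for obj in (native(), legacy(), AbcBased()):
        assert asyncio.iscoroutine(obj)   # all three match the simplified _COROUTINE_TYPES
        obj.close()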

Lib/asyncio/unix_events.py

@@ -38,13 +38,6 @@ def _sighandler_noop(signum, frame):
     pass
 
 
-try:
-    _fspath = os.fspath
-except AttributeError:
-    # Python 3.5 or earlier
-    _fspath = lambda path: path
-
-
 class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
     """Unix event loop.
@@ -226,7 +219,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
                 raise ValueError(
                     'path and sock can not be specified at the same time')
 
-            path = _fspath(path)
+            path = os.fspath(path)
             sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
             try:
                 sock.setblocking(False)
@@ -260,7 +253,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
                 raise ValueError(
                     'path and sock can not be specified at the same time')
 
-            path = _fspath(path)
+            path = os.fspath(path)
             sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
 
             # Check for abstract socket. `str` and `bytes` paths are supported.
@@ -272,7 +265,8 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
                 pass
             except OSError as err:
                 # Directory may have permissions only to create socket.
-                logger.error('Unable to check or remove stale UNIX socket %r: %r', path, err)
+                logger.error('Unable to check or remove stale UNIX socket '
+                             '%r: %r', path, err)
 
             try:
                 sock.bind(path)
@@ -306,18 +300,6 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
 
         return server
 
-
-if hasattr(os, 'set_blocking'):
-    def _set_nonblocking(fd):
-        os.set_blocking(fd, False)
-else:
-    import fcntl
-
-    def _set_nonblocking(fd):
-        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
-        flags = flags | os.O_NONBLOCK
-        fcntl.fcntl(fd, fcntl.F_SETFL, flags)
-
 
 class _UnixReadPipeTransport(transports.ReadTransport):
 
     max_size = 256 * 1024  # max bytes we read in one event loop iteration
@@ -340,7 +322,7 @@ class _UnixReadPipeTransport(transports.ReadTransport):
             self._protocol = None
             raise ValueError("Pipe transport is for pipes/sockets only.")
 
-        _set_nonblocking(self._fileno)
+        os.set_blocking(self._fileno, False)
 
         self._loop.call_soon(self._protocol.connection_made, self)
         # only start reading when connection_made() has been called
@@ -469,7 +451,7 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
             raise ValueError("Pipe transport is only for "
                              "pipes, sockets and character devices")
 
-        _set_nonblocking(self._fileno)
+        os.set_blocking(self._fileno, False)
 
         self._loop.call_soon(self._protocol.connection_made, self)
         # On AIX, the reader trick (to be notified when the read end of the
@@ -648,22 +630,6 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
         self._loop = None
 
 
-if hasattr(os, 'set_inheritable'):
-    # Python 3.4 and newer
-    _set_inheritable = os.set_inheritable
-else:
-    import fcntl
-
-    def _set_inheritable(fd, inheritable):
-        cloexec_flag = getattr(fcntl, 'FD_CLOEXEC', 1)
-
-        old = fcntl.fcntl(fd, fcntl.F_GETFD)
-        if not inheritable:
-            fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
-        else:
-            fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
-
-
 class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
 
     def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
@@ -675,12 +641,6 @@ class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
             # other end). Notably this is needed on AIX, and works
             # just fine on other platforms.
             stdin, stdin_w = socket.socketpair()
-
-            # Mark the write end of the stdin pipe as non-inheritable,
-            # needed by close_fds=False on Python 3.3 and older
-            # (Python 3.4 implements the PEP 446, socketpair returns
-            # non-inheritable sockets)
-            _set_inheritable(stdin_w.fileno(), False)
         self._proc = subprocess.Popen(
             args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
             universal_newlines=False, bufsize=bufsize, **kwargs)
@@ -1035,8 +995,8 @@ class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
         super().set_event_loop(loop)
 
-        if self._watcher is not None and \
-                isinstance(threading.current_thread(), threading._MainThread):
+        if (self._watcher is not None and
+                isinstance(threading.current_thread(), threading._MainThread)):
             self._watcher.attach_loop(loop)
 
     def get_child_watcher(self):
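
Note (context, not part of the commit): the two dropped helpers reduce to direct stdlib calls, os.set_blocking() (3.5+) and os.fspath() (3.6+). A small POSIX-only sketch of the equivalence, using a throwaway pipe and a made-up path:

    import os
    import pathlib

    r, w = os.pipe()
    os.set_blocking(r, False)                    # was: _set_nonblocking(r)
    assert os.get_blocking(r) is False
    os.close(r)
    os.close(w)

    path = pathlib.Path('/tmp/example.sock')     # hypothetical path, for illustration only
    assert os.fspath(path) == '/tmp/example.sock'   # was: _fspath(path)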

Lib/test/test_asyncio/test_unix_events.py

@@ -394,7 +394,7 @@ class UnixReadPipeTransportTests(test_utils.TestCase):
         self.pipe = mock.Mock(spec_set=io.RawIOBase)
         self.pipe.fileno.return_value = 5
 
-        blocking_patcher = mock.patch('asyncio.unix_events._set_nonblocking')
+        blocking_patcher = mock.patch('os.set_blocking')
         blocking_patcher.start()
         self.addCleanup(blocking_patcher.stop)
@@ -544,7 +544,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
         self.pipe = mock.Mock(spec_set=io.RawIOBase)
         self.pipe.fileno.return_value = 5
 
-        blocking_patcher = mock.patch('asyncio.unix_events._set_nonblocking')
+        blocking_patcher = mock.patch('os.set_blocking')
         blocking_patcher.start()
         self.addCleanup(blocking_patcher.stop)
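
Note (standalone illustration, not part of the commit): the test change above only retargets the patch from the removed asyncio.unix_events._set_nonblocking helper to os.set_blocking itself. The same pattern in isolation, with a made-up test class (POSIX-oriented, since os.set_blocking is what gets patched):

    import os
    import unittest
    from unittest import mock

    class PatchSetBlockingDemo(unittest.TestCase):
        # Hypothetical test, mirroring how the asyncio tests patch the call.
        def test_set_blocking_is_patched(self):
            patcher = mock.patch('os.set_blocking')
            patched = patcher.start()
            self.addCleanup(patcher.stop)

            os.set_blocking(5, False)       # fd 5 is fake; the mock absorbs the call
            patched.assert_called_once_with(5, False)

    if __name__ == '__main__':
        unittest.main()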