bpo-32193: Convert asyncio to async/await usage (#4753)

* Convert asyncio/tasks.py to async/await

* Convert asyncio/queues.py to async/await

* Convert asyncio/test_utils.py to async/await

* Convert asyncio/base_subprocess.py to async/await

* Convert asyncio/subprocess.py to async/await

* Convert asyncio/streams.py to async/await

* Fix comments

* Convert asyncio/locks.py to async/await

* Convert asyncio.sleep to async def

* Add a comment

* Add missing news

* Convert stubs from AbstractEventLoop to async functions

* Convert subprocess_shell/subprocess_exec

* Convert connect_read_pipe/connect_write_pipe to async/await syntax

* Convert create_datagram_endpoint

* Convert create_unix_server/create_unix_connection

* Get rid of old style coroutines in unix_events.py

* Convert selector_events.py to async/await

* Convert wait_closed and create_connection

* Drop redundant line

* Convert base_events.py

* Code cleanup

* Drop redundant comments

* Fix indentation

* Add explicit tests for compatibility between old and new coroutines

* Convert windows event loop to use async/await

* Fix double awaiting of async function

* Convert asyncio/locks.py

* Improve docstring

* Convert tests to async/await

* Convert more tests

* Convert more tests

* Convert more tests

* Convert tests

* Improve test
Andrew Svetlov 2017-12-09 00:23:48 +02:00 committed by GitHub
parent ede157331b
commit 5f841b5538
22 changed files with 647 additions and 771 deletions
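The pattern applied throughout the patch: generator-based coroutines declared with @asyncio.coroutine and driven by "yield from" become native "async def" coroutines driven by "await". A minimal sketch of the two styles (not code from the patch; the function names are made up, and @asyncio.coroutine still existed in the Python 3.7 timeframe of this commit but was removed in Python 3.11):

import asyncio

# Old style (pre-patch): a generator-based coroutine.
@asyncio.coroutine
def read_some_old(reader):
    data = yield from reader.read(100)
    return data

# New style (post-patch): a native coroutine.
async def read_some_new(reader):
    data = await reader.read(100)
    return data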

asyncio/base_events.py

@@ -33,7 +33,6 @@ from . import coroutines
 from . import events
 from . import futures
 from . import tasks
-from .coroutines import coroutine
 from .log import logger
@@ -220,13 +219,12 @@ class Server(events.AbstractServer):
             if not waiter.done():
                 waiter.set_result(waiter)
-    @coroutine
-    def wait_closed(self):
+    async def wait_closed(self):
         if self.sockets is None or self._waiters is None:
             return
         waiter = self._loop.create_future()
         self._waiters.append(waiter)
-        yield from waiter
+        await waiter
 class BaseEventLoop(events.AbstractEventLoop):
@@ -330,8 +328,7 @@ class BaseEventLoop(events.AbstractEventLoop):
         """Create write pipe transport."""
         raise NotImplementedError
-    @coroutine
-    def _make_subprocess_transport(self, protocol, args, shell,
+    async def _make_subprocess_transport(self, protocol, args, shell,
                                          stdin, stdout, stderr, bufsize,
                                          extra=None, **kwargs):
         """Create subprocess transport."""
@@ -371,8 +368,7 @@ class BaseEventLoop(events.AbstractEventLoop):
         self._asyncgens.add(agen)
-    @coroutine
-    def shutdown_asyncgens(self):
+    async def shutdown_asyncgens(self):
         """Shutdown all active asynchronous generators."""
         self._asyncgens_shutdown_called = True
@@ -384,12 +380,11 @@ class BaseEventLoop(events.AbstractEventLoop):
         closing_agens = list(self._asyncgens)
         self._asyncgens.clear()
-        shutdown_coro = tasks.gather(
+        results = await tasks.gather(
             *[ag.aclose() for ag in closing_agens],
             return_exceptions=True,
             loop=self)
-        results = yield from shutdown_coro
         for result, agen in zip(results, closing_agens):
             if isinstance(result, Exception):
                 self.call_exception_handler({
@@ -671,9 +666,9 @@ class BaseEventLoop(events.AbstractEventLoop):
     def getnameinfo(self, sockaddr, flags=0):
         return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)
-    @coroutine
-    def create_connection(self, protocol_factory, host=None, port=None, *,
-                          ssl=None, family=0, proto=0, flags=0, sock=None,
-                          local_addr=None, server_hostname=None):
+    async def create_connection(self, protocol_factory, host=None, port=None,
+                                *, ssl=None, family=0,
+                                proto=0, flags=0, sock=None,
+                                local_addr=None, server_hostname=None):
         """Connect to a TCP server.
@@ -722,7 +717,7 @@ class BaseEventLoop(events.AbstractEventLoop):
         else:
             f2 = None
-        yield from tasks.wait(fs, loop=self)
+        await tasks.wait(fs, loop=self)
         infos = f1.result()
         if not infos:
@@ -755,7 +750,7 @@ class BaseEventLoop(events.AbstractEventLoop):
                             continue
                     if self._debug:
                         logger.debug("connect %r to %r", sock, address)
-                    yield from self.sock_connect(sock, address)
+                    await self.sock_connect(sock, address)
                 except OSError as exc:
                     if sock is not None:
                         sock.close()
@@ -793,7 +788,7 @@ class BaseEventLoop(events.AbstractEventLoop):
             raise ValueError(
                 'A Stream Socket was expected, got {!r}'.format(sock))
-        transport, protocol = yield from self._create_connection_transport(
+        transport, protocol = await self._create_connection_transport(
             sock, protocol_factory, ssl, server_hostname)
         if self._debug:
             # Get the socket from the transport because SSL transport closes
@@ -803,8 +798,7 @@ class BaseEventLoop(events.AbstractEventLoop):
                          sock, host, port, transport, protocol)
         return transport, protocol
-    @coroutine
-    def _create_connection_transport(self, sock, protocol_factory, ssl,
+    async def _create_connection_transport(self, sock, protocol_factory, ssl,
                                            server_hostname, server_side=False):
         sock.setblocking(False)
@@ -820,15 +814,14 @@ class BaseEventLoop(events.AbstractEventLoop):
             transport = self._make_socket_transport(sock, protocol, waiter)
         try:
-            yield from waiter
+            await waiter
         except:
             transport.close()
             raise
         return transport, protocol
-    @coroutine
-    def create_datagram_endpoint(self, protocol_factory,
+    async def create_datagram_endpoint(self, protocol_factory,
                                        local_addr=None, remote_addr=None, *,
                                        family=0, proto=0, flags=0,
                                        reuse_address=None, reuse_port=None,
@@ -872,7 +865,7 @@ class BaseEventLoop(events.AbstractEventLoop):
                     assert isinstance(addr, tuple) and len(addr) == 2, (
                         '2-tuple is expected')
-                    infos = yield from _ensure_resolved(
+                    infos = await _ensure_resolved(
                         addr, family=family, type=socket.SOCK_DGRAM,
                         proto=proto, flags=flags, loop=self)
                     if not infos:
@@ -918,7 +911,7 @@ class BaseEventLoop(events.AbstractEventLoop):
                 if local_addr:
                     sock.bind(local_address)
                 if remote_addr:
-                    yield from self.sock_connect(sock, remote_address)
+                    await self.sock_connect(sock, remote_address)
                     r_addr = remote_address
             except OSError as exc:
                 if sock is not None:
@@ -948,24 +941,22 @@ class BaseEventLoop(events.AbstractEventLoop):
                          remote_addr, transport, protocol)
         try:
-            yield from waiter
+            await waiter
         except:
             transport.close()
             raise
         return transport, protocol
-    @coroutine
-    def _create_server_getaddrinfo(self, host, port, family, flags):
-        infos = yield from _ensure_resolved((host, port), family=family,
+    async def _create_server_getaddrinfo(self, host, port, family, flags):
+        infos = await _ensure_resolved((host, port), family=family,
                                        type=socket.SOCK_STREAM,
                                        flags=flags, loop=self)
         if not infos:
             raise OSError('getaddrinfo({!r}) returned empty list'.format(host))
         return infos
-    @coroutine
-    def create_server(self, protocol_factory, host=None, port=None,
+    async def create_server(self, protocol_factory, host=None, port=None,
                             *,
                             family=socket.AF_UNSPEC,
                             flags=socket.AI_PASSIVE,
@@ -1011,7 +1002,7 @@ class BaseEventLoop(events.AbstractEventLoop):
             fs = [self._create_server_getaddrinfo(host, port, family=family,
                                                   flags=flags)
                   for host in hosts]
-            infos = yield from tasks.gather(*fs, loop=self)
+            infos = await tasks.gather(*fs, loop=self)
             infos = set(itertools.chain.from_iterable(infos))
             completed = False
@@ -1068,8 +1059,8 @@ class BaseEventLoop(events.AbstractEventLoop):
             logger.info("%r is serving", server)
         return server
-    @coroutine
-    def connect_accepted_socket(self, protocol_factory, sock, *, ssl=None):
+    async def connect_accepted_socket(self, protocol_factory, sock,
+                                      *, ssl=None):
         """Handle an accepted connection.
         This is used by servers that accept connections outside of
@@ -1082,7 +1073,7 @@ class BaseEventLoop(events.AbstractEventLoop):
             raise ValueError(
                 'A Stream Socket was expected, got {!r}'.format(sock))
-        transport, protocol = yield from self._create_connection_transport(
+        transport, protocol = await self._create_connection_transport(
             sock, protocol_factory, ssl, '', server_side=True)
         if self._debug:
             # Get the socket from the transport because SSL transport closes
@@ -1091,14 +1082,13 @@ class BaseEventLoop(events.AbstractEventLoop):
             logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
         return transport, protocol
-    @coroutine
-    def connect_read_pipe(self, protocol_factory, pipe):
+    async def connect_read_pipe(self, protocol_factory, pipe):
         protocol = protocol_factory()
         waiter = self.create_future()
         transport = self._make_read_pipe_transport(pipe, protocol, waiter)
         try:
-            yield from waiter
+            await waiter
         except:
             transport.close()
             raise
@@ -1108,14 +1098,13 @@ class BaseEventLoop(events.AbstractEventLoop):
                          pipe.fileno(), transport, protocol)
         return transport, protocol
-    @coroutine
-    def connect_write_pipe(self, protocol_factory, pipe):
+    async def connect_write_pipe(self, protocol_factory, pipe):
         protocol = protocol_factory()
         waiter = self.create_future()
         transport = self._make_write_pipe_transport(pipe, protocol, waiter)
         try:
-            yield from waiter
+            await waiter
         except:
             transport.close()
             raise
@@ -1138,10 +1127,12 @@ class BaseEventLoop(events.AbstractEventLoop):
             info.append('stderr=%s' % _format_pipe(stderr))
         logger.debug(' '.join(info))
-    @coroutine
-    def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
-                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                         universal_newlines=False, shell=True, bufsize=0,
+    async def subprocess_shell(self, protocol_factory, cmd, *,
+                               stdin=subprocess.PIPE,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE,
+                               universal_newlines=False,
+                               shell=True, bufsize=0,
                                **kwargs):
         if not isinstance(cmd, (bytes, str)):
             raise ValueError("cmd must be a string")
@@ -1157,14 +1148,13 @@ class BaseEventLoop(events.AbstractEventLoop):
             # (password) and may be too long
             debug_log = 'run shell command %r' % cmd
             self._log_subprocess(debug_log, stdin, stdout, stderr)
-        transport = yield from self._make_subprocess_transport(
+        transport = await self._make_subprocess_transport(
             protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
         if self._debug:
             logger.info('%s: %r', debug_log, transport)
         return transport, protocol
-    @coroutine
-    def subprocess_exec(self, protocol_factory, program, *args,
+    async def subprocess_exec(self, protocol_factory, program, *args,
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, universal_newlines=False,
                               shell=False, bufsize=0, **kwargs):
@@ -1186,7 +1176,7 @@ class BaseEventLoop(events.AbstractEventLoop):
             # (password) and may be too long
             debug_log = 'execute program %r' % program
             self._log_subprocess(debug_log, stdin, stdout, stderr)
-        transport = yield from self._make_subprocess_transport(
+        transport = await self._make_subprocess_transport(
             protocol, popen_args, False, stdin, stdout, stderr,
             bufsize, **kwargs)
         if self._debug:

asyncio/base_subprocess.py

@@ -4,7 +4,6 @@ import warnings
 from . import protocols
 from . import transports
-from .coroutines import coroutine
 from .log import logger
@@ -154,26 +153,25 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
         self._check_proc()
         self._proc.kill()
-    @coroutine
-    def _connect_pipes(self, waiter):
+    async def _connect_pipes(self, waiter):
         try:
             proc = self._proc
             loop = self._loop
             if proc.stdin is not None:
-                _, pipe = yield from loop.connect_write_pipe(
+                _, pipe = await loop.connect_write_pipe(
                     lambda: WriteSubprocessPipeProto(self, 0),
                     proc.stdin)
                 self._pipes[0] = pipe
             if proc.stdout is not None:
-                _, pipe = yield from loop.connect_read_pipe(
+                _, pipe = await loop.connect_read_pipe(
                     lambda: ReadSubprocessPipeProto(self, 1),
                     proc.stdout)
                 self._pipes[1] = pipe
             if proc.stderr is not None:
-                _, pipe = yield from loop.connect_read_pipe(
+                _, pipe = await loop.connect_read_pipe(
                     lambda: ReadSubprocessPipeProto(self, 2),
                     proc.stderr)
                 self._pipes[2] = pipe
@@ -224,8 +222,7 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
                 waiter.set_result(returncode)
         self._exit_waiters = None
-    @coroutine
-    def _wait(self):
+    async def _wait(self):
         """Wait until the process exit and return the process return code.
         This method is a coroutine."""
@@ -234,7 +231,7 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
         waiter = self._loop.create_future()
         self._exit_waiters.append(waiter)
-        return (yield from waiter)
+        return await waiter
     def _try_finish(self):
         assert not self._finished

asyncio/events.py

@@ -219,7 +219,7 @@ class AbstractServer:
         """Stop serving. This leaves existing connections open."""
         return NotImplemented
-    def wait_closed(self):
+    async def wait_closed(self):
         """Coroutine to wait until service is closed."""
         return NotImplemented
@@ -267,7 +267,7 @@ class AbstractEventLoop:
         """
         raise NotImplementedError
-    def shutdown_asyncgens(self):
+    async def shutdown_asyncgens(self):
         """Shutdown all active asynchronous generators."""
         raise NotImplementedError
@@ -302,7 +302,7 @@ class AbstractEventLoop:
     def call_soon_threadsafe(self, callback, *args):
         raise NotImplementedError
-    def run_in_executor(self, executor, func, *args):
+    async def run_in_executor(self, executor, func, *args):
         raise NotImplementedError
     def set_default_executor(self, executor):
@@ -310,21 +310,23 @@ class AbstractEventLoop:
     # Network I/O methods returning Futures.
-    def getaddrinfo(self, host, port, *, family=0, type=0, proto=0, flags=0):
+    async def getaddrinfo(self, host, port, *,
+                          family=0, type=0, proto=0, flags=0):
         raise NotImplementedError
-    def getnameinfo(self, sockaddr, flags=0):
+    async def getnameinfo(self, sockaddr, flags=0):
         raise NotImplementedError
-    def create_connection(self, protocol_factory, host=None, port=None, *,
-                          ssl=None, family=0, proto=0, flags=0, sock=None,
-                          local_addr=None, server_hostname=None):
+    async def create_connection(self, protocol_factory, host=None, port=None,
+                                *, ssl=None, family=0, proto=0,
+                                flags=0, sock=None, local_addr=None,
+                                server_hostname=None):
         raise NotImplementedError
-    def create_server(self, protocol_factory, host=None, port=None, *,
-                      family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
-                      sock=None, backlog=100, ssl=None, reuse_address=None,
-                      reuse_port=None):
+    async def create_server(self, protocol_factory, host=None, port=None,
+                            *, family=socket.AF_UNSPEC,
+                            flags=socket.AI_PASSIVE, sock=None, backlog=100,
+                            ssl=None, reuse_address=None, reuse_port=None):
         """A coroutine which creates a TCP server bound to host and port.
         The return value is a Server object which can be used to stop
@@ -362,12 +364,12 @@ class AbstractEventLoop:
         """
         raise NotImplementedError
-    def create_unix_connection(self, protocol_factory, path=None, *,
+    async def create_unix_connection(self, protocol_factory, path=None, *,
                                      ssl=None, sock=None,
                                      server_hostname=None):
         raise NotImplementedError
-    def create_unix_server(self, protocol_factory, path=None, *,
+    async def create_unix_server(self, protocol_factory, path=None, *,
                                  sock=None, backlog=100, ssl=None):
         """A coroutine which creates a UNIX Domain Socket server.
@@ -388,7 +390,7 @@ class AbstractEventLoop:
         """
         raise NotImplementedError
-    def create_datagram_endpoint(self, protocol_factory,
+    async def create_datagram_endpoint(self, protocol_factory,
                                        local_addr=None, remote_addr=None, *,
                                        family=0, proto=0, flags=0,
                                        reuse_address=None, reuse_port=None,
@@ -425,7 +427,7 @@ class AbstractEventLoop:
     # Pipes and subprocesses.
-    def connect_read_pipe(self, protocol_factory, pipe):
+    async def connect_read_pipe(self, protocol_factory, pipe):
         """Register read pipe in event loop. Set the pipe to non-blocking mode.
         protocol_factory should instantiate object with Protocol interface.
@@ -438,7 +440,7 @@ class AbstractEventLoop:
         # close fd in pipe transport then close f and vise versa.
         raise NotImplementedError
-    def connect_write_pipe(self, protocol_factory, pipe):
+    async def connect_write_pipe(self, protocol_factory, pipe):
         """Register write pipe in event loop.
         protocol_factory should instantiate object with BaseProtocol interface.
@@ -451,13 +453,17 @@ class AbstractEventLoop:
         # close fd in pipe transport then close f and vise versa.
         raise NotImplementedError
-    def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
-                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+    async def subprocess_shell(self, protocol_factory, cmd, *,
+                               stdin=subprocess.PIPE,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE,
                                **kwargs):
         raise NotImplementedError
-    def subprocess_exec(self, protocol_factory, *args, stdin=subprocess.PIPE,
-                        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+    async def subprocess_exec(self, protocol_factory, *args,
+                              stdin=subprocess.PIPE,
+                              stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE,
                               **kwargs):
         raise NotImplementedError
@@ -480,19 +486,19 @@ class AbstractEventLoop:
     # Completion based I/O methods returning Futures.
-    def sock_recv(self, sock, nbytes):
+    async def sock_recv(self, sock, nbytes):
         raise NotImplementedError
-    def sock_recv_into(self, sock, buf):
+    async def sock_recv_into(self, sock, buf):
         raise NotImplementedError
-    def sock_sendall(self, sock, data):
+    async def sock_sendall(self, sock, data):
         raise NotImplementedError
-    def sock_connect(self, sock, address):
+    async def sock_connect(self, sock, address):
         raise NotImplementedError
-    def sock_accept(self, sock):
+    async def sock_accept(self, sock):
         raise NotImplementedError
     # Signal handling.

asyncio/locks.py

@@ -66,20 +66,21 @@ class _ContextManagerMixin:
         yield from self.acquire()
         return _ContextManager(self)
-    def __await__(self):
-        # To make "with await lock" work.
-        yield from self.acquire()
+    async def __acquire_ctx(self):
+        await self.acquire()
         return _ContextManager(self)
-    @coroutine
-    def __aenter__(self):
-        yield from self.acquire()
+    def __await__(self):
+        # To make "with await lock" work.
+        return self.__acquire_ctx().__await__()
+
+    async def __aenter__(self):
+        await self.acquire()
         # We have no use for the "as ..." clause in the with
         # statement for locks.
         return None
-    @coroutine
-    def __aexit__(self, exc_type, exc, tb):
+    async def __aexit__(self, exc_type, exc, tb):
         self.release()
@@ -156,8 +157,7 @@ class Lock(_ContextManagerMixin):
         """Return True if lock is acquired."""
         return self._locked
-    @coroutine
-    def acquire(self):
+    async def acquire(self):
         """Acquire a lock.
         This method blocks until the lock is unlocked, then sets it to
@@ -170,7 +170,7 @@ class Lock(_ContextManagerMixin):
         fut = self._loop.create_future()
         self._waiters.append(fut)
         try:
-            yield from fut
+            await fut
             self._locked = True
             return True
         except futures.CancelledError:
@@ -251,8 +251,7 @@ class Event:
         to true again."""
         self._value = False
-    @coroutine
-    def wait(self):
+    async def wait(self):
         """Block until the internal flag is true.
         If the internal flag is true on entry, return True
@@ -265,7 +264,7 @@ class Event:
         fut = self._loop.create_future()
         self._waiters.append(fut)
         try:
-            yield from fut
+            await fut
             return True
         finally:
             self._waiters.remove(fut)
@@ -307,8 +306,7 @@ class Condition(_ContextManagerMixin):
             extra = '{},waiters:{}'.format(extra, len(self._waiters))
         return '<{} [{}]>'.format(res[1:-1], extra)
-    @coroutine
-    def wait(self):
+    async def wait(self):
         """Wait until notified.
         If the calling coroutine has not acquired the lock when this
@@ -327,7 +325,7 @@ class Condition(_ContextManagerMixin):
             fut = self._loop.create_future()
             self._waiters.append(fut)
             try:
-                yield from fut
+                await fut
                 return True
             finally:
                 self._waiters.remove(fut)
@@ -336,13 +334,12 @@ class Condition(_ContextManagerMixin):
             # Must reacquire lock even if wait is cancelled
             while True:
                 try:
-                    yield from self.acquire()
+                    await self.acquire()
                     break
                 except futures.CancelledError:
                     pass
-    @coroutine
-    def wait_for(self, predicate):
+    async def wait_for(self, predicate):
         """Wait until a predicate becomes true.
         The predicate should be a callable which result will be
@@ -351,7 +348,7 @@ class Condition(_ContextManagerMixin):
         """
         result = predicate()
         while not result:
-            yield from self.wait()
+            await self.wait()
             result = predicate()
         return result
@@ -432,8 +429,7 @@ class Semaphore(_ContextManagerMixin):
         """Returns True if semaphore can not be acquired immediately."""
         return self._value == 0
-    @coroutine
-    def acquire(self):
+    async def acquire(self):
         """Acquire a semaphore.
         If the internal counter is larger than zero on entry,
@@ -446,7 +442,7 @@ class Semaphore(_ContextManagerMixin):
             fut = self._loop.create_future()
             self._waiters.append(fut)
             try:
-                yield from fut
+                await fut
             except:
                 # See the similar code in Queue.get.
                 fut.cancel()
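The _ContextManagerMixin change gives every lock-like primitive a native __aenter__/__aexit__ pair while keeping the legacy "with await lock" form working through __await__, which now delegates to the new __acquire_ctx() helper. A minimal sketch of both forms as they behaved at the time of this commit (the "with await lock" form has since been removed from asyncio; illustrative only, not code from the patch):

import asyncio

async def main():
    lock = asyncio.Lock()

    # Preferred form, backed by the native __aenter__/__aexit__ added here.
    async with lock:
        assert lock.locked()

    # Legacy form, kept working via __await__ -> __acquire_ctx().
    with await lock:
        assert lock.locked()

asyncio.get_event_loop().run_until_complete(main())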

asyncio/queues.py

@@ -7,7 +7,6 @@ import heapq
 from . import events
 from . import locks
-from .coroutines import coroutine
 class QueueEmpty(Exception):
@@ -28,7 +27,7 @@ class Queue:
     """A queue, useful for coordinating producer and consumer coroutines.
     If maxsize is less than or equal to zero, the queue size is infinite. If it
-    is an integer greater than 0, then "yield from put()" will block when the
+    is an integer greater than 0, then "await put()" will block when the
     queue reaches maxsize, until an item is removed by get().
     Unlike the standard library Queue, you can reliably know this Queue's size
@@ -116,20 +115,17 @@ class Queue:
         else:
             return self.qsize() >= self._maxsize
-    @coroutine
-    def put(self, item):
+    async def put(self, item):
         """Put an item into the queue.
         Put an item into the queue. If the queue is full, wait until a free
         slot is available before adding item.
-        This method is a coroutine.
         """
         while self.full():
             putter = self._loop.create_future()
             self._putters.append(putter)
             try:
-                yield from putter
+                await putter
             except:
                 putter.cancel()  # Just in case putter is not done yet.
                 if not self.full() and not putter.cancelled():
@@ -151,19 +147,16 @@ class Queue:
         self._finished.clear()
         self._wakeup_next(self._getters)
-    @coroutine
-    def get(self):
+    async def get(self):
         """Remove and return an item from the queue.
         If queue is empty, wait until an item is available.
-        This method is a coroutine.
         """
         while self.empty():
             getter = self._loop.create_future()
             self._getters.append(getter)
             try:
-                yield from getter
+                await getter
             except:
                 getter.cancel()  # Just in case getter is not done yet.
@@ -210,8 +203,7 @@ class Queue:
         if self._unfinished_tasks == 0:
             self._finished.set()
-    @coroutine
-    def join(self):
+    async def join(self):
         """Block until all items in the queue have been gotten and processed.
         The count of unfinished tasks goes up whenever an item is added to the
@@ -220,7 +212,7 @@ class Queue:
         When the count of unfinished tasks drops to zero, join() unblocks.
         """
         if self._unfinished_tasks > 0:
-            yield from self._finished.wait()
+            await self._finished.wait()
 class PriorityQueue(Queue):
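Queue.put(), get() and join() are now native coroutines, so the docstring's "await put()" phrasing matches the code. A minimal producer/consumer sketch (illustrative only, not from the patch):

import asyncio

async def producer(queue):
    for i in range(3):
        await queue.put(i)        # blocks while the queue is full
    await queue.put(None)

async def consumer(queue):
    while True:
        item = await queue.get()  # blocks while the queue is empty
        if item is None:
            break
        print('got', item)

async def main():
    queue = asyncio.Queue(maxsize=1)
    await asyncio.gather(producer(queue), consumer(queue))

asyncio.get_event_loop().run_until_complete(main())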

asyncio/selector_events.py

@@ -24,7 +24,6 @@ from . import events
 from . import futures
 from . import transports
 from . import sslproto
-from .coroutines import coroutine
 from .log import logger
@@ -189,8 +188,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
                                              sslcontext, server)
            self.create_task(accept)
-    @coroutine
-    def _accept_connection2(self, protocol_factory, conn, extra,
+    async def _accept_connection2(self, protocol_factory, conn, extra,
                                   sslcontext=None, server=None):
         protocol = None
         transport = None
@@ -207,7 +205,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
                     server=server)
             try:
-                yield from waiter
+                await waiter
             except:
                 transport.close()
                 raise
@@ -452,8 +450,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
             fd = sock.fileno()
             self.add_writer(fd, self._sock_sendall, fut, fd, sock, data)
-    @coroutine
-    def sock_connect(self, sock, address):
+    async def sock_connect(self, sock, address):
         """Connect to a remote socket at address.
         This method is a coroutine.
@@ -465,12 +462,12 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
             resolved = base_events._ensure_resolved(
                 address, family=sock.family, proto=sock.proto, loop=self)
             if not resolved.done():
-                yield from resolved
+                await resolved
             _, _, _, _, address = resolved.result()[0]
         fut = self.create_future()
         self._sock_connect(fut, sock, address)
-        return (yield from fut)
+        return await fut
     def _sock_connect(self, fut, sock, address):
         fd = sock.fileno()

asyncio/streams.py

@@ -14,8 +14,8 @@ if hasattr(socket, 'AF_UNIX'):
 from . import coroutines
 from . import events
 from . import protocols
-from .coroutines import coroutine
 from .log import logger
+from .tasks import sleep
 _DEFAULT_LIMIT = 2 ** 16
@@ -52,8 +52,7 @@ class LimitOverrunError(Exception):
         return type(self), (self.args[0], self.consumed)
-@coroutine
-def open_connection(host=None, port=None, *,
+async def open_connection(host=None, port=None, *,
                           loop=None, limit=_DEFAULT_LIMIT, **kwds):
     """A wrapper for create_connection() returning a (reader, writer) pair.
@@ -76,14 +75,13 @@ def open_connection(host=None, port=None, *,
         loop = events.get_event_loop()
     reader = StreamReader(limit=limit, loop=loop)
     protocol = StreamReaderProtocol(reader, loop=loop)
-    transport, _ = yield from loop.create_connection(
+    transport, _ = await loop.create_connection(
         lambda: protocol, host, port, **kwds)
     writer = StreamWriter(transport, protocol, reader, loop)
     return reader, writer
-@coroutine
-def start_server(client_connected_cb, host=None, port=None, *,
+async def start_server(client_connected_cb, host=None, port=None, *,
                        loop=None, limit=_DEFAULT_LIMIT, **kwds):
     """Start a socket server, call back for each client connected.
@@ -115,27 +113,25 @@ def start_server(client_connected_cb, host=None, port=None, *,
                                         loop=loop)
         return protocol
-    return (yield from loop.create_server(factory, host, port, **kwds))
+    return await loop.create_server(factory, host, port, **kwds)
 if hasattr(socket, 'AF_UNIX'):
     # UNIX Domain Sockets are supported on this platform
-    @coroutine
-    def open_unix_connection(path=None, *,
+    async def open_unix_connection(path=None, *,
                                    loop=None, limit=_DEFAULT_LIMIT, **kwds):
         """Similar to `open_connection` but works with UNIX Domain Sockets."""
         if loop is None:
             loop = events.get_event_loop()
         reader = StreamReader(limit=limit, loop=loop)
         protocol = StreamReaderProtocol(reader, loop=loop)
-        transport, _ = yield from loop.create_unix_connection(
+        transport, _ = await loop.create_unix_connection(
             lambda: protocol, path, **kwds)
         writer = StreamWriter(transport, protocol, reader, loop)
         return reader, writer
-    @coroutine
-    def start_unix_server(client_connected_cb, path=None, *,
+    async def start_unix_server(client_connected_cb, path=None, *,
                                 loop=None, limit=_DEFAULT_LIMIT, **kwds):
         """Similar to `start_server` but works with UNIX Domain Sockets."""
         if loop is None:
@@ -147,7 +143,7 @@ if hasattr(socket, 'AF_UNIX'):
                                             loop=loop)
             return protocol
-        return (yield from loop.create_unix_server(factory, path, **kwds))
+        return await loop.create_unix_server(factory, path, **kwds)
 class FlowControlMixin(protocols.Protocol):
@@ -203,8 +199,7 @@ class FlowControlMixin(protocols.Protocol):
         else:
             waiter.set_exception(exc)
-    @coroutine
-    def _drain_helper(self):
+    async def _drain_helper(self):
         if self._connection_lost:
             raise ConnectionResetError('Connection lost')
         if not self._paused:
@@ -213,7 +208,7 @@ class FlowControlMixin(protocols.Protocol):
         assert waiter is None or waiter.cancelled()
         waiter = self._loop.create_future()
         self._drain_waiter = waiter
-        yield from waiter
+        await waiter
 class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
@@ -313,14 +308,13 @@ class StreamWriter:
     def get_extra_info(self, name, default=None):
         return self._transport.get_extra_info(name, default)
-    @coroutine
-    def drain(self):
+    async def drain(self):
         """Flush the write buffer.
         The intended use is to write
           w.write(data)
-          yield from w.drain()
+          await w.drain()
         """
         if self._reader is not None:
             exc = self._reader.exception()
@@ -331,11 +325,11 @@ class StreamWriter:
                 # Yield to the event loop so connection_lost() may be
                 # called. Without this, _drain_helper() would return
                 # immediately, and code that calls
-                #     write(...); yield from drain()
+                #     write(...); await drain()
                 # in a loop would never call connection_lost(), so it
                 # would not see an error when the socket is closed.
-                yield
-        yield from self._protocol._drain_helper()
+                await sleep(0, loop=self._loop)
+        await self._protocol._drain_helper()
 class StreamReader:
@@ -436,8 +430,7 @@ class StreamReader:
         else:
             self._paused = True
-    @coroutine
-    def _wait_for_data(self, func_name):
+    async def _wait_for_data(self, func_name):
         """Wait until feed_data() or feed_eof() is called.
         If stream was paused, automatically resume it.
@@ -460,12 +453,11 @@ class StreamReader:
         self._waiter = self._loop.create_future()
         try:
-            yield from self._waiter
+            await self._waiter
         finally:
             self._waiter = None
-    @coroutine
-    def readline(self):
+    async def readline(self):
         """Read chunk of data from the stream until newline (b'\n') is found.
         On success, return chunk that ends with newline. If only partial
@@ -484,7 +476,7 @@ class StreamReader:
         sep = b'\n'
         seplen = len(sep)
         try:
-            line = yield from self.readuntil(sep)
+            line = await self.readuntil(sep)
         except IncompleteReadError as e:
             return e.partial
         except LimitOverrunError as e:
@@ -496,8 +488,7 @@ class StreamReader:
             raise ValueError(e.args[0])
         return line
-    @coroutine
-    def readuntil(self, separator=b'\n'):
+    async def readuntil(self, separator=b'\n'):
         """Read data from the stream until ``separator`` is found.
         On success, the data and separator will be removed from the
@@ -577,7 +568,7 @@ class StreamReader:
                 raise IncompleteReadError(chunk, None)
             # _wait_for_data() will resume reading if stream was paused.
-            yield from self._wait_for_data('readuntil')
+            await self._wait_for_data('readuntil')
         if isep > self._limit:
             raise LimitOverrunError(
@@ -588,8 +579,7 @@ class StreamReader:
         self._maybe_resume_transport()
         return bytes(chunk)
-    @coroutine
-    def read(self, n=-1):
+    async def read(self, n=-1):
         """Read up to `n` bytes from the stream.
         If n is not provided, or set to -1, read until EOF and return all read
@@ -623,14 +613,14 @@ class StreamReader:
             # bytes. So just call self.read(self._limit) until EOF.
             blocks = []
             while True:
-                block = yield from self.read(self._limit)
+                block = await self.read(self._limit)
                 if not block:
                     break
                 blocks.append(block)
             return b''.join(blocks)
         if not self._buffer and not self._eof:
-            yield from self._wait_for_data('read')
+            await self._wait_for_data('read')
         # This will work right even if buffer is less than n bytes
         data = bytes(self._buffer[:n])
@@ -639,8 +629,7 @@ class StreamReader:
         self._maybe_resume_transport()
         return data
-    @coroutine
-    def readexactly(self, n):
+    async def readexactly(self, n):
         """Read exactly `n` bytes.
         Raise an IncompleteReadError if EOF is reached before `n` bytes can be
@@ -670,7 +659,7 @@ class StreamReader:
                 self._buffer.clear()
                 raise IncompleteReadError(incomplete, n)
-            yield from self._wait_for_data('readexactly')
+            await self._wait_for_data('readexactly')
         if len(self._buffer) == n:
             data = bytes(self._buffer)
@@ -684,9 +673,8 @@ class StreamReader:
     def __aiter__(self):
         return self
-    @coroutine
-    def __anext__(self):
-        val = yield from self.readline()
+    async def __anext__(self):
+        val = await self.readline()
         if val == b'':
             raise StopAsyncIteration
         return val

asyncio/subprocess.py

@@ -6,7 +6,6 @@ from . import events
 from . import protocols
 from . import streams
 from . import tasks
-from .coroutines import coroutine
 from .log import logger
@@ -121,12 +120,9 @@ class Process:
     def returncode(self):
         return self._transport.get_returncode()
-    @coroutine
-    def wait(self):
-        """Wait until the process exit and return the process return code.
-        This method is a coroutine."""
-        return (yield from self._transport._wait())
+    async def wait(self):
+        """Wait until the process exit and return the process return code."""
+        return await self._transport._wait()
     def send_signal(self, signal):
         self._transport.send_signal(signal)
@@ -137,15 +133,14 @@ class Process:
     def kill(self):
         self._transport.kill()
-    @coroutine
-    def _feed_stdin(self, input):
+    async def _feed_stdin(self, input):
         debug = self._loop.get_debug()
         self.stdin.write(input)
         if debug:
             logger.debug('%r communicate: feed stdin (%s bytes)',
                          self, len(input))
         try:
-            yield from self.stdin.drain()
+            await self.stdin.drain()
         except (BrokenPipeError, ConnectionResetError) as exc:
             # communicate() ignores BrokenPipeError and ConnectionResetError
             if debug:
@@ -155,12 +150,10 @@ class Process:
             logger.debug('%r communicate: close stdin', self)
         self.stdin.close()
-    @coroutine
-    def _noop(self):
+    async def _noop(self):
         return None
-    @coroutine
-    def _read_stream(self, fd):
+    async def _read_stream(self, fd):
         transport = self._transport.get_pipe_transport(fd)
         if fd == 2:
             stream = self.stderr
@@ -170,15 +163,14 @@ class Process:
         if self._loop.get_debug():
             name = 'stdout' if fd == 1 else 'stderr'
             logger.debug('%r communicate: read %s', self, name)
-        output = yield from stream.read()
+        output = await stream.read()
         if self._loop.get_debug():
             name = 'stdout' if fd == 1 else 'stderr'
             logger.debug('%r communicate: close %s', self, name)
         transport.close()
         return output
-    @coroutine
-    def communicate(self, input=None):
+    async def communicate(self, input=None):
         if input is not None:
             stdin = self._feed_stdin(input)
         else:
@@ -191,34 +183,34 @@ class Process:
             stderr = self._read_stream(2)
         else:
             stderr = self._noop()
-        stdin, stdout, stderr = yield from tasks.gather(stdin, stdout, stderr,
+        stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr,
                                                    loop=self._loop)
-        yield from self.wait()
+        await self.wait()
         return (stdout, stderr)
-@coroutine
-def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
-                            loop=None, limit=streams._DEFAULT_LIMIT, **kwds):
+async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
                                   loop=None, limit=streams._DEFAULT_LIMIT,
                                   **kwds):
     if loop is None:
         loop = events.get_event_loop()
     protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                         loop=loop)
-    transport, protocol = yield from loop.subprocess_shell(
+    transport, protocol = await loop.subprocess_shell(
         protocol_factory,
         cmd, stdin=stdin, stdout=stdout,
         stderr=stderr, **kwds)
     return Process(transport, protocol, loop)
-@coroutine
-def create_subprocess_exec(program, *args, stdin=None, stdout=None,
+async def create_subprocess_exec(program, *args, stdin=None, stdout=None,
                                  stderr=None, loop=None,
                                  limit=streams._DEFAULT_LIMIT, **kwds):
     if loop is None:
         loop = events.get_event_loop()
     protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                         loop=loop)
-    transport, protocol = yield from loop.subprocess_exec(
+    transport, protocol = await loop.subprocess_exec(
         protocol_factory,
         program, *args,
         stdin=stdin, stdout=stdout,
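create_subprocess_shell()/create_subprocess_exec() and the Process coroutines (wait(), communicate()) are awaited as before. A short usage sketch (assumes a POSIX `echo` executable is on PATH; illustrative only, not code from the patch):

import asyncio

async def main():
    proc = await asyncio.create_subprocess_exec(
        'echo', 'hello', stdout=asyncio.subprocess.PIPE)
    stdout, _ = await proc.communicate()   # also waits for process exit
    print(stdout.decode().strip(), proc.returncode)

asyncio.get_event_loop().run_until_complete(main())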

asyncio/tasks.py

@@ -9,6 +9,7 @@ __all__ = ['Task',
 import concurrent.futures
 import functools
 import inspect
+import types
 import warnings
 import weakref
@@ -276,8 +277,7 @@ FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
 ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
-@coroutine
-def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
+async def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
     """Wait for the Futures and coroutines given by fs to complete.
     The sequence futures must not be empty.
@@ -288,7 +288,7 @@ def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
     Usage:
-        done, pending = yield from asyncio.wait(fs)
+        done, pending = await asyncio.wait(fs)
     Note: This does not raise TimeoutError! Futures that aren't done
     when the timeout occurs are returned in the second set.
@@ -305,7 +305,7 @@ def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
     fs = {ensure_future(f, loop=loop) for f in set(fs)}
-    return (yield from _wait(fs, timeout, return_when, loop))
+    return await _wait(fs, timeout, return_when, loop)
 def _release_waiter(waiter, *args):
@@ -313,8 +313,7 @@ def _release_waiter(waiter, *args):
         waiter.set_result(None)
-@coroutine
-def wait_for(fut, timeout, *, loop=None):
+async def wait_for(fut, timeout, *, loop=None):
     """Wait for the single Future or coroutine to complete, with timeout.
     Coroutine will be wrapped in Task.
@@ -331,7 +330,7 @@ def wait_for(fut, timeout, *, loop=None):
         loop = events.get_event_loop()
     if timeout is None:
-        return (yield from fut)
+        return await fut
     if timeout <= 0:
         fut = ensure_future(fut, loop=loop)
@@ -352,7 +351,7 @@ def wait_for(fut, timeout, *, loop=None):
     try:
         # wait until the future completes or the timeout
         try:
-            yield from waiter
+            await waiter
         except futures.CancelledError:
             fut.remove_done_callback(cb)
             fut.cancel()
@@ -368,8 +367,7 @@ def wait_for(fut, timeout, *, loop=None):
         timeout_handle.cancel()
-@coroutine
-def _wait(fs, timeout, return_when, loop):
+async def _wait(fs, timeout, return_when, loop):
     """Internal helper for wait() and wait_for().
     The fs argument must be a collection of Futures.
@@ -397,7 +395,7 @@ def _wait(fs, timeout, return_when, loop):
         f.add_done_callback(_on_completion)
     try:
-        yield from waiter
+        await waiter
     finally:
         if timeout_handle is not None:
             timeout_handle.cancel()
@@ -423,10 +421,10 @@ def as_completed(fs, *, loop=None, timeout=None):
     This differs from PEP 3148; the proper way to use this is:
         for f in as_completed(fs):
-            result = yield from f  # The 'yield from' may raise.
+            result = await f  # The 'await' may raise.
             # Use result.
-    If a timeout is specified, the 'yield from' will raise
+    If a timeout is specified, the 'await' will raise
     TimeoutError when the timeout occurs before all Futures are done.
     Note: The futures 'f' are not necessarily members of fs.
@@ -453,9 +451,8 @@ def as_completed(fs, *, loop=None, timeout=None):
         if not todo and timeout_handle is not None:
             timeout_handle.cancel()
-    @coroutine
-    def _wait_for_one():
-        f = yield from done.get()
+    async def _wait_for_one():
+        f = await done.get()
         if f is None:
             # Dummy value from _on_timeout().
             raise futures.TimeoutError
@@ -469,11 +466,22 @@ def as_completed(fs, *, loop=None, timeout=None):
         yield _wait_for_one()
-@coroutine
-def sleep(delay, result=None, *, loop=None):
+@types.coroutine
+def __sleep0():
+    """Skip one event loop run cycle.
+    This is a private helper for 'asyncio.sleep()', used
+    when the 'delay' is set to 0. It uses a bare 'yield'
+    expression (which Task._step knows how to handle)
+    instead of creating a Future object.
+    """
+    yield
+async def sleep(delay, result=None, *, loop=None):
     """Coroutine that completes after a given time (in seconds)."""
     if delay == 0:
-        yield
+        await __sleep0()
         return result
     if loop is None:
@@ -483,7 +491,7 @@ def sleep(delay, result=None, *, loop=None):
                         futures._set_result_unless_cancelled,
                         future, result)
     try:
-        return (yield from future)
+        return await future
     finally:
         h.cancel()
@@ -652,11 +660,11 @@ def shield(arg, *, loop=None):
     The statement
-        res = yield from shield(something())
+        res = await shield(something())
     is exactly equivalent to the statement
-        res = yield from something()
+        res = await something()
     *except* that if the coroutine containing it is cancelled, the
     task running in something() is not cancelled. From the POV of
@@ -669,7 +677,7 @@ def shield(arg, *, loop=None):
     you can combine shield() with a try/except clause, as follows:
         try:
-            res = yield from shield(something())
+            res = await shield(something())
         except CancelledError:
             res = None
     """

asyncio/test_utils.py

@@ -30,7 +30,6 @@ from . import base_events
 from . import events
 from . import futures
 from . import tasks
-from .coroutines import coroutine
 from .log import logger
 from test import support
@@ -43,8 +42,7 @@ def dummy_ssl_context():
 def run_briefly(loop):
-    @coroutine
-    def once():
+    async def once():
         pass
     gen = once()
     t = loop.create_task(gen)

asyncio/unix_events.py

@@ -20,7 +20,6 @@ from . import events
 from . import futures
 from . import selector_events
 from . import transports
-from .coroutines import coroutine
 from .log import logger
@@ -168,8 +167,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
                                    extra=None):
         return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)
-    @coroutine
-    def _make_subprocess_transport(self, protocol, args, shell,
+    async def _make_subprocess_transport(self, protocol, args, shell,
                                          stdin, stdout, stderr, bufsize,
                                          extra=None, **kwargs):
         with events.get_child_watcher() as watcher:
@@ -182,27 +180,18 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
             watcher.add_child_handler(transp.get_pid(),
                                       self._child_watcher_callback, transp)
             try:
-                yield from waiter
-            except Exception as exc:
-                # Workaround CPython bug #23353: using yield/yield-from in an
-                # except block of a generator doesn't clear properly
-                # sys.exc_info()
-                err = exc
-            else:
-                err = None
-            if err is not None:
+                await waiter
+            except Exception:
                 transp.close()
-                yield from transp._wait()
-                raise err
+                await transp._wait()
+                raise
         return transp
     def _child_watcher_callback(self, pid, returncode, transp):
         self.call_soon_threadsafe(transp._process_exited, returncode)
-    @coroutine
-    def create_unix_connection(self, protocol_factory, path=None, *,
+    async def create_unix_connection(self, protocol_factory, path=None, *,
                                      ssl=None, sock=None,
                                      server_hostname=None):
         assert server_hostname is None or isinstance(server_hostname, str)
@@ -223,7 +212,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
             sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
             try:
                 sock.setblocking(False)
-                yield from self.sock_connect(sock, path)
+                await self.sock_connect(sock, path)
             except:
                 sock.close()
                 raise
@@ -238,12 +227,11 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
                     .format(sock))
             sock.setblocking(False)
-        transport, protocol = yield from self._create_connection_transport(
+        transport, protocol = await self._create_connection_transport(
             sock, protocol_factory, ssl, server_hostname)
         return transport, protocol
-    @coroutine
-    def create_unix_server(self, protocol_factory, path=None, *,
+    async def create_unix_server(self, protocol_factory, path=None, *,
                                  sock=None, backlog=100, ssl=None):
         if isinstance(ssl, bool):
             raise TypeError('ssl argument must be an SSLContext or None')
View File
@ -15,7 +15,6 @@ from . import proactor_events
from . import selector_events from . import selector_events
from . import tasks from . import tasks
from . import windows_utils from . import windows_utils
from .coroutines import coroutine
from .log import logger from .log import logger
@ -305,17 +304,15 @@ class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
proactor = IocpProactor() proactor = IocpProactor()
super().__init__(proactor) super().__init__(proactor)
@coroutine async def create_pipe_connection(self, protocol_factory, address):
def create_pipe_connection(self, protocol_factory, address):
f = self._proactor.connect_pipe(address) f = self._proactor.connect_pipe(address)
pipe = yield from f pipe = await f
protocol = protocol_factory() protocol = protocol_factory()
trans = self._make_duplex_pipe_transport(pipe, protocol, trans = self._make_duplex_pipe_transport(pipe, protocol,
extra={'addr': address}) extra={'addr': address})
return trans, protocol return trans, protocol
@coroutine async def start_serving_pipe(self, protocol_factory, address):
def start_serving_pipe(self, protocol_factory, address):
server = PipeServer(address) server = PipeServer(address)
def loop_accept_pipe(f=None): def loop_accept_pipe(f=None):
@ -361,8 +358,7 @@ class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
self.call_soon(loop_accept_pipe) self.call_soon(loop_accept_pipe)
return [server] return [server]
@coroutine async def _make_subprocess_transport(self, protocol, args, shell,
def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize, stdin, stdout, stderr, bufsize,
extra=None, **kwargs): extra=None, **kwargs):
waiter = self.create_future() waiter = self.create_future()
@ -371,18 +367,11 @@ class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
waiter=waiter, extra=extra, waiter=waiter, extra=extra,
**kwargs) **kwargs)
try: try:
yield from waiter await waiter
except Exception as exc: except Exception:
# Workaround CPython bug #23353: using yield/yield-from in an
# except block of a generator doesn't clear properly sys.exc_info()
err = exc
else:
err = None
if err is not None:
transp.close() transp.close()
yield from transp._wait() await transp._wait()
raise err raise
return transp return transp
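The comments deleted in this hunk and in the unix_events.py hunk above refer to CPython bug #23353: in generator-based coroutines, using yield from inside an except block could leave sys.exc_info() stale, so the exception had to be stashed in a local and re-raised after the handler. Native coroutines do not have that problem, which is why both event loops can now clean up and re-raise directly. A small self-contained sketch of the principle (not part of the patch):

    import asyncio

    async def demo(loop):
        waiter = loop.create_future()
        waiter.set_exception(RuntimeError('boom'))
        try:
            await waiter
        except Exception:
            # Cleanup may await other coroutines here; with native
            # coroutines the pending exception survives and the bare
            # 'raise' re-raises it unchanged (no manual 'err' bookkeeping).
            await asyncio.sleep(0, loop=loop)
            raise

    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(demo(loop))
    except RuntimeError as exc:
        print('re-raised:', exc)
    loop.close()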
@ -498,11 +487,10 @@ class IocpProactor:
conn.settimeout(listener.gettimeout()) conn.settimeout(listener.gettimeout())
return conn, conn.getpeername() return conn, conn.getpeername()
@coroutine async def accept_coro(future, conn):
def accept_coro(future, conn):
# Coroutine closing the accept socket if the future is cancelled # Coroutine closing the accept socket if the future is cancelled
try: try:
yield from future await future
except futures.CancelledError: except futures.CancelledError:
conn.close() conn.close()
raise raise
@ -552,8 +540,7 @@ class IocpProactor:
return self._register(ov, pipe, finish_accept_pipe) return self._register(ov, pipe, finish_accept_pipe)
@coroutine async def connect_pipe(self, address):
def connect_pipe(self, address):
delay = CONNECT_PIPE_INIT_DELAY delay = CONNECT_PIPE_INIT_DELAY
while True: while True:
# Unfortunately there is no way to do an overlapped connect to a pipe. # Unfortunately there is no way to do an overlapped connect to a pipe.
@ -568,7 +555,7 @@ class IocpProactor:
# ConnectPipe() failed with ERROR_PIPE_BUSY: retry later # ConnectPipe() failed with ERROR_PIPE_BUSY: retry later
delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY) delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
yield from tasks.sleep(delay, loop=self._loop) await tasks.sleep(delay, loop=self._loop)
return windows_utils.PipeHandle(handle) return windows_utils.PipeHandle(handle)
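The connect_pipe() loop above retries ConnectPipe() with an exponentially growing delay, capped at CONNECT_PIPE_MAX_DELAY. The same backoff pattern in isolation, as a hedged sketch; the constants and the 'attempt' callable are made up for illustration (the real constants live in windows_events.py):

    import asyncio

    INIT_DELAY = 0.001   # assumed stand-in for CONNECT_PIPE_INIT_DELAY
    MAX_DELAY = 0.100    # assumed stand-in for CONNECT_PIPE_MAX_DELAY

    async def retry_until_ready(attempt, loop):
        # 'attempt' is a hypothetical callable that raises BlockingIOError
        # while the resource (e.g. a named pipe) is still busy.
        delay = INIT_DELAY
        while True:
            try:
                return attempt()
            except BlockingIOError:
                delay = min(delay * 2, MAX_DELAY)   # double, but cap the delay
                await asyncio.sleep(delay, loop=loop)

    loop = asyncio.get_event_loop()
    tries = iter([None, None, 'ready'])
    def attempt():
        value = next(tries)
        if value is None:
            raise BlockingIOError
        return value
    print(loop.run_until_complete(retry_until_ready(attempt, loop)))
    loop.close()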
View File
@ -1316,7 +1316,8 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.loop.getaddrinfo.side_effect = mock_getaddrinfo self.loop.getaddrinfo.side_effect = mock_getaddrinfo
self.loop.sock_connect = mock.Mock() self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.return_value = () self.loop.sock_connect.return_value = self.loop.create_future()
self.loop.sock_connect.return_value.set_result(None)
self.loop._make_ssl_transport = mock.Mock() self.loop._make_ssl_transport = mock.Mock()
class _SelectorTransportMock: class _SelectorTransportMock:
@ -1416,7 +1417,8 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def test_create_server_no_getaddrinfo(self): def test_create_server_no_getaddrinfo(self):
getaddrinfo = self.loop.getaddrinfo = mock.Mock() getaddrinfo = self.loop.getaddrinfo = mock.Mock()
getaddrinfo.return_value = [] getaddrinfo.return_value = self.loop.create_future()
getaddrinfo.return_value.set_result(None)
f = self.loop.create_server(MyProto, 'python.org', 0) f = self.loop.create_server(MyProto, 'python.org', 0)
self.assertRaises(OSError, self.loop.run_until_complete, f) self.assertRaises(OSError, self.loop.run_until_complete, f)
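These test fixes follow from the conversion: create_connection() and create_server() now use await, and while "yield from ()" or "yield from []" silently iterates an empty sequence, "await ()" raises TypeError. The mocked sock_connect()/getaddrinfo() must therefore return something awaitable, e.g. an already-resolved Future. A minimal sketch of the pattern, using a plain Mock rather than a real event loop attribute (illustrative only):

    import asyncio
    from unittest import mock

    loop = asyncio.new_event_loop()

    fake = mock.Mock()
    fake.sock_connect.return_value = loop.create_future()
    fake.sock_connect.return_value.set_result(None)

    async def caller():
        # awaiting the mock's return value works because it is a Future
        await fake.sock_connect('sock', ('127.0.0.1', 0))

    loop.run_until_complete(caller())
    loop.close()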
View File
@ -285,10 +285,10 @@ class EventLoopTestsMixin:
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0) self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self): def test_run_until_complete_stopped(self):
@asyncio.coroutine
def cb(): async def cb():
self.loop.stop() self.loop.stop()
yield from asyncio.sleep(0.1, loop=self.loop) await asyncio.sleep(0.1, loop=self.loop)
task = cb() task = cb()
self.assertRaises(RuntimeError, self.assertRaises(RuntimeError,
self.loop.run_until_complete, task) self.loop.run_until_complete, task)
@ -1424,9 +1424,8 @@ class EventLoopTestsMixin:
rpipe, wpipe = os.pipe() rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024) pipeobj = io.open(rpipe, 'rb', 1024)
@asyncio.coroutine async def connect():
def connect(): t, p = await self.loop.connect_read_pipe(
t, p = yield from self.loop.connect_read_pipe(
lambda: proto, pipeobj) lambda: proto, pipeobj)
self.assertIs(p, proto) self.assertIs(p, proto)
self.assertIs(t, proto.transport) self.assertIs(t, proto.transport)
@ -1463,11 +1462,10 @@ class EventLoopTestsMixin:
rpipeobj = io.open(rpipe, 'rb', 1024) rpipeobj = io.open(rpipe, 'rb', 1024)
wpipeobj = io.open(wpipe, 'w', 1024) wpipeobj = io.open(wpipe, 'w', 1024)
@asyncio.coroutine async def connect():
def connect(): read_transport, _ = await loop.connect_read_pipe(
read_transport, _ = yield from loop.connect_read_pipe(
lambda: read_proto, rpipeobj) lambda: read_proto, rpipeobj)
write_transport, _ = yield from loop.connect_write_pipe( write_transport, _ = await loop.connect_write_pipe(
lambda: write_proto, wpipeobj) lambda: write_proto, wpipeobj)
return read_transport, write_transport return read_transport, write_transport
@ -1499,9 +1497,8 @@ class EventLoopTestsMixin:
master, slave = os.openpty() master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0) master_read_obj = io.open(master, 'rb', 0)
@asyncio.coroutine async def connect():
def connect(): t, p = await self.loop.connect_read_pipe(lambda: proto,
t, p = yield from self.loop.connect_read_pipe(lambda: proto,
master_read_obj) master_read_obj)
self.assertIs(p, proto) self.assertIs(p, proto)
self.assertIs(t, proto.transport) self.assertIs(t, proto.transport)
@ -1713,11 +1710,10 @@ class EventLoopTestsMixin:
if ov is not None: if ov is not None:
self.assertTrue(ov.pending) self.assertTrue(ov.pending)
@asyncio.coroutine async def main():
def main():
try: try:
self.loop.call_soon(f.cancel) self.loop.call_soon(f.cancel)
yield from f await f
except asyncio.CancelledError: except asyncio.CancelledError:
res = 'cancelled' res = 'cancelled'
else: else:
@ -1750,14 +1746,13 @@ class EventLoopTestsMixin:
self.loop._run_once_counter = 0 self.loop._run_once_counter = 0
self.loop._run_once = _run_once self.loop._run_once = _run_once
@asyncio.coroutine async def wait():
def wait():
loop = self.loop loop = self.loop
yield from asyncio.sleep(1e-2, loop=loop) await asyncio.sleep(1e-2, loop=loop)
yield from asyncio.sleep(1e-4, loop=loop) await asyncio.sleep(1e-4, loop=loop)
yield from asyncio.sleep(1e-6, loop=loop) await asyncio.sleep(1e-6, loop=loop)
yield from asyncio.sleep(1e-8, loop=loop) await asyncio.sleep(1e-8, loop=loop)
yield from asyncio.sleep(1e-10, loop=loop) await asyncio.sleep(1e-10, loop=loop)
self.loop.run_until_complete(wait()) self.loop.run_until_complete(wait())
# The ideal number of call is 12, but on some platforms, the selector # The ideal number of call is 12, but on some platforms, the selector
@ -2076,9 +2071,9 @@ class SubprocessTestsMixin:
self.assertEqual(7, proto.returncode) self.assertEqual(7, proto.returncode)
def test_subprocess_exec_invalid_args(self): def test_subprocess_exec_invalid_args(self):
@asyncio.coroutine
def connect(**kwds): async def connect(**kwds):
yield from self.loop.subprocess_exec( await self.loop.subprocess_exec(
asyncio.SubprocessProtocol, asyncio.SubprocessProtocol,
'pwd', **kwds) 'pwd', **kwds)
@ -2090,11 +2085,11 @@ class SubprocessTestsMixin:
self.loop.run_until_complete(connect(shell=True)) self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self): def test_subprocess_shell_invalid_args(self):
@asyncio.coroutine
def connect(cmd=None, **kwds): async def connect(cmd=None, **kwds):
if not cmd: if not cmd:
cmd = 'pwd' cmd = 'pwd'
yield from self.loop.subprocess_shell( await self.loop.subprocess_shell(
asyncio.SubprocessProtocol, asyncio.SubprocessProtocol,
cmd, **kwds) cmd, **kwds)
@ -2548,20 +2543,8 @@ class AbstractEventLoopTests(unittest.TestCase):
NotImplementedError, loop.time) NotImplementedError, loop.time)
self.assertRaises( self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None) NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.run_in_executor, f, f)
self.assertRaises( self.assertRaises(
NotImplementedError, loop.set_default_executor, f) NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.getaddrinfo, 'localhost', 8080)
self.assertRaises(
NotImplementedError, loop.getnameinfo, ('localhost', 8080))
self.assertRaises(
NotImplementedError, loop.create_connection, f)
self.assertRaises(
NotImplementedError, loop.create_server, f)
self.assertRaises(
NotImplementedError, loop.create_datagram_endpoint, f)
self.assertRaises( self.assertRaises(
NotImplementedError, loop.add_reader, 1, f) NotImplementedError, loop.add_reader, 1, f)
self.assertRaises( self.assertRaises(
@ -2570,33 +2553,12 @@ class AbstractEventLoopTests(unittest.TestCase):
NotImplementedError, loop.add_writer, 1, f) NotImplementedError, loop.add_writer, 1, f)
self.assertRaises( self.assertRaises(
NotImplementedError, loop.remove_writer, 1) NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.sock_recv, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_recv_into, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_sendall, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_connect, f, f)
self.assertRaises(
NotImplementedError, loop.sock_accept, f)
self.assertRaises( self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f) NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises( self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1) NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises( self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1) NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.connect_read_pipe, f,
mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.connect_write_pipe, f,
mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.subprocess_shell, f,
mock.sentinel)
self.assertRaises(
NotImplementedError, loop.subprocess_exec, f)
self.assertRaises( self.assertRaises(
NotImplementedError, loop.set_exception_handler, f) NotImplementedError, loop.set_exception_handler, f)
self.assertRaises( self.assertRaises(
@ -2608,6 +2570,47 @@ class AbstractEventLoopTests(unittest.TestCase):
self.assertRaises( self.assertRaises(
NotImplementedError, loop.set_debug, f) NotImplementedError, loop.set_debug, f)
def test_not_implemented_async(self):
async def inner():
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
with self.assertRaises(NotImplementedError):
await loop.run_in_executor(f, f)
with self.assertRaises(NotImplementedError):
await loop.getaddrinfo('localhost', 8080)
with self.assertRaises(NotImplementedError):
await loop.getnameinfo(('localhost', 8080))
with self.assertRaises(NotImplementedError):
await loop.create_connection(f)
with self.assertRaises(NotImplementedError):
await loop.create_server(f)
with self.assertRaises(NotImplementedError):
await loop.create_datagram_endpoint(f)
with self.assertRaises(NotImplementedError):
await loop.sock_recv(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_recv_into(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_sendall(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_connect(f, f)
with self.assertRaises(NotImplementedError):
await loop.sock_accept(f)
with self.assertRaises(NotImplementedError):
await loop.connect_read_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.connect_write_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.subprocess_shell(f, mock.sentinel)
with self.assertRaises(NotImplementedError):
await loop.subprocess_exec(f)
loop = asyncio.new_event_loop()
loop.run_until_complete(inner())
loop.close()
class ProtocolsAbsTests(unittest.TestCase): class ProtocolsAbsTests(unittest.TestCase):

View File

@ -69,21 +69,18 @@ class LockTests(test_utils.TestCase):
self.assertTrue(self.loop.run_until_complete(lock.acquire())) self.assertTrue(self.loop.run_until_complete(lock.acquire()))
@asyncio.coroutine async def c1(result):
def c1(result): if await lock.acquire():
if (yield from lock.acquire()):
result.append(1) result.append(1)
return True return True
@asyncio.coroutine async def c2(result):
def c2(result): if await lock.acquire():
if (yield from lock.acquire()):
result.append(2) result.append(2)
return True return True
@asyncio.coroutine async def c3(result):
def c3(result): if await lock.acquire():
if (yield from lock.acquire()):
result.append(3) result.append(3)
return True return True
@ -145,12 +142,11 @@ class LockTests(test_utils.TestCase):
# Setup: A has the lock, b and c are waiting. # Setup: A has the lock, b and c are waiting.
lock = asyncio.Lock(loop=self.loop) lock = asyncio.Lock(loop=self.loop)
@asyncio.coroutine async def lockit(name, blocker):
def lockit(name, blocker): await lock.acquire()
yield from lock.acquire()
try: try:
if blocker is not None: if blocker is not None:
yield from blocker await blocker
finally: finally:
lock.release() lock.release()
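Side note, not part of the patch: with native coroutines the acquire/try/finally/release dance in lockit() can also be written with the asynchronous context manager protocol that asyncio.Lock supports on Python 3.5+. A minimal sketch:

    import asyncio

    async def lockit(lock, blocker=None):
        async with lock:            # acquire(), and release() on exit
            if blocker is not None:
                await blocker

    loop = asyncio.get_event_loop()
    lock = asyncio.Lock(loop=loop)
    loop.run_until_complete(lockit(lock))
    loop.close()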
@ -294,19 +290,16 @@ class EventTests(test_utils.TestCase):
result = [] result = []
@asyncio.coroutine async def c1(result):
def c1(result): if await ev.wait():
if (yield from ev.wait()):
result.append(1) result.append(1)
@asyncio.coroutine async def c2(result):
def c2(result): if await ev.wait():
if (yield from ev.wait()):
result.append(2) result.append(2)
@asyncio.coroutine async def c3(result):
def c3(result): if await ev.wait():
if (yield from ev.wait()):
result.append(3) result.append(3)
t1 = asyncio.Task(c1(result), loop=self.loop) t1 = asyncio.Task(c1(result), loop=self.loop)
@ -359,9 +352,8 @@ class EventTests(test_utils.TestCase):
ev = asyncio.Event(loop=self.loop) ev = asyncio.Event(loop=self.loop)
result = [] result = []
@asyncio.coroutine async def c1(result):
def c1(result): if await ev.wait():
if (yield from ev.wait()):
result.append(1) result.append(1)
return True return True
@ -408,24 +400,21 @@ class ConditionTests(test_utils.TestCase):
cond = asyncio.Condition(loop=self.loop) cond = asyncio.Condition(loop=self.loop)
result = [] result = []
@asyncio.coroutine async def c1(result):
def c1(result): await cond.acquire()
yield from cond.acquire() if await cond.wait():
if (yield from cond.wait()):
result.append(1) result.append(1)
return True return True
@asyncio.coroutine async def c2(result):
def c2(result): await cond.acquire()
yield from cond.acquire() if await cond.wait():
if (yield from cond.wait()):
result.append(2) result.append(2)
return True return True
@asyncio.coroutine async def c3(result):
def c3(result): await cond.acquire()
yield from cond.acquire() if await cond.wait():
if (yield from cond.wait()):
result.append(3) result.append(3)
return True return True
@ -522,10 +511,9 @@ class ConditionTests(test_utils.TestCase):
result = [] result = []
@asyncio.coroutine async def c1(result):
def c1(result): await cond.acquire()
yield from cond.acquire() if await cond.wait_for(predicate):
if (yield from cond.wait_for(predicate)):
result.append(1) result.append(1)
cond.release() cond.release()
return True return True
@ -567,26 +555,23 @@ class ConditionTests(test_utils.TestCase):
cond = asyncio.Condition(loop=self.loop) cond = asyncio.Condition(loop=self.loop)
result = [] result = []
@asyncio.coroutine async def c1(result):
def c1(result): await cond.acquire()
yield from cond.acquire() if await cond.wait():
if (yield from cond.wait()):
result.append(1) result.append(1)
cond.release() cond.release()
return True return True
@asyncio.coroutine async def c2(result):
def c2(result): await cond.acquire()
yield from cond.acquire() if await cond.wait():
if (yield from cond.wait()):
result.append(2) result.append(2)
cond.release() cond.release()
return True return True
@asyncio.coroutine async def c3(result):
def c3(result): await cond.acquire()
yield from cond.acquire() if await cond.wait():
if (yield from cond.wait()):
result.append(3) result.append(3)
cond.release() cond.release()
return True return True
@ -623,18 +608,16 @@ class ConditionTests(test_utils.TestCase):
result = [] result = []
@asyncio.coroutine async def c1(result):
def c1(result): await cond.acquire()
yield from cond.acquire() if await cond.wait():
if (yield from cond.wait()):
result.append(1) result.append(1)
cond.release() cond.release()
return True return True
@asyncio.coroutine async def c2(result):
def c2(result): await cond.acquire()
yield from cond.acquire() if await cond.wait():
if (yield from cond.wait()):
result.append(2) result.append(2)
cond.release() cond.release()
return True return True
@ -791,27 +774,23 @@ class SemaphoreTests(test_utils.TestCase):
self.assertTrue(self.loop.run_until_complete(sem.acquire())) self.assertTrue(self.loop.run_until_complete(sem.acquire()))
self.assertFalse(sem.locked()) self.assertFalse(sem.locked())
@asyncio.coroutine async def c1(result):
def c1(result): await sem.acquire()
yield from sem.acquire()
result.append(1) result.append(1)
return True return True
@asyncio.coroutine async def c2(result):
def c2(result): await sem.acquire()
yield from sem.acquire()
result.append(2) result.append(2)
return True return True
@asyncio.coroutine async def c3(result):
def c3(result): await sem.acquire()
yield from sem.acquire()
result.append(3) result.append(3)
return True return True
@asyncio.coroutine async def c4(result):
def c4(result): await sem.acquire()
yield from sem.acquire()
result.append(4) result.append(4)
return True return True
View File
@ -36,27 +36,25 @@ class QueueBasicTests(_QueueTestBase):
id_is_present = hex(id(q)) in fn(q) id_is_present = hex(id(q)) in fn(q)
self.assertEqual(expect_id, id_is_present) self.assertEqual(expect_id, id_is_present)
@asyncio.coroutine async def add_getter():
def add_getter():
q = asyncio.Queue(loop=loop) q = asyncio.Queue(loop=loop)
# Start a task that waits to get. # Start a task that waits to get.
asyncio.Task(q.get(), loop=loop) asyncio.Task(q.get(), loop=loop)
# Let it start waiting. # Let it start waiting.
yield from asyncio.sleep(0.1, loop=loop) await asyncio.sleep(0.1, loop=loop)
self.assertTrue('_getters[1]' in fn(q)) self.assertTrue('_getters[1]' in fn(q))
# resume q.get coroutine to finish generator # resume q.get coroutine to finish generator
q.put_nowait(0) q.put_nowait(0)
loop.run_until_complete(add_getter()) loop.run_until_complete(add_getter())
@asyncio.coroutine async def add_putter():
def add_putter():
q = asyncio.Queue(maxsize=1, loop=loop) q = asyncio.Queue(maxsize=1, loop=loop)
q.put_nowait(1) q.put_nowait(1)
# Start a task that waits to put. # Start a task that waits to put.
asyncio.Task(q.put(2), loop=loop) asyncio.Task(q.put(2), loop=loop)
# Let it start waiting. # Let it start waiting.
yield from asyncio.sleep(0.1, loop=loop) await asyncio.sleep(0.1, loop=loop)
self.assertTrue('_putters[1]' in fn(q)) self.assertTrue('_putters[1]' in fn(q))
# resume q.put coroutine to finish generator # resume q.put coroutine to finish generator
q.get_nowait() q.get_nowait()
@ -125,24 +123,22 @@ class QueueBasicTests(_QueueTestBase):
self.assertEqual(2, q.maxsize) self.assertEqual(2, q.maxsize)
have_been_put = [] have_been_put = []
@asyncio.coroutine async def putter():
def putter():
for i in range(3): for i in range(3):
yield from q.put(i) await q.put(i)
have_been_put.append(i) have_been_put.append(i)
return True return True
@asyncio.coroutine async def test():
def test():
t = asyncio.Task(putter(), loop=loop) t = asyncio.Task(putter(), loop=loop)
yield from asyncio.sleep(0.01, loop=loop) await asyncio.sleep(0.01, loop=loop)
# The putter is blocked after putting two items. # The putter is blocked after putting two items.
self.assertEqual([0, 1], have_been_put) self.assertEqual([0, 1], have_been_put)
self.assertEqual(0, q.get_nowait()) self.assertEqual(0, q.get_nowait())
# Let the putter resume and put last item. # Let the putter resume and put last item.
yield from asyncio.sleep(0.01, loop=loop) await asyncio.sleep(0.01, loop=loop)
self.assertEqual([0, 1, 2], have_been_put) self.assertEqual([0, 1, 2], have_been_put)
self.assertEqual(1, q.get_nowait()) self.assertEqual(1, q.get_nowait())
self.assertEqual(2, q.get_nowait()) self.assertEqual(2, q.get_nowait())
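The putter()/test() pair above exercises the case where put() blocks on a bounded queue until a consumer makes room. The same behaviour outside the test harness, as a small self-contained sketch (not part of the patch):

    import asyncio

    async def producer(queue, n):
        for i in range(n):
            await queue.put(i)          # blocks whenever the queue is full

    async def consumer(queue, n):
        got = []
        for _ in range(n):
            got.append(await queue.get())
        return got

    loop = asyncio.get_event_loop()
    q = asyncio.Queue(maxsize=2, loop=loop)
    _, got = loop.run_until_complete(
        asyncio.gather(producer(q, 5), consumer(q, 5), loop=loop))
    print(got)                           # [0, 1, 2, 3, 4]
    loop.close()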
@ -160,9 +156,8 @@ class QueueGetTests(_QueueTestBase):
q = asyncio.Queue(loop=self.loop) q = asyncio.Queue(loop=self.loop)
q.put_nowait(1) q.put_nowait(1)
@asyncio.coroutine async def queue_get():
def queue_get(): return await q.get()
return (yield from q.get())
res = self.loop.run_until_complete(queue_get()) res = self.loop.run_until_complete(queue_get())
self.assertEqual(1, res) self.assertEqual(1, res)
@ -192,21 +187,19 @@ class QueueGetTests(_QueueTestBase):
started = asyncio.Event(loop=loop) started = asyncio.Event(loop=loop)
finished = False finished = False
@asyncio.coroutine async def queue_get():
def queue_get():
nonlocal finished nonlocal finished
started.set() started.set()
res = yield from q.get() res = await q.get()
finished = True finished = True
return res return res
@asyncio.coroutine async def queue_put():
def queue_put():
loop.call_later(0.01, q.put_nowait, 1) loop.call_later(0.01, q.put_nowait, 1)
queue_get_task = asyncio.Task(queue_get(), loop=loop) queue_get_task = asyncio.Task(queue_get(), loop=loop)
yield from started.wait() await started.wait()
self.assertFalse(finished) self.assertFalse(finished)
res = yield from queue_get_task res = await queue_get_task
self.assertTrue(finished) self.assertTrue(finished)
return res return res
@ -236,16 +229,14 @@ class QueueGetTests(_QueueTestBase):
q = asyncio.Queue(loop=loop) q = asyncio.Queue(loop=loop)
@asyncio.coroutine async def queue_get():
def queue_get(): return await asyncio.wait_for(q.get(), 0.051, loop=loop)
return (yield from asyncio.wait_for(q.get(), 0.051, loop=loop))
@asyncio.coroutine async def test():
def test():
get_task = asyncio.Task(queue_get(), loop=loop) get_task = asyncio.Task(queue_get(), loop=loop)
yield from asyncio.sleep(0.01, loop=loop) # let the task start await asyncio.sleep(0.01, loop=loop) # let the task start
q.put_nowait(1) q.put_nowait(1)
return (yield from get_task) return await get_task
self.assertEqual(1, loop.run_until_complete(test())) self.assertEqual(1, loop.run_until_complete(test()))
self.assertAlmostEqual(0.06, loop.time()) self.assertAlmostEqual(0.06, loop.time())
@ -275,15 +266,13 @@ class QueueGetTests(_QueueTestBase):
def test_why_are_getters_waiting(self): def test_why_are_getters_waiting(self):
# From issue #268. # From issue #268.
@asyncio.coroutine async def consumer(queue, num_expected):
def consumer(queue, num_expected):
for _ in range(num_expected): for _ in range(num_expected):
yield from queue.get() await queue.get()
@asyncio.coroutine async def producer(queue, num_items):
def producer(queue, num_items):
for i in range(num_items): for i in range(num_items):
yield from queue.put(i) await queue.put(i)
queue_size = 1 queue_size = 1
producer_num_items = 5 producer_num_items = 5
@ -301,10 +290,10 @@ class QueueGetTests(_QueueTestBase):
yield 0.2 yield 0.2
self.loop = self.new_test_loop(a_generator) self.loop = self.new_test_loop(a_generator)
@asyncio.coroutine
def consumer(queue): async def consumer(queue):
try: try:
item = yield from asyncio.wait_for(queue.get(), 0.1, loop=self.loop) item = await asyncio.wait_for(queue.get(), 0.1, loop=self.loop)
except asyncio.TimeoutError: except asyncio.TimeoutError:
pass pass
@ -318,10 +307,9 @@ class QueuePutTests(_QueueTestBase):
def test_blocking_put(self): def test_blocking_put(self):
q = asyncio.Queue(loop=self.loop) q = asyncio.Queue(loop=self.loop)
@asyncio.coroutine async def queue_put():
def queue_put():
# No maxsize, won't block. # No maxsize, won't block.
yield from q.put(1) await q.put(1)
self.loop.run_until_complete(queue_put()) self.loop.run_until_complete(queue_put())
@ -338,21 +326,19 @@ class QueuePutTests(_QueueTestBase):
started = asyncio.Event(loop=loop) started = asyncio.Event(loop=loop)
finished = False finished = False
@asyncio.coroutine async def queue_put():
def queue_put():
nonlocal finished nonlocal finished
started.set() started.set()
yield from q.put(1) await q.put(1)
yield from q.put(2) await q.put(2)
finished = True finished = True
@asyncio.coroutine async def queue_get():
def queue_get():
loop.call_later(0.01, q.get_nowait) loop.call_later(0.01, q.get_nowait)
queue_put_task = asyncio.Task(queue_put(), loop=loop) queue_put_task = asyncio.Task(queue_put(), loop=loop)
yield from started.wait() await started.wait()
self.assertFalse(finished) self.assertFalse(finished)
yield from queue_put_task await queue_put_task
self.assertTrue(finished) self.assertTrue(finished)
loop.run_until_complete(queue_get()) loop.run_until_complete(queue_get())
@ -464,24 +450,22 @@ class QueuePutTests(_QueueTestBase):
self.assertRaises(asyncio.QueueFull, q.put_nowait, 3) self.assertRaises(asyncio.QueueFull, q.put_nowait, 3)
q = asyncio.Queue(maxsize=1.3, loop=self.loop) q = asyncio.Queue(maxsize=1.3, loop=self.loop)
@asyncio.coroutine
def queue_put(): async def queue_put():
yield from q.put(1) await q.put(1)
yield from q.put(2) await q.put(2)
self.assertTrue(q.full()) self.assertTrue(q.full())
self.loop.run_until_complete(queue_put()) self.loop.run_until_complete(queue_put())
def test_put_cancelled(self): def test_put_cancelled(self):
q = asyncio.Queue(loop=self.loop) q = asyncio.Queue(loop=self.loop)
@asyncio.coroutine async def queue_put():
def queue_put(): await q.put(1)
yield from q.put(1)
return True return True
@asyncio.coroutine async def test():
def test(): return await q.get()
return (yield from q.get())
t = asyncio.Task(queue_put(), loop=self.loop) t = asyncio.Task(queue_put(), loop=self.loop)
self.assertEqual(1, self.loop.run_until_complete(test())) self.assertEqual(1, self.loop.run_until_complete(test()))
@ -520,13 +504,11 @@ class QueuePutTests(_QueueTestBase):
queue = asyncio.Queue(2, loop=self.loop) queue = asyncio.Queue(2, loop=self.loop)
@asyncio.coroutine async def putter(item):
def putter(item): await queue.put(item)
yield from queue.put(item)
@asyncio.coroutine async def getter():
def getter(): await asyncio.sleep(0, loop=self.loop)
yield
num = queue.qsize() num = queue.qsize()
for _ in range(num): for _ in range(num):
item = queue.get_nowait() item = queue.get_nowait()
@ -580,21 +562,19 @@ class _QueueJoinTestMixin:
# Join the queue and assert all items have been processed. # Join the queue and assert all items have been processed.
running = True running = True
@asyncio.coroutine async def worker():
def worker():
nonlocal accumulator nonlocal accumulator
while running: while running:
item = yield from q.get() item = await q.get()
accumulator += item accumulator += item
q.task_done() q.task_done()
@asyncio.coroutine async def test():
def test():
tasks = [asyncio.Task(worker(), loop=self.loop) tasks = [asyncio.Task(worker(), loop=self.loop)
for index in range(2)] for index in range(2)]
yield from q.join() await q.join()
return tasks return tasks
tasks = self.loop.run_until_complete(test()) tasks = self.loop.run_until_complete(test())
@ -612,10 +592,9 @@ class _QueueJoinTestMixin:
# Test that a queue join()s successfully, and before anything else # Test that a queue join()s successfully, and before anything else
# (done twice for insurance). # (done twice for insurance).
@asyncio.coroutine async def join():
def join(): await q.join()
yield from q.join() await q.join()
yield from q.join()
self.loop.run_until_complete(join()) self.loop.run_until_complete(join())
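The worker()/join() tests rely on the task_done()/join() contract: join() only returns once every item that was put has been marked done. A standalone sketch of that pattern (illustrative only, not taken from the patch):

    import asyncio

    async def worker(queue, results):
        while True:
            item = await queue.get()
            results.append(item)        # "process" the item
            queue.task_done()           # lets join() complete

    loop = asyncio.get_event_loop()
    q = asyncio.Queue(loop=loop)
    results = []
    for i in range(3):
        q.put_nowait(i)
    t = loop.create_task(worker(q, results))
    loop.run_until_complete(q.join())   # returns once every item is task_done()
    t.cancel()
    loop.run_until_complete(asyncio.gather(t, return_exceptions=True))
    loop.close()
    print(results)                      # [0, 1, 2]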
View File
@ -571,11 +571,10 @@ class StreamReaderTests(test_utils.TestCase):
self.server = None self.server = None
self.loop = loop self.loop = loop
@asyncio.coroutine async def handle_client(self, client_reader, client_writer):
def handle_client(self, client_reader, client_writer): data = await client_reader.readline()
data = yield from client_reader.readline()
client_writer.write(data) client_writer.write(data)
yield from client_writer.drain() await client_writer.drain()
client_writer.close() client_writer.close()
def start(self): def start(self):
@ -608,14 +607,13 @@ class StreamReaderTests(test_utils.TestCase):
self.loop.run_until_complete(self.server.wait_closed()) self.loop.run_until_complete(self.server.wait_closed())
self.server = None self.server = None
@asyncio.coroutine async def client(addr):
def client(addr): reader, writer = await asyncio.open_connection(
reader, writer = yield from asyncio.open_connection(
*addr, loop=self.loop) *addr, loop=self.loop)
# send a line # send a line
writer.write(b"hello world!\n") writer.write(b"hello world!\n")
# read it back # read it back
msgback = yield from reader.readline() msgback = await reader.readline()
writer.close() writer.close()
return msgback return msgback
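The converted handle_client()/client() pair is the classic streams echo pattern. A condensed, self-contained version using asyncio.start_server, for illustration only (port 0 asks the OS for a free port; this is not code from the patch):

    import asyncio

    async def handle_client(reader, writer):
        data = await reader.readline()
        writer.write(data)              # echo the line back
        await writer.drain()
        writer.close()

    async def main(loop):
        server = await asyncio.start_server(handle_client, '127.0.0.1', 0,
                                            loop=loop)
        host, port = server.sockets[0].getsockname()
        reader, writer = await asyncio.open_connection(host, port, loop=loop)
        writer.write(b'hello world!\n')
        msg = await reader.readline()
        writer.close()
        server.close()
        await server.wait_closed()
        return msg

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(main(loop)))   # b'hello world!\n'
    loop.close()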
@ -645,11 +643,10 @@ class StreamReaderTests(test_utils.TestCase):
self.loop = loop self.loop = loop
self.path = path self.path = path
@asyncio.coroutine async def handle_client(self, client_reader, client_writer):
def handle_client(self, client_reader, client_writer): data = await client_reader.readline()
data = yield from client_reader.readline()
client_writer.write(data) client_writer.write(data)
yield from client_writer.drain() await client_writer.drain()
client_writer.close() client_writer.close()
def start(self): def start(self):
@ -674,14 +671,13 @@ class StreamReaderTests(test_utils.TestCase):
self.loop.run_until_complete(self.server.wait_closed()) self.loop.run_until_complete(self.server.wait_closed())
self.server = None self.server = None
@asyncio.coroutine async def client(path):
def client(path): reader, writer = await asyncio.open_unix_connection(
reader, writer = yield from asyncio.open_unix_connection(
path, loop=self.loop) path, loop=self.loop)
# send a line # send a line
writer.write(b"hello world!\n") writer.write(b"hello world!\n")
# read it back # read it back
msgback = yield from reader.readline() msgback = await reader.readline()
writer.close() writer.close()
return msgback return msgback
@ -782,14 +778,13 @@ os.close(fd)
clt, _ = sock.accept() clt, _ = sock.accept()
clt.close() clt.close()
@asyncio.coroutine async def client(host, port):
def client(host, port): reader, writer = await asyncio.open_connection(
reader, writer = yield from asyncio.open_connection(
host, port, loop=self.loop) host, port, loop=self.loop)
while True: while True:
writer.write(b"foo\n") writer.write(b"foo\n")
yield from writer.drain() await writer.drain()
# Start the server thread and wait for it to be listening. # Start the server thread and wait for it to be listening.
thread = threading.Thread(target=server) thread = threading.Thread(target=server)
View File
@ -81,9 +81,8 @@ class SubprocessMixin:
def test_stdin_stdout(self): def test_stdin_stdout(self):
args = PROGRAM_CAT args = PROGRAM_CAT
@asyncio.coroutine async def run(data):
def run(data): proc = await asyncio.create_subprocess_exec(
proc = yield from asyncio.create_subprocess_exec(
*args, *args,
stdin=subprocess.PIPE, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
@ -91,12 +90,12 @@ class SubprocessMixin:
# feed data # feed data
proc.stdin.write(data) proc.stdin.write(data)
yield from proc.stdin.drain() await proc.stdin.drain()
proc.stdin.close() proc.stdin.close()
# get output and exitcode # get output and exitcode
data = yield from proc.stdout.read() data = await proc.stdout.read()
exitcode = yield from proc.wait() exitcode = await proc.wait()
return (exitcode, data) return (exitcode, data)
task = run(b'some data') task = run(b'some data')
@ -108,14 +107,13 @@ class SubprocessMixin:
def test_communicate(self): def test_communicate(self):
args = PROGRAM_CAT args = PROGRAM_CAT
@asyncio.coroutine async def run(data):
def run(data): proc = await asyncio.create_subprocess_exec(
proc = yield from asyncio.create_subprocess_exec(
*args, *args,
stdin=subprocess.PIPE, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
loop=self.loop) loop=self.loop)
stdout, stderr = yield from proc.communicate(data) stdout, stderr = await proc.communicate(data)
return proc.returncode, stdout return proc.returncode, stdout
task = run(b'some data') task = run(b'some data')
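The general shape of these subprocess tests, detached from the test harness: start a cat-like child with create_subprocess_exec(), feed it data through communicate(), and read the exit status. A hedged sketch (the inline -c program stands in for PROGRAM_CAT; requires an event loop that supports subprocesses, i.e. a POSIX platform or the proactor loop on Windows):

    import asyncio
    import sys
    from asyncio import subprocess

    async def run(data, loop):
        proc = await asyncio.create_subprocess_exec(
            sys.executable, '-c',
            'import sys; sys.stdout.write(sys.stdin.read())',  # "cat" stand-in
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            loop=loop)
        stdout, _ = await proc.communicate(data)
        return proc.returncode, stdout

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(run(b'some data', loop)))   # (0, b'some data')
    loop.close()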
@ -178,14 +176,13 @@ class SubprocessMixin:
loop=self.loop) loop=self.loop)
proc = self.loop.run_until_complete(create) proc = self.loop.run_until_complete(create)
@asyncio.coroutine async def send_signal(proc):
def send_signal(proc):
# basic synchronization to wait until the program is sleeping # basic synchronization to wait until the program is sleeping
line = yield from proc.stdout.readline() line = await proc.stdout.readline()
self.assertEqual(line, b'sleeping\n') self.assertEqual(line, b'sleeping\n')
proc.send_signal(signal.SIGHUP) proc.send_signal(signal.SIGHUP)
returncode = (yield from proc.wait()) returncode = await proc.wait()
return returncode return returncode
returncode = self.loop.run_until_complete(send_signal(proc)) returncode = self.loop.run_until_complete(send_signal(proc))
@ -208,10 +205,9 @@ class SubprocessMixin:
def test_stdin_broken_pipe(self): def test_stdin_broken_pipe(self):
proc, large_data = self.prepare_broken_pipe_test() proc, large_data = self.prepare_broken_pipe_test()
@asyncio.coroutine async def write_stdin(proc, data):
def write_stdin(proc, data):
proc.stdin.write(data) proc.stdin.write(data)
yield from proc.stdin.drain() await proc.stdin.drain()
coro = write_stdin(proc, large_data) coro = write_stdin(proc, large_data)
# drain() must raise BrokenPipeError or ConnectionResetError # drain() must raise BrokenPipeError or ConnectionResetError
@ -232,8 +228,7 @@ class SubprocessMixin:
limit = 10 limit = 10
size = (limit * 2 + 1) size = (limit * 2 + 1)
@asyncio.coroutine async def test_pause_reading():
def test_pause_reading():
code = '\n'.join(( code = '\n'.join((
'import sys', 'import sys',
'sys.stdout.write("x" * %s)' % size, 'sys.stdout.write("x" * %s)' % size,
@ -242,16 +237,15 @@ class SubprocessMixin:
connect_read_pipe = self.loop.connect_read_pipe connect_read_pipe = self.loop.connect_read_pipe
@asyncio.coroutine async def connect_read_pipe_mock(*args, **kw):
def connect_read_pipe_mock(*args, **kw): transport, protocol = await connect_read_pipe(*args, **kw)
transport, protocol = yield from connect_read_pipe(*args, **kw)
transport.pause_reading = mock.Mock() transport.pause_reading = mock.Mock()
transport.resume_reading = mock.Mock() transport.resume_reading = mock.Mock()
return (transport, protocol) return (transport, protocol)
self.loop.connect_read_pipe = connect_read_pipe_mock self.loop.connect_read_pipe = connect_read_pipe_mock
proc = yield from asyncio.create_subprocess_exec( proc = await asyncio.create_subprocess_exec(
sys.executable, '-c', code, sys.executable, '-c', code,
stdin=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE,
@ -259,7 +253,7 @@ class SubprocessMixin:
loop=self.loop) loop=self.loop)
stdout_transport = proc._transport.get_pipe_transport(1) stdout_transport = proc._transport.get_pipe_transport(1)
stdout, stderr = yield from proc.communicate() stdout, stderr = await proc.communicate()
# The child process produced more than limit bytes of output, # The child process produced more than limit bytes of output,
# the stream reader transport should pause the protocol to not # the stream reader transport should pause the protocol to not
@ -277,18 +271,17 @@ class SubprocessMixin:
def test_stdin_not_inheritable(self): def test_stdin_not_inheritable(self):
# asyncio issue #209: stdin must not be inheritable, otherwise # asyncio issue #209: stdin must not be inheritable, otherwise
# the Process.communicate() hangs # the Process.communicate() hangs
@asyncio.coroutine async def len_message(message):
def len_message(message):
code = 'import sys; data = sys.stdin.read(); print(len(data))' code = 'import sys; data = sys.stdin.read(); print(len(data))'
proc = yield from asyncio.create_subprocess_exec( proc = await asyncio.create_subprocess_exec(
sys.executable, '-c', code, sys.executable, '-c', code,
stdin=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,
close_fds=False, close_fds=False,
loop=self.loop) loop=self.loop)
stdout, stderr = yield from proc.communicate(message) stdout, stderr = await proc.communicate(message)
exitcode = yield from proc.wait() exitcode = await proc.wait()
return (stdout, exitcode) return (stdout, exitcode)
output, exitcode = self.loop.run_until_complete(len_message(b'abc')) output, exitcode = self.loop.run_until_complete(len_message(b'abc'))
@ -296,18 +289,18 @@ class SubprocessMixin:
self.assertEqual(exitcode, 0) self.assertEqual(exitcode, 0)
def test_empty_input(self): def test_empty_input(self):
@asyncio.coroutine
def empty_input(): async def empty_input():
code = 'import sys; data = sys.stdin.read(); print(len(data))' code = 'import sys; data = sys.stdin.read(); print(len(data))'
proc = yield from asyncio.create_subprocess_exec( proc = await asyncio.create_subprocess_exec(
sys.executable, '-c', code, sys.executable, '-c', code,
stdin=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,
close_fds=False, close_fds=False,
loop=self.loop) loop=self.loop)
stdout, stderr = yield from proc.communicate(b'') stdout, stderr = await proc.communicate(b'')
exitcode = yield from proc.wait() exitcode = await proc.wait()
return (stdout, exitcode) return (stdout, exitcode)
output, exitcode = self.loop.run_until_complete(empty_input()) output, exitcode = self.loop.run_until_complete(empty_input())
@ -317,9 +310,8 @@ class SubprocessMixin:
def test_cancel_process_wait(self): def test_cancel_process_wait(self):
# Issue #23140: cancel Process.wait() # Issue #23140: cancel Process.wait()
@asyncio.coroutine async def cancel_wait():
def cancel_wait(): proc = await asyncio.create_subprocess_exec(
proc = yield from asyncio.create_subprocess_exec(
*PROGRAM_BLOCKED, *PROGRAM_BLOCKED,
loop=self.loop) loop=self.loop)
@ -327,7 +319,7 @@ class SubprocessMixin:
task = self.loop.create_task(proc.wait()) task = self.loop.create_task(proc.wait())
self.loop.call_soon(task.cancel) self.loop.call_soon(task.cancel)
try: try:
yield from task await task
except asyncio.CancelledError: except asyncio.CancelledError:
pass pass
@ -336,20 +328,20 @@ class SubprocessMixin:
# Kill the process and wait until it is done # Kill the process and wait until it is done
proc.kill() proc.kill()
yield from proc.wait() await proc.wait()
self.loop.run_until_complete(cancel_wait()) self.loop.run_until_complete(cancel_wait())
def test_cancel_make_subprocess_transport_exec(self): def test_cancel_make_subprocess_transport_exec(self):
@asyncio.coroutine
def cancel_make_transport(): async def cancel_make_transport():
coro = asyncio.create_subprocess_exec(*PROGRAM_BLOCKED, coro = asyncio.create_subprocess_exec(*PROGRAM_BLOCKED,
loop=self.loop) loop=self.loop)
task = self.loop.create_task(coro) task = self.loop.create_task(coro)
self.loop.call_soon(task.cancel) self.loop.call_soon(task.cancel)
try: try:
yield from task await task
except asyncio.CancelledError: except asyncio.CancelledError:
pass pass
@ -359,15 +351,15 @@ class SubprocessMixin:
self.loop.run_until_complete(cancel_make_transport()) self.loop.run_until_complete(cancel_make_transport())
def test_cancel_post_init(self): def test_cancel_post_init(self):
@asyncio.coroutine
def cancel_make_transport(): async def cancel_make_transport():
coro = self.loop.subprocess_exec(asyncio.SubprocessProtocol, coro = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
*PROGRAM_BLOCKED) *PROGRAM_BLOCKED)
task = self.loop.create_task(coro) task = self.loop.create_task(coro)
self.loop.call_soon(task.cancel) self.loop.call_soon(task.cancel)
try: try:
yield from task await task
except asyncio.CancelledError: except asyncio.CancelledError:
pass pass
@ -378,11 +370,11 @@ class SubprocessMixin:
test_utils.run_briefly(self.loop) test_utils.run_briefly(self.loop)
def test_close_kill_running(self): def test_close_kill_running(self):
@asyncio.coroutine
def kill_running(): async def kill_running():
create = self.loop.subprocess_exec(asyncio.SubprocessProtocol, create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
*PROGRAM_BLOCKED) *PROGRAM_BLOCKED)
transport, protocol = yield from create transport, protocol = await create
kill_called = False kill_called = False
def kill(): def kill():
@ -395,7 +387,7 @@ class SubprocessMixin:
proc.kill = kill proc.kill = kill
returncode = transport.get_returncode() returncode = transport.get_returncode()
transport.close() transport.close()
yield from transport._wait() await transport._wait()
return (returncode, kill_called) return (returncode, kill_called)
# Ignore "Close running child process: kill ..." log # Ignore "Close running child process: kill ..." log
@ -408,11 +400,11 @@ class SubprocessMixin:
test_utils.run_briefly(self.loop) test_utils.run_briefly(self.loop)
def test_close_dont_kill_finished(self): def test_close_dont_kill_finished(self):
@asyncio.coroutine
def kill_running(): async def kill_running():
create = self.loop.subprocess_exec(asyncio.SubprocessProtocol, create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
*PROGRAM_BLOCKED) *PROGRAM_BLOCKED)
transport, protocol = yield from create transport, protocol = await create
proc = transport.get_extra_info('subprocess') proc = transport.get_extra_info('subprocess')
# kill the process (but asyncio is not notified immediately) # kill the process (but asyncio is not notified immediately)
@ -444,8 +436,8 @@ class SubprocessMixin:
# Unlike SafeChildWatcher, FastChildWatcher does not pop the # Unlike SafeChildWatcher, FastChildWatcher does not pop the
# callbacks if waitpid() is called elsewhere. Let's clear them # callbacks if waitpid() is called elsewhere. Let's clear them
# manually to avoid a warning when the watcher is detached. # manually to avoid a warning when the watcher is detached.
if sys.platform != 'win32' and \ if (sys.platform != 'win32' and
isinstance(self, SubprocessFastWatcherTests): isinstance(self, SubprocessFastWatcherTests)):
asyncio.get_child_watcher()._callbacks.clear() asyncio.get_child_watcher()._callbacks.clear()
def test_popen_error(self): def test_popen_error(self):
@ -467,8 +459,8 @@ class SubprocessMixin:
self.assertEqual(warns, []) self.assertEqual(warns, [])
def test_read_stdout_after_process_exit(self): def test_read_stdout_after_process_exit(self):
@asyncio.coroutine
def execute(): async def execute():
code = '\n'.join(['import sys', code = '\n'.join(['import sys',
'for _ in range(64):', 'for _ in range(64):',
' sys.stdout.write("x" * 4096)', ' sys.stdout.write("x" * 4096)',
@ -480,11 +472,11 @@ class SubprocessMixin:
stdout=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE,
loop=self.loop) loop=self.loop)
process = yield from fut process = await fut
while True: while True:
data = yield from process.stdout.read(65536) data = await process.stdout.read(65536)
if data: if data:
yield from asyncio.sleep(0.3, loop=self.loop) await asyncio.sleep(0.3, loop=self.loop)
else: else:
break break
View File
@ -31,10 +31,6 @@ except ImportError:
from asyncio.test_support import assert_python_ok from asyncio.test_support import assert_python_ok
PY34 = (sys.version_info >= (3, 4))
PY35 = (sys.version_info >= (3, 5))
@asyncio.coroutine @asyncio.coroutine
def coroutine_function(): def coroutine_function():
pass pass
@ -110,9 +106,8 @@ class BaseTaskTests:
other_loop = asyncio.new_event_loop() other_loop = asyncio.new_event_loop()
fut = self.new_future(other_loop) fut = self.new_future(other_loop)
@asyncio.coroutine async def run(fut):
def run(fut): await fut
yield from fut
try: try:
with self.assertRaisesRegex(RuntimeError, with self.assertRaisesRegex(RuntimeError,
@ -122,9 +117,9 @@ class BaseTaskTests:
other_loop.close() other_loop.close()
def test_task_awaits_on_itself(self): def test_task_awaits_on_itself(self):
@asyncio.coroutine
def test(): async def test():
yield from task await task
task = asyncio.ensure_future(test(), loop=self.loop) task = asyncio.ensure_future(test(), loop=self.loop)
@ -209,7 +204,6 @@ class BaseTaskTests:
t = asyncio.ensure_future(t_orig, loop=self.loop) t = asyncio.ensure_future(t_orig, loop=self.loop)
self.assertIs(t, t_orig) self.assertIs(t, t_orig)
@unittest.skipUnless(PY35, 'need python 3.5 or later')
def test_ensure_future_awaitable(self): def test_ensure_future_awaitable(self):
class Aw: class Aw:
def __init__(self, coro): def __init__(self, coro):
@ -234,12 +228,10 @@ class BaseTaskTests:
def test_get_stack(self): def test_get_stack(self):
T = None T = None
@asyncio.coroutine async def foo():
def foo(): await bar()
yield from bar()
@asyncio.coroutine async def bar():
def bar():
# test get_stack() # test get_stack()
f = T.get_stack(limit=1) f = T.get_stack(limit=1)
try: try:
@ -254,11 +246,10 @@ class BaseTaskTests:
tb = file.read() tb = file.read()
self.assertRegex(tb, r'foo\(\) running') self.assertRegex(tb, r'foo\(\) running')
@asyncio.coroutine async def runner():
def runner():
nonlocal T nonlocal T
T = asyncio.ensure_future(foo(), loop=self.loop) T = asyncio.ensure_future(foo(), loop=self.loop)
yield from T await T
self.loop.run_until_complete(runner()) self.loop.run_until_complete(runner())
@ -272,7 +263,6 @@ class BaseTaskTests:
# test coroutine function # test coroutine function
self.assertEqual(notmuch.__name__, 'notmuch') self.assertEqual(notmuch.__name__, 'notmuch')
if PY35:
self.assertRegex(notmuch.__qualname__, self.assertRegex(notmuch.__qualname__,
r'\w+.test_task_repr.<locals>.notmuch') r'\w+.test_task_repr.<locals>.notmuch')
self.assertEqual(notmuch.__module__, __name__) self.assertEqual(notmuch.__module__, __name__)
@ -282,14 +272,9 @@ class BaseTaskTests:
# test coroutine object # test coroutine object
gen = notmuch() gen = notmuch()
if coroutines._DEBUG or PY35:
coro_qualname = 'BaseTaskTests.test_task_repr.<locals>.notmuch' coro_qualname = 'BaseTaskTests.test_task_repr.<locals>.notmuch'
else:
coro_qualname = 'notmuch'
self.assertEqual(gen.__name__, 'notmuch') self.assertEqual(gen.__name__, 'notmuch')
if PY35: self.assertEqual(gen.__qualname__, coro_qualname)
self.assertEqual(gen.__qualname__,
coro_qualname)
# test pending Task # test pending Task
t = self.new_task(self.loop, gen) t = self.new_task(self.loop, gen)
@ -332,7 +317,6 @@ class BaseTaskTests:
# test coroutine function # test coroutine function
self.assertEqual(notmuch.__name__, 'notmuch') self.assertEqual(notmuch.__name__, 'notmuch')
if PY35:
self.assertRegex(notmuch.__qualname__, self.assertRegex(notmuch.__qualname__,
r'\w+.test_task_repr_coro_decorator' r'\w+.test_task_repr_coro_decorator'
r'\.<locals>\.notmuch') r'\.<locals>\.notmuch')
@ -340,19 +324,13 @@ class BaseTaskTests:
# test coroutine object # test coroutine object
gen = notmuch() gen = notmuch()
if coroutines._DEBUG or PY35:
# On Python >= 3.5, generators now inherit the name of the # On Python >= 3.5, generators now inherit the name of the
# function, as expected, and have a qualified name (__qualname__ # function, as expected, and have a qualified name (__qualname__
# attribute). # attribute).
coro_name = 'notmuch' coro_name = 'notmuch'
coro_qualname = ('BaseTaskTests.test_task_repr_coro_decorator' coro_qualname = ('BaseTaskTests.test_task_repr_coro_decorator'
'.<locals>.notmuch') '.<locals>.notmuch')
else:
# On Python < 3.5, generators inherit the name of the code, not of
# the function. See: http://bugs.python.org/issue21205
coro_name = coro_qualname = 'coro'
self.assertEqual(gen.__name__, coro_name) self.assertEqual(gen.__name__, coro_name)
if PY35:
self.assertEqual(gen.__qualname__, coro_qualname) self.assertEqual(gen.__qualname__, coro_qualname)
# test repr(CoroWrapper) # test repr(CoroWrapper)
@ -392,9 +370,8 @@ class BaseTaskTests:
def test_task_repr_wait_for(self): def test_task_repr_wait_for(self):
self.loop.set_debug(False) self.loop.set_debug(False)
@asyncio.coroutine async def wait_for(fut):
def wait_for(fut): return await fut
return (yield from fut)
fut = self.new_future(self.loop) fut = self.new_future(self.loop)
task = self.new_task(self.loop, wait_for(fut)) task = self.new_task(self.loop, wait_for(fut))
@ -411,9 +388,8 @@ class BaseTaskTests:
with set_coroutine_debug(True): with set_coroutine_debug(True):
self.loop.set_debug(True) self.loop.set_debug(True)
@asyncio.coroutine async def func(x, y):
def func(x, y): await asyncio.sleep(0)
yield from asyncio.sleep(0)
partial_func = asyncio.coroutine(functools.partial(func, 1)) partial_func = asyncio.coroutine(functools.partial(func, 1))
task = self.loop.create_task(partial_func(2)) task = self.loop.create_task(partial_func(2))
@ -430,18 +406,16 @@ class BaseTaskTests:
self.assertRegex(coro_repr, expected) self.assertRegex(coro_repr, expected)
def test_task_basics(self): def test_task_basics(self):
@asyncio.coroutine
def outer(): async def outer():
a = yield from inner1() a = await inner1()
b = yield from inner2() b = await inner2()
return a+b return a+b
@asyncio.coroutine async def inner1():
def inner1():
return 42 return 42
@asyncio.coroutine async def inner2():
def inner2():
return 1000 return 1000
t = outer() t = outer()
@ -456,9 +430,8 @@ class BaseTaskTests:
loop = self.new_test_loop(gen) loop = self.new_test_loop(gen)
@asyncio.coroutine async def task():
def task(): await asyncio.sleep(10.0, loop=loop)
yield from asyncio.sleep(10.0, loop=loop)
return 12 return 12
t = self.new_task(loop, task()) t = self.new_task(loop, task())
@ -488,9 +461,8 @@ class BaseTaskTests:
def test_cancel_inner_future(self): def test_cancel_inner_future(self):
f = self.new_future(self.loop) f = self.new_future(self.loop)
@asyncio.coroutine async def task():
def task(): await f
yield from f
return 12 return 12
t = self.new_task(self.loop, task()) t = self.new_task(self.loop, task())
@ -504,9 +476,8 @@ class BaseTaskTests:
def test_cancel_both_task_and_inner_future(self): def test_cancel_both_task_and_inner_future(self):
f = self.new_future(self.loop) f = self.new_future(self.loop)
@asyncio.coroutine async def task():
def task(): await f
yield from f
return 12 return 12
t = self.new_task(self.loop, task()) t = self.new_task(self.loop, task())
@ -526,11 +497,10 @@ class BaseTaskTests:
fut1 = self.new_future(self.loop) fut1 = self.new_future(self.loop)
fut2 = self.new_future(self.loop) fut2 = self.new_future(self.loop)
@asyncio.coroutine async def task():
def task(): await fut1
yield from fut1
try: try:
yield from fut2 await fut2
except asyncio.CancelledError: except asyncio.CancelledError:
return 42 return 42
@ -551,14 +521,13 @@ class BaseTaskTests:
fut2 = self.new_future(self.loop) fut2 = self.new_future(self.loop)
fut3 = self.new_future(self.loop) fut3 = self.new_future(self.loop)
@asyncio.coroutine async def task():
def task(): await fut1
yield from fut1
try: try:
yield from fut2 await fut2
except asyncio.CancelledError: except asyncio.CancelledError:
pass pass
res = yield from fut3 res = await fut3
return res return res
t = self.new_task(self.loop, task()) t = self.new_task(self.loop, task())
@ -581,12 +550,11 @@ class BaseTaskTests:
loop = asyncio.new_event_loop() loop = asyncio.new_event_loop()
self.set_event_loop(loop) self.set_event_loop(loop)
@asyncio.coroutine async def task():
def task():
t.cancel() t.cancel()
self.assertTrue(t._must_cancel) # White-box test. self.assertTrue(t._must_cancel) # White-box test.
# The sleep should be cancelled immediately. # The sleep should be cancelled immediately.
yield from asyncio.sleep(100, loop=loop) await asyncio.sleep(100, loop=loop)
return 12 return 12
t = self.new_task(loop, task()) t = self.new_task(loop, task())
@ -628,14 +596,11 @@ class BaseTaskTests:
loop = self.new_test_loop(gen) loop = self.new_test_loop(gen)
x = 0 x = 0
waiters = []
@asyncio.coroutine async def task():
def task():
nonlocal x nonlocal x
while x < 10: while x < 10:
waiters.append(asyncio.sleep(0.1, loop=loop)) await asyncio.sleep(0.1, loop=loop)
yield from waiters[-1]
x += 1 x += 1
if x == 2: if x == 2:
loop.stop() loop.stop()
@ -649,9 +614,6 @@ class BaseTaskTests:
self.assertEqual(x, 2) self.assertEqual(x, 2)
self.assertAlmostEqual(0.3, loop.time()) self.assertAlmostEqual(0.3, loop.time())
# close generators
for w in waiters:
w.close()
t.cancel() t.cancel()
self.assertRaises(asyncio.CancelledError, loop.run_until_complete, t) self.assertRaises(asyncio.CancelledError, loop.run_until_complete, t)
@ -704,12 +666,11 @@ class BaseTaskTests:
foo_running = None foo_running = None
@asyncio.coroutine async def foo():
def foo():
nonlocal foo_running nonlocal foo_running
foo_running = True foo_running = True
try: try:
yield from asyncio.sleep(0.2, loop=loop) await asyncio.sleep(0.2, loop=loop)
finally: finally:
foo_running = False foo_running = False
return 'done' return 'done'
@ -738,12 +699,11 @@ class BaseTaskTests:
foo_running = None foo_running = None
@asyncio.coroutine async def foo():
def foo():
nonlocal foo_running nonlocal foo_running
foo_running = True foo_running = True
try: try:
yield from asyncio.sleep(0.2, loop=loop) await asyncio.sleep(0.2, loop=loop)
finally: finally:
foo_running = False foo_running = False
return 'done' return 'done'
@ -781,9 +741,8 @@ class BaseTaskTests:
loop = self.new_test_loop(gen) loop = self.new_test_loop(gen)
@asyncio.coroutine async def foo():
def foo(): await asyncio.sleep(0.2, loop=loop)
yield from asyncio.sleep(0.2, loop=loop)
return 'done' return 'done'
asyncio.set_event_loop(loop) asyncio.set_event_loop(loop)
@ -827,9 +786,8 @@ class BaseTaskTests:
a = self.new_task(loop, asyncio.sleep(0.1, loop=loop)) a = self.new_task(loop, asyncio.sleep(0.1, loop=loop))
b = self.new_task(loop, asyncio.sleep(0.15, loop=loop)) b = self.new_task(loop, asyncio.sleep(0.15, loop=loop))
@asyncio.coroutine async def foo():
def foo(): done, pending = await asyncio.wait([b, a], loop=loop)
done, pending = yield from asyncio.wait([b, a], loop=loop)
self.assertEqual(done, set([a, b])) self.assertEqual(done, set([a, b]))
self.assertEqual(pending, set()) self.assertEqual(pending, set())
return 42 return 42
@ -857,9 +815,8 @@ class BaseTaskTests:
a = self.new_task(loop, asyncio.sleep(0.01, loop=loop)) a = self.new_task(loop, asyncio.sleep(0.01, loop=loop))
b = self.new_task(loop, asyncio.sleep(0.015, loop=loop)) b = self.new_task(loop, asyncio.sleep(0.015, loop=loop))
@asyncio.coroutine async def foo():
def foo(): done, pending = await asyncio.wait([b, a])
done, pending = yield from asyncio.wait([b, a])
self.assertEqual(done, set([a, b])) self.assertEqual(done, set([a, b]))
self.assertEqual(pending, set()) self.assertEqual(pending, set())
return 42 return 42
@ -871,6 +828,7 @@ class BaseTaskTests:
self.assertEqual(res, 42) self.assertEqual(res, 42)
def test_wait_duplicate_coroutines(self): def test_wait_duplicate_coroutines(self):
@asyncio.coroutine @asyncio.coroutine
def coro(s): def coro(s):
return s return s
@ -1000,9 +958,8 @@ class BaseTaskTests:
# first_exception, exception during waiting # first_exception, exception during waiting
a = self.new_task(loop, asyncio.sleep(10.0, loop=loop)) a = self.new_task(loop, asyncio.sleep(10.0, loop=loop))
@asyncio.coroutine async def exc():
def exc(): await asyncio.sleep(0.01, loop=loop)
yield from asyncio.sleep(0.01, loop=loop)
raise ZeroDivisionError('err') raise ZeroDivisionError('err')
b = self.new_task(loop, exc()) b = self.new_task(loop, exc())
@ -1038,9 +995,8 @@ class BaseTaskTests:
b = self.new_task(loop, sleeper()) b = self.new_task(loop, sleeper())
@asyncio.coroutine async def foo():
def foo(): done, pending = await asyncio.wait([b, a], loop=loop)
done, pending = yield from asyncio.wait([b, a], loop=loop)
self.assertEqual(len(done), 2) self.assertEqual(len(done), 2)
self.assertEqual(pending, set()) self.assertEqual(pending, set())
errors = set(f for f in done if f.exception() is not None) errors = set(f for f in done if f.exception() is not None)
@ -1068,9 +1024,8 @@ class BaseTaskTests:
a = self.new_task(loop, asyncio.sleep(0.1, loop=loop)) a = self.new_task(loop, asyncio.sleep(0.1, loop=loop))
b = self.new_task(loop, asyncio.sleep(0.15, loop=loop)) b = self.new_task(loop, asyncio.sleep(0.15, loop=loop))
@asyncio.coroutine async def foo():
def foo(): done, pending = await asyncio.wait([b, a], timeout=0.11,
done, pending = yield from asyncio.wait([b, a], timeout=0.11,
loop=loop) loop=loop)
self.assertEqual(done, set([a])) self.assertEqual(done, set([a]))
self.assertEqual(pending, set([b])) self.assertEqual(pending, set([b]))
@ -1164,17 +1119,16 @@ class BaseTaskTests:
loop = self.new_test_loop(gen) loop = self.new_test_loop(gen)
a = asyncio.sleep(0.1, 'a', loop=loop) a = loop.create_task(asyncio.sleep(0.1, 'a', loop=loop))
b = asyncio.sleep(0.15, 'b', loop=loop) b = loop.create_task(asyncio.sleep(0.15, 'b', loop=loop))
@asyncio.coroutine async def foo():
def foo():
values = [] values = []
for f in asyncio.as_completed([a, b], timeout=0.12, loop=loop): for f in asyncio.as_completed([a, b], timeout=0.12, loop=loop):
if values: if values:
loop.advance_time(0.02) loop.advance_time(0.02)
try: try:
v = yield from f v = await f
values.append((1, v)) values.append((1, v))
except asyncio.TimeoutError as exc: except asyncio.TimeoutError as exc:
values.append((2, exc)) values.append((2, exc))
@ -1202,10 +1156,9 @@ class BaseTaskTests:
a = asyncio.sleep(0.01, 'a', loop=loop) a = asyncio.sleep(0.01, 'a', loop=loop)
@asyncio.coroutine async def foo():
def foo():
for f in asyncio.as_completed([a], timeout=1, loop=loop): for f in asyncio.as_completed([a], timeout=1, loop=loop):
v = yield from f v = await f
self.assertEqual(v, 'a') self.assertEqual(v, 'a')
loop.run_until_complete(self.new_task(loop, foo())) loop.run_until_complete(self.new_task(loop, foo()))
@ -1578,18 +1531,16 @@ class BaseTaskTests:
fut1 = self.new_future(self.loop) fut1 = self.new_future(self.loop)
fut2 = self.new_future(self.loop) fut2 = self.new_future(self.loop)
@asyncio.coroutine async def coro1(loop):
def coro1(loop):
self.assertTrue(Task.current_task(loop=loop) is task1) self.assertTrue(Task.current_task(loop=loop) is task1)
yield from fut1 await fut1
self.assertTrue(Task.current_task(loop=loop) is task1) self.assertTrue(Task.current_task(loop=loop) is task1)
fut2.set_result(True) fut2.set_result(True)
@asyncio.coroutine async def coro2(loop):
def coro2(loop):
self.assertTrue(Task.current_task(loop=loop) is task2) self.assertTrue(Task.current_task(loop=loop) is task2)
fut1.set_result(True) fut1.set_result(True)
yield from fut2 await fut2
self.assertTrue(Task.current_task(loop=loop) is task2) self.assertTrue(Task.current_task(loop=loop) is task2)
task1 = self.new_task(self.loop, coro1(self.loop)) task1 = self.new_task(self.loop, coro1(self.loop))
@ -1607,22 +1558,20 @@ class BaseTaskTests:
proof = 0 proof = 0
waiter = self.new_future(self.loop) waiter = self.new_future(self.loop)
@asyncio.coroutine async def inner():
def inner():
nonlocal proof nonlocal proof
try: try:
yield from waiter await waiter
except asyncio.CancelledError: except asyncio.CancelledError:
proof += 1 proof += 1
raise raise
else: else:
self.fail('got past sleep() in inner()') self.fail('got past sleep() in inner()')
@asyncio.coroutine async def outer():
def outer():
nonlocal proof nonlocal proof
try: try:
yield from inner() await inner()
except asyncio.CancelledError: except asyncio.CancelledError:
proof += 100 # Expect this path. proof += 100 # Expect this path.
else: else:
@ -1641,16 +1590,14 @@ class BaseTaskTests:
proof = 0 proof = 0
waiter = self.new_future(self.loop) waiter = self.new_future(self.loop)
@asyncio.coroutine async def inner():
def inner():
nonlocal proof nonlocal proof
yield from waiter await waiter
proof += 1 proof += 1
@asyncio.coroutine async def outer():
def outer():
nonlocal proof nonlocal proof
d, p = yield from asyncio.wait([inner()], loop=self.loop) d, p = await asyncio.wait([inner()], loop=self.loop)
proof += 100 proof += 100
f = asyncio.ensure_future(outer(), loop=self.loop) f = asyncio.ensure_future(outer(), loop=self.loop)
@ -1697,16 +1644,14 @@ class BaseTaskTests:
proof = 0 proof = 0
waiter = self.new_future(self.loop) waiter = self.new_future(self.loop)
@asyncio.coroutine async def inner():
def inner():
nonlocal proof nonlocal proof
yield from waiter await waiter
proof += 1 proof += 1
@asyncio.coroutine async def outer():
def outer():
nonlocal proof nonlocal proof
yield from asyncio.shield(inner(), loop=self.loop) await asyncio.shield(inner(), loop=self.loop)
proof += 100 proof += 100
f = asyncio.ensure_future(outer(), loop=self.loop) f = asyncio.ensure_future(outer(), loop=self.loop)
@ -1890,8 +1835,6 @@ class BaseTaskTests:
self.assertIsInstance(exception, Exception) self.assertIsInstance(exception, Exception)
self.assertEqual(exception.args, ("foo", )) self.assertEqual(exception.args, ("foo", ))
@unittest.skipUnless(PY34,
'need python 3.4 or later')
def test_log_destroyed_pending_task(self): def test_log_destroyed_pending_task(self):
Task = self.__class__.Task Task = self.__class__.Task
@ -2661,5 +2604,47 @@ class SleepTests(test_utils.TestCase):
self.assertEqual(result, 11) self.assertEqual(result, 11)
class CompatibilityTests(test_utils.TestCase):
    # Tests for the bridge between old-style coroutines
    # and async/await syntax
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
self.loop = None
super().tearDown()
def test_yield_from_awaitable(self):
@asyncio.coroutine
def coro():
yield from asyncio.sleep(0, loop=self.loop)
return 'ok'
result = self.loop.run_until_complete(coro())
self.assertEqual('ok', result)
def test_await_old_style_coro(self):
@asyncio.coroutine
def coro1():
return 'ok1'
@asyncio.coroutine
def coro2():
yield from asyncio.sleep(0, loop=self.loop)
return 'ok2'
async def inner():
return await asyncio.gather(coro1(), coro2(), loop=self.loop)
result = self.loop.run_until_complete(inner())
self.assertEqual(['ok1', 'ok2'], result)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
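The CompatibilityTests added above exercise the bridge between old-style coroutines and async/await in both directions. As a standalone illustration (not part of this commit), a minimal sketch of the same round trip is given below; it assumes an interpreter where asyncio.coroutine still exists (it was removed in Python 3.11), and the legacy/modern names are hypothetical:

    import asyncio

    @asyncio.coroutine
    def legacy():
        # Old-style coroutine driving a native coroutine via yield from.
        yield from asyncio.sleep(0)
        return 'legacy'

    async def modern():
        # Native coroutine awaiting the old-style coroutine.
        return await legacy()

    loop = asyncio.new_event_loop()
    try:
        print(loop.run_until_complete(modern()))  # prints 'legacy'
    finally:
        loop.close()

The point the tests check is that either style can drive the other: await accepts generator-based coroutines, and @asyncio.coroutine functions can yield from native coroutines.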
@ -77,9 +77,8 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
def test_add_signal_handler_coroutine_error(self, m_signal): def test_add_signal_handler_coroutine_error(self, m_signal):
m_signal.NSIG = signal.NSIG m_signal.NSIG = signal.NSIG
@asyncio.coroutine async def simple_coroutine():
def simple_coroutine(): pass
yield from []
# callback must not be a coroutine function # callback must not be a coroutine function
coro_func = simple_coroutine coro_func = simple_coroutine
@ -56,14 +56,14 @@ class ProactorTests(test_utils.TestCase):
res = self.loop.run_until_complete(self._test_pipe()) res = self.loop.run_until_complete(self._test_pipe())
self.assertEqual(res, 'done') self.assertEqual(res, 'done')
def _test_pipe(self): async def _test_pipe(self):
ADDRESS = r'\\.\pipe\_test_pipe-%s' % os.getpid() ADDRESS = r'\\.\pipe\_test_pipe-%s' % os.getpid()
with self.assertRaises(FileNotFoundError): with self.assertRaises(FileNotFoundError):
yield from self.loop.create_pipe_connection( await self.loop.create_pipe_connection(
asyncio.Protocol, ADDRESS) asyncio.Protocol, ADDRESS)
[server] = yield from self.loop.start_serving_pipe( [server] = await self.loop.start_serving_pipe(
UpperProto, ADDRESS) UpperProto, ADDRESS)
self.assertIsInstance(server, windows_events.PipeServer) self.assertIsInstance(server, windows_events.PipeServer)
@ -72,7 +72,7 @@ class ProactorTests(test_utils.TestCase):
stream_reader = asyncio.StreamReader(loop=self.loop) stream_reader = asyncio.StreamReader(loop=self.loop)
protocol = asyncio.StreamReaderProtocol(stream_reader, protocol = asyncio.StreamReaderProtocol(stream_reader,
loop=self.loop) loop=self.loop)
trans, proto = yield from self.loop.create_pipe_connection( trans, proto = await self.loop.create_pipe_connection(
lambda: protocol, ADDRESS) lambda: protocol, ADDRESS)
self.assertIsInstance(trans, asyncio.Transport) self.assertIsInstance(trans, asyncio.Transport)
self.assertEqual(protocol, proto) self.assertEqual(protocol, proto)
@ -82,14 +82,14 @@ class ProactorTests(test_utils.TestCase):
w.write('lower-{}\n'.format(i).encode()) w.write('lower-{}\n'.format(i).encode())
for i, (r, w) in enumerate(clients): for i, (r, w) in enumerate(clients):
response = yield from r.readline() response = await r.readline()
self.assertEqual(response, 'LOWER-{}\n'.format(i).encode()) self.assertEqual(response, 'LOWER-{}\n'.format(i).encode())
w.close() w.close()
server.close() server.close()
with self.assertRaises(FileNotFoundError): with self.assertRaises(FileNotFoundError):
yield from self.loop.create_pipe_connection( await self.loop.create_pipe_connection(
asyncio.Protocol, ADDRESS) asyncio.Protocol, ADDRESS)
return 'done' return 'done'
@ -97,7 +97,8 @@ class ProactorTests(test_utils.TestCase):
def test_connect_pipe_cancel(self): def test_connect_pipe_cancel(self):
exc = OSError() exc = OSError()
exc.winerror = _overlapped.ERROR_PIPE_BUSY exc.winerror = _overlapped.ERROR_PIPE_BUSY
with mock.patch.object(_overlapped, 'ConnectPipe', side_effect=exc) as connect: with mock.patch.object(_overlapped, 'ConnectPipe',
side_effect=exc) as connect:
coro = self.loop._proactor.connect_pipe('pipe_address') coro = self.loop._proactor.connect_pipe('pipe_address')
task = self.loop.create_task(coro) task = self.loop.create_task(coro)
@ -0,0 +1,2 @@
Convert asyncio to use *async/await* syntax. Old-style ``yield from`` is
still supported.
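For reference, the mechanical pattern applied throughout this commit is roughly the following; the ``fetch`` name is hypothetical and not taken from the diff:

    import asyncio

    # Before: generator-based coroutine (still accepted by asyncio).
    # @asyncio.coroutine
    # def fetch():
    #     yield from asyncio.sleep(1)
    #     return 'data'

    # After: native coroutine, as asyncio itself now uses internally.
    async def fetch():
        await asyncio.sleep(1)
        return 'data'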