"""Base implementation of event loop.

The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.

Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called.  This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""

import collections
import collections.abc
import concurrent.futures
import errno
import heapq
import itertools
import os
import socket
import stat
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref

try:
    import ssl
except ImportError:  # pragma: no cover
    ssl = None

from . import constants
from . import coroutines
from . import events
from . import exceptions
from . import futures
from . import protocols
from . import sslproto
from . import staggered
from . import tasks
from . import timeouts
from . import transports
from . import trsock
from .log import logger


__all__ = 'BaseEventLoop', 'Server',


# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100

# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5


_HAS_IPv6 = hasattr(socket, 'AF_INET6')

# Maximum timeout passed to select to avoid OS limitations
MAXIMUM_SELECT_TIMEOUT = 24 * 3600


def _format_handle(handle):
    cb = handle._callback
    if isinstance(getattr(cb, '__self__', None), tasks.Task):
        # format the task
        return repr(cb.__self__)
    else:
        return str(handle)


def _format_pipe(fd):
    if fd == subprocess.PIPE:
        return '<pipe>'
    elif fd == subprocess.STDOUT:
        return '<stdout>'
    else:
        return repr(fd)


def _set_reuseport(sock):
    if not hasattr(socket, 'SO_REUSEPORT'):
        raise ValueError('reuse_port not supported by socket module')
    else:
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        except OSError:
            raise ValueError('reuse_port not supported by socket module, '
                             'SO_REUSEPORT defined but not implemented.')


def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
    # Try to skip getaddrinfo if "host" is already an IP. Users might have
    # handled name resolution in their own code and pass in resolved IPs.
    if not hasattr(socket, 'inet_pton'):
        return

    if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
            host is None:
        return None

    if type == socket.SOCK_STREAM:
        proto = socket.IPPROTO_TCP
    elif type == socket.SOCK_DGRAM:
        proto = socket.IPPROTO_UDP
    else:
        return None

    if port is None:
        port = 0
    elif isinstance(port, bytes) and port == b'':
        port = 0
    elif isinstance(port, str) and port == '':
        port = 0
    else:
        # If port's a service name like "http", don't skip getaddrinfo.
        try:
            port = int(port)
        except (TypeError, ValueError):
            return None

    if family == socket.AF_UNSPEC:
        afs = [socket.AF_INET]
        if _HAS_IPv6:
            afs.append(socket.AF_INET6)
    else:
        afs = [family]

    if isinstance(host, bytes):
        host = host.decode('idna')
    if '%' in host:
        # Linux's inet_pton doesn't accept an IPv6 zone index after host,
        # like '::1%lo0'.
        return None

    for af in afs:
        try:
            socket.inet_pton(af, host)
            # The host has already been resolved.
            if _HAS_IPv6 and af == socket.AF_INET6:
                return af, type, proto, '', (host, port, flowinfo, scopeid)
            else:
                return af, type, proto, '', (host, port)
        except OSError:
            pass

    # "host" is not an IP address.
    return None
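
# A minimal sketch of the fast path above: a literal IP yields a ready-made
# addrinfo tuple, while a hostname falls through to None (and therefore to a
# real getaddrinfo() call):
#
#     _ipaddr_info('127.0.0.1', 80, socket.AF_UNSPEC, socket.SOCK_STREAM, 0)
#     # -> (AF_INET, SOCK_STREAM, IPPROTO_TCP, '', ('127.0.0.1', 80))
#     _ipaddr_info('example.com', 80, socket.AF_UNSPEC, socket.SOCK_STREAM, 0)
#     # -> None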


def _interleave_addrinfos(addrinfos, first_address_family_count=1):
    """Interleave list of addrinfo tuples by family."""
    # Group addresses by family
    addrinfos_by_family = collections.OrderedDict()
    for addr in addrinfos:
        family = addr[0]
        if family not in addrinfos_by_family:
            addrinfos_by_family[family] = []
        addrinfos_by_family[family].append(addr)
    addrinfos_lists = list(addrinfos_by_family.values())

    reordered = []
    if first_address_family_count > 1:
        reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
        del addrinfos_lists[0][:first_address_family_count - 1]
    reordered.extend(
        a for a in itertools.chain.from_iterable(
            itertools.zip_longest(*addrinfos_lists)
        ) if a is not None)
    return reordered
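
# A conceptual sketch of the reordering above (placeholder tuples, not
# executable as-is): given getaddrinfo() results grouped two-per-family,
#
#     infos = [(AF_INET6, ...), (AF_INET6, ...), (AF_INET, ...), (AF_INET, ...)]
#     _interleave_addrinfos(infos)
#     # -> [first AF_INET6, first AF_INET, second AF_INET6, second AF_INET]
#
# create_connection() applies this when `interleave` is set, so that Happy
# Eyeballs alternates address families instead of exhausting one family first.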


def _run_until_complete_cb(fut):
    if not fut.cancelled():
        exc = fut.exception()
        if isinstance(exc, (SystemExit, KeyboardInterrupt)):
            # Issue #22429: run_forever() already finished, no need to
            # stop it.
            return
    futures._get_loop(fut).stop()


if hasattr(socket, 'TCP_NODELAY'):
    def _set_nodelay(sock):
        if (sock.family in {socket.AF_INET, socket.AF_INET6} and
                sock.type == socket.SOCK_STREAM and
                sock.proto == socket.IPPROTO_TCP):
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
    def _set_nodelay(sock):
        pass


def _check_ssl_socket(sock):
    if ssl is not None and isinstance(sock, ssl.SSLSocket):
        raise TypeError("Socket cannot be of type SSLSocket")


class _SendfileFallbackProtocol(protocols.Protocol):
    def __init__(self, transp):
        if not isinstance(transp, transports._FlowControlMixin):
            raise TypeError("transport should be _FlowControlMixin instance")
        self._transport = transp
        self._proto = transp.get_protocol()
        self._should_resume_reading = transp.is_reading()
        self._should_resume_writing = transp._protocol_paused
        transp.pause_reading()
        transp.set_protocol(self)
        if self._should_resume_writing:
            self._write_ready_fut = self._transport._loop.create_future()
        else:
            self._write_ready_fut = None

    async def drain(self):
        if self._transport.is_closing():
            raise ConnectionError("Connection closed by peer")
        fut = self._write_ready_fut
        if fut is None:
            return
        await fut

    def connection_made(self, transport):
        raise RuntimeError("Invalid state: "
                           "connection should have been established already.")

    def connection_lost(self, exc):
        if self._write_ready_fut is not None:
            # Never happens if peer disconnects after sending the whole content
            # Thus disconnection is always an exception from user perspective
            if exc is None:
                self._write_ready_fut.set_exception(
                    ConnectionError("Connection is closed by peer"))
            else:
                self._write_ready_fut.set_exception(exc)
        self._proto.connection_lost(exc)

    def pause_writing(self):
        if self._write_ready_fut is not None:
            return
        self._write_ready_fut = self._transport._loop.create_future()

    def resume_writing(self):
        if self._write_ready_fut is None:
            return
        self._write_ready_fut.set_result(False)
        self._write_ready_fut = None

    def data_received(self, data):
        raise RuntimeError("Invalid state: reading should be paused")

    def eof_received(self):
        raise RuntimeError("Invalid state: reading should be paused")

    async def restore(self):
        self._transport.set_protocol(self._proto)
        if self._should_resume_reading:
            self._transport.resume_reading()
        if self._write_ready_fut is not None:
            # Cancel the future.
            # Basically it has no effect because protocol is switched back,
            # no code should wait for it anymore.
            self._write_ready_fut.cancel()
        if self._should_resume_writing:
            self._proto.resume_writing()


class Server(events.AbstractServer):

    def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
                 ssl_handshake_timeout, ssl_shutdown_timeout=None):
        self._loop = loop
        self._sockets = sockets
        # Weak references so we don't break Transport's ability to
        # detect abandoned transports
        self._clients = weakref.WeakSet()
        self._waiters = []
        self._protocol_factory = protocol_factory
        self._backlog = backlog
        self._ssl_context = ssl_context
        self._ssl_handshake_timeout = ssl_handshake_timeout
        self._ssl_shutdown_timeout = ssl_shutdown_timeout
        self._serving = False
        self._serving_forever_fut = None

    def __repr__(self):
        return f'<{self.__class__.__name__} sockets={self.sockets!r}>'

    def _attach(self, transport):
        assert self._sockets is not None
        self._clients.add(transport)

    def _detach(self, transport):
        self._clients.discard(transport)
        if len(self._clients) == 0 and self._sockets is None:
            self._wakeup()

    def _wakeup(self):
        waiters = self._waiters
        self._waiters = None
        for waiter in waiters:
            if not waiter.done():
                waiter.set_result(None)

    def _start_serving(self):
        if self._serving:
            return
        self._serving = True
        for sock in self._sockets:
            sock.listen(self._backlog)
            self._loop._start_serving(
                self._protocol_factory, sock, self._ssl_context,
                self, self._backlog, self._ssl_handshake_timeout,
                self._ssl_shutdown_timeout)

    def get_loop(self):
        return self._loop

    def is_serving(self):
        return self._serving

    @property
    def sockets(self):
        if self._sockets is None:
            return ()
        return tuple(trsock.TransportSocket(s) for s in self._sockets)

    def close(self):
        sockets = self._sockets
        if sockets is None:
            return
        self._sockets = None

        for sock in sockets:
            self._loop._stop_serving(sock)

        self._serving = False

        if (self._serving_forever_fut is not None and
                not self._serving_forever_fut.done()):
            self._serving_forever_fut.cancel()
            self._serving_forever_fut = None

        if len(self._clients) == 0:
            self._wakeup()

    def close_clients(self):
        for transport in self._clients.copy():
            transport.close()

    def abort_clients(self):
        for transport in self._clients.copy():
            transport.abort()

    async def start_serving(self):
        self._start_serving()
        # Skip one loop iteration so that all 'loop.add_reader'
        # calls go through.
        await tasks.sleep(0)

    async def serve_forever(self):
        if self._serving_forever_fut is not None:
            raise RuntimeError(
                f'server {self!r} is already being awaited on serve_forever()')
        if self._sockets is None:
            raise RuntimeError(f'server {self!r} is closed')

        self._start_serving()
        self._serving_forever_fut = self._loop.create_future()

        try:
            await self._serving_forever_fut
        except exceptions.CancelledError:
            try:
                self.close()
                await self.wait_closed()
            finally:
                raise
        finally:
            self._serving_forever_fut = None

    async def wait_closed(self):
        """Wait until server is closed and all connections are dropped.

        - If the server is not closed, wait.
        - If it is closed, but there are still active connections, wait.

        Anyone waiting here will be unblocked once both conditions
        (server is closed and all connections have been dropped)
        have become true, in either order.

        Historical note: In 3.11 and before, this was broken, returning
        immediately if the server was already closed, even if there
        were still active connections.  An attempted fix in 3.12.0 was
        still broken, returning immediately if the server was still
        open and there were no active connections.  Hopefully in 3.12.1
        we have it right.
        """
        # Waiters are unblocked by self._wakeup(), which is called
        # from two places: self.close() and self._detach(), but only
        # when both conditions have become true.  To signal that this
        # has happened, self._wakeup() sets self._waiters to None.
        if self._waiters is None:
            return
        waiter = self._loop.create_future()
        self._waiters.append(waiter)
        await waiter
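
# A hedged usage sketch (user code, outside this module; EchoProtocol is a
# hypothetical protocol factory): a Server is normally obtained from
# loop.create_server() and driven with serve_forever(); the async context
# manager closes the server and performs the two-condition wait documented
# in wait_closed() above:
#
#     import asyncio
#
#     async def main():
#         loop = asyncio.get_running_loop()
#         server = await loop.create_server(EchoProtocol, '127.0.0.1', 8888)
#         async with server:
#             await server.serve_forever()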


class BaseEventLoop(events.AbstractEventLoop):

    def __init__(self):
        self._timer_cancelled_count = 0
        self._closed = False
        self._stopping = False
        self._ready = collections.deque()
        self._scheduled = []
        self._default_executor = None
        self._internal_fds = 0
        # Identifier of the thread running the event loop, or None if the
        # event loop is not running
        self._thread_id = None
        self._clock_resolution = time.get_clock_info('monotonic').resolution
        self._exception_handler = None
        self.set_debug(coroutines._is_debug_mode())
        # The preserved state of async generator hooks.
        self._old_agen_hooks = None
        # In debug mode, if the execution of a callback or a step of a task
        # exceeds this duration in seconds, the slow callback/task is logged.
        self.slow_callback_duration = 0.1
        self._current_handle = None
        self._task_factory = None
        self._coroutine_origin_tracking_enabled = False
        self._coroutine_origin_tracking_saved_depth = None

        # A weak set of all asynchronous generators that are
        # being iterated by the loop.
        self._asyncgens = weakref.WeakSet()
        # Set to True when `loop.shutdown_asyncgens` is called.
        self._asyncgens_shutdown_called = False
        # Set to True when `loop.shutdown_default_executor` is called.
        self._executor_shutdown_called = False

    def __repr__(self):
        return (
            f'<{self.__class__.__name__} running={self.is_running()} '
            f'closed={self.is_closed()} debug={self.get_debug()}>'
        )

    def create_future(self):
        """Create a Future object attached to the loop."""
        return futures.Future(loop=self)

    def create_task(self, coro, *, name=None, context=None):
        """Schedule a coroutine object.

        Return a task object.
        """
        self._check_closed()
        if self._task_factory is None:
            task = tasks.Task(coro, loop=self, name=name, context=context)
            if task._source_traceback:
                del task._source_traceback[-1]
        else:
            if context is None:
                # Use legacy API if context is not needed
                task = self._task_factory(self, coro)
            else:
                task = self._task_factory(self, coro, context=context)

            task.set_name(name)

        return task

    def set_task_factory(self, factory):
        """Set a task factory that will be used by loop.create_task().

        If factory is None the default task factory will be set.

        If factory is a callable, it should have a signature matching
        '(loop, coro)', where 'loop' will be a reference to the active
        event loop, 'coro' will be a coroutine object.  The callable
        must return a Future.
        """
        if factory is not None and not callable(factory):
            raise TypeError('task factory must be a callable or None')
        self._task_factory = factory

    def get_task_factory(self):
        """Return a task factory, or None if the default one is in use."""
        return self._task_factory
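
    # A minimal sketch of a conforming task factory (user code, not part of
    # this module): it matches the '(loop, coro)' signature described above
    # and returns a Task, which is a Future:
    #
    #     def logging_factory(loop, coro, context=None):
    #         task = tasks.Task(coro, loop=loop, context=context)
    #         logger.debug('created %r', task)
    #         return task
    #
    #     loop.set_task_factory(logging_factory)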

    def _make_socket_transport(self, sock, protocol, waiter=None, *,
                               extra=None, server=None):
        """Create socket transport."""
        raise NotImplementedError

    def _make_ssl_transport(
            self, rawsock, protocol, sslcontext, waiter=None,
            *, server_side=False, server_hostname=None,
            extra=None, server=None,
            ssl_handshake_timeout=None,
            ssl_shutdown_timeout=None,
            call_connection_made=True):
        """Create SSL transport."""
        raise NotImplementedError

    def _make_datagram_transport(self, sock, protocol,
                                 address=None, waiter=None, extra=None):
        """Create datagram transport."""
        raise NotImplementedError

    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                                  extra=None):
        """Create read pipe transport."""
        raise NotImplementedError

    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                                   extra=None):
        """Create write pipe transport."""
        raise NotImplementedError

    async def _make_subprocess_transport(self, protocol, args, shell,
                                         stdin, stdout, stderr, bufsize,
                                         extra=None, **kwargs):
        """Create subprocess transport."""
        raise NotImplementedError

    def _write_to_self(self):
        """Write a byte to self-pipe, to wake up the event loop.

        This may be called from a different thread.

        The subclass is responsible for implementing the self-pipe.
        """
        raise NotImplementedError

    def _process_events(self, event_list):
        """Process selector events."""
        raise NotImplementedError

    def _check_closed(self):
        if self._closed:
            raise RuntimeError('Event loop is closed')

    def _check_default_executor(self):
        if self._executor_shutdown_called:
            raise RuntimeError('Executor shutdown has been called')

    def _asyncgen_finalizer_hook(self, agen):
        self._asyncgens.discard(agen)
        if not self.is_closed():
            self.call_soon_threadsafe(self.create_task, agen.aclose())

    def _asyncgen_firstiter_hook(self, agen):
        if self._asyncgens_shutdown_called:
            warnings.warn(
                f"asynchronous generator {agen!r} was scheduled after "
                f"loop.shutdown_asyncgens() call",
                ResourceWarning, source=self)

        self._asyncgens.add(agen)

    async def shutdown_asyncgens(self):
        """Shutdown all active asynchronous generators."""
        self._asyncgens_shutdown_called = True

        if not len(self._asyncgens):
            # No asynchronous generators are alive.
            return

        closing_agens = list(self._asyncgens)
        self._asyncgens.clear()

        results = await tasks.gather(
            *[ag.aclose() for ag in closing_agens],
            return_exceptions=True)

        for result, agen in zip(results, closing_agens):
            if isinstance(result, Exception):
                self.call_exception_handler({
                    'message': f'an error occurred during closing of '
                               f'asynchronous generator {agen!r}',
                    'exception': result,
                    'asyncgen': agen
                })

    async def shutdown_default_executor(self, timeout=None):
        """Schedule the shutdown of the default executor.

        The timeout parameter specifies the amount of time the executor will
        be given to finish joining.  The default value is None, which means
        that the executor will be given an unlimited amount of time.
        """
        self._executor_shutdown_called = True
        if self._default_executor is None:
            return
        future = self.create_future()
        thread = threading.Thread(target=self._do_shutdown, args=(future,))
        thread.start()
        try:
            async with timeouts.timeout(timeout):
                await future
        except TimeoutError:
            warnings.warn("The executor did not finish joining "
                          f"its threads within {timeout} seconds.",
                          RuntimeWarning, stacklevel=2)
            self._default_executor.shutdown(wait=False)
        else:
            thread.join()

    def _do_shutdown(self, future):
        try:
            self._default_executor.shutdown(wait=True)
            if not self.is_closed():
                self.call_soon_threadsafe(futures._set_result_unless_cancelled,
                                          future, None)
        except Exception as ex:
            if not self.is_closed() and not future.cancelled():
                self.call_soon_threadsafe(future.set_exception, ex)

    def _check_running(self):
        if self.is_running():
            raise RuntimeError('This event loop is already running')
        if events._get_running_loop() is not None:
            raise RuntimeError(
                'Cannot run the event loop while another loop is running')

    def _run_forever_setup(self):
        """Prepare the run loop to process events.

        This method exists so that custom event loop subclasses (e.g., event
        loops that integrate a GUI event loop with Python's event loop) have
        access to all the loop setup logic.
        """
        self._check_closed()
        self._check_running()
        self._set_coroutine_origin_tracking(self._debug)

        self._old_agen_hooks = sys.get_asyncgen_hooks()
        self._thread_id = threading.get_ident()
        sys.set_asyncgen_hooks(
            firstiter=self._asyncgen_firstiter_hook,
            finalizer=self._asyncgen_finalizer_hook
        )

        events._set_running_loop(self)

    def _run_forever_cleanup(self):
        """Clean up after an event loop finishes the looping over events.

        This method exists so that custom event loop subclasses (e.g., event
        loops that integrate a GUI event loop with Python's event loop) have
        access to all the loop cleanup logic.
        """
        self._stopping = False
        self._thread_id = None
        events._set_running_loop(None)
        self._set_coroutine_origin_tracking(False)
        # Restore any pre-existing async generator hooks.
        if self._old_agen_hooks is not None:
            sys.set_asyncgen_hooks(*self._old_agen_hooks)
            self._old_agen_hooks = None

    def run_forever(self):
        """Run until stop() is called."""
        try:
            self._run_forever_setup()
            while True:
                self._run_once()
                if self._stopping:
                    break
        finally:
            self._run_forever_cleanup()

    def run_until_complete(self, future):
        """Run until the Future is done.

        If the argument is a coroutine, it is wrapped in a Task.

        WARNING: It would be disastrous to call run_until_complete()
        with the same coroutine twice -- it would wrap it in two
        different Tasks and that can't be good.

        Return the Future's result, or raise its exception.
        """
        self._check_closed()
        self._check_running()

        new_task = not futures.isfuture(future)
        future = tasks.ensure_future(future, loop=self)
        if new_task:
            # An exception is raised if the future didn't complete, so there
            # is no need to log the "destroy pending task" message
            future._log_destroy_pending = False

        future.add_done_callback(_run_until_complete_cb)
        try:
            self.run_forever()
        except:
            if new_task and future.done() and not future.cancelled():
                # The coroutine raised a BaseException.  Consume the exception
                # to not log a warning, the caller doesn't have access to the
                # local task.
                future.exception()
            raise
        finally:
            future.remove_done_callback(_run_until_complete_cb)
        if not future.done():
            raise RuntimeError('Event loop stopped before Future completed.')

        return future.result()

    def stop(self):
        """Stop running the event loop.

        Every callback already scheduled will still run.  This simply informs
        run_forever to stop looping after a complete iteration.
        """
        self._stopping = True

    def close(self):
        """Close the event loop.

        This clears the queues and shuts down the executor,
        but does not wait for the executor to finish.

        The event loop must not be running.
        """
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self._closed:
            return
        if self._debug:
            logger.debug("Close %r", self)
        self._closed = True
        self._ready.clear()
        self._scheduled.clear()
        self._executor_shutdown_called = True
        executor = self._default_executor
        if executor is not None:
            self._default_executor = None
            executor.shutdown(wait=False)

    def is_closed(self):
        """Returns True if the event loop was closed."""
        return self._closed

    def __del__(self, _warn=warnings.warn):
        if not self.is_closed():
            _warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
            if not self.is_running():
                self.close()

    def is_running(self):
        """Returns True if the event loop is running."""
        return (self._thread_id is not None)

    def time(self):
        """Return the time according to the event loop's clock.

        This is a float expressed in seconds since an epoch, but the
        epoch, precision, accuracy and drift are unspecified and may
        differ per event loop.
        """
        return time.monotonic()

    def call_later(self, delay, callback, *args, context=None):
        """Arrange for a callback to be called at a given time.

        Return a Handle: an opaque object with a cancel() method that
        can be used to cancel the call.

        The delay can be an int or float, expressed in seconds.  It is
        always relative to the current time.

        Each callback will be called exactly once.  If two callbacks
        are scheduled for exactly the same time, it is undefined which
        will be called first.

        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        if delay is None:
            raise TypeError('delay must not be None')
        timer = self.call_at(self.time() + delay, callback, *args,
                             context=context)
        if timer._source_traceback:
            del timer._source_traceback[-1]
        return timer
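
    # A brief sketch of the Handle returned above (user code): scheduling
    # and cancelling are symmetric, and call_later(d, cb) is equivalent to
    # call_at(loop.time() + d, cb):
    #
    #     handle = loop.call_later(30.0, print, 'timed out')
    #     ...
    #     handle.cancel()   # the callback will never run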

    def call_at(self, when, callback, *args, context=None):
        """Like call_later(), but uses an absolute time.

        Absolute time corresponds to the event loop's time() method.
        """
        if when is None:
            raise TypeError("when cannot be None")
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_at')
        timer = events.TimerHandle(when, callback, args, self, context)
        if timer._source_traceback:
            del timer._source_traceback[-1]
        heapq.heappush(self._scheduled, timer)
        timer._scheduled = True
        return timer

    def call_soon(self, callback, *args, context=None):
        """Arrange for a callback to be called as soon as possible.

        This operates as a FIFO queue: callbacks are called in the
        order in which they are registered.  Each callback will be
        called exactly once.

        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_soon')
        handle = self._call_soon(callback, args, context)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        return handle

    def _check_callback(self, callback, method):
        if (coroutines.iscoroutine(callback) or
                coroutines._iscoroutinefunction(callback)):
            raise TypeError(
                f"coroutines cannot be used with {method}()")
        if not callable(callback):
            raise TypeError(
                f'a callable object was expected by {method}(), '
                f'got {callback!r}')

    def _call_soon(self, callback, args, context):
        handle = events.Handle(callback, args, self, context)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        self._ready.append(handle)
        return handle

    def _check_thread(self):
        """Check that the current thread is the thread running the event loop.

        Non-thread-safe methods of this class make this assumption and will
        likely behave incorrectly when the assumption is violated.

        Should only be called when (self._debug == True).  The caller is
        responsible for checking this condition for performance reasons.
        """
        if self._thread_id is None:
            return
        thread_id = threading.get_ident()
        if thread_id != self._thread_id:
            raise RuntimeError(
                "Non-thread-safe operation invoked on an event loop other "
                "than the current one")

    def call_soon_threadsafe(self, callback, *args, context=None):
        """Like call_soon(), but thread-safe."""
        self._check_closed()
        if self._debug:
            self._check_callback(callback, 'call_soon_threadsafe')
        handle = self._call_soon(callback, args, context)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        self._write_to_self()
        return handle

    def run_in_executor(self, executor, func, *args):
        self._check_closed()
        if self._debug:
            self._check_callback(func, 'run_in_executor')
        if executor is None:
            executor = self._default_executor
            # Only check when the default executor is being used
            self._check_default_executor()
            if executor is None:
                executor = concurrent.futures.ThreadPoolExecutor(
                    thread_name_prefix='asyncio'
                )
                self._default_executor = executor
        return futures.wrap_future(
            executor.submit(func, *args), loop=self)
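
    # A hedged usage sketch: offloading a blocking call to the default
    # executor from a coroutine (user code; blocking_read is a hypothetical
    # ordinary synchronous function):
    #
    #     data = await loop.run_in_executor(None, blocking_read, path)
    #
    # Positional arguments are forwarded to the function, mirroring the
    # callback convention described in the module docstring.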

    def set_default_executor(self, executor):
        if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
            raise TypeError('executor must be ThreadPoolExecutor instance')
        self._default_executor = executor

    def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
        msg = [f"{host}:{port!r}"]
        if family:
            msg.append(f'family={family!r}')
        if type:
            msg.append(f'type={type!r}')
        if proto:
            msg.append(f'proto={proto!r}')
        if flags:
            msg.append(f'flags={flags!r}')
        msg = ', '.join(msg)
        logger.debug('Get address info %s', msg)

        t0 = self.time()
        addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
        dt = self.time() - t0

        msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
        if dt >= self.slow_callback_duration:
            logger.info(msg)
        else:
            logger.debug(msg)
        return addrinfo

    async def getaddrinfo(self, host, port, *,
                          family=0, type=0, proto=0, flags=0):
        if self._debug:
            getaddr_func = self._getaddrinfo_debug
        else:
            getaddr_func = socket.getaddrinfo

        return await self.run_in_executor(
            None, getaddr_func, host, port, family, type, proto, flags)

    async def getnameinfo(self, sockaddr, flags=0):
        return await self.run_in_executor(
            None, socket.getnameinfo, sockaddr, flags)

    async def sock_sendfile(self, sock, file, offset=0, count=None,
                            *, fallback=True):
        if self._debug and sock.gettimeout() != 0:
            raise ValueError("the socket must be non-blocking")
        _check_ssl_socket(sock)
        self._check_sendfile_params(sock, file, offset, count)
        try:
            return await self._sock_sendfile_native(sock, file,
                                                    offset, count)
        except exceptions.SendfileNotAvailableError as exc:
            if not fallback:
                raise
        return await self._sock_sendfile_fallback(sock, file,
                                                  offset, count)

    async def _sock_sendfile_native(self, sock, file, offset, count):
        # NB: sendfile syscall is not supported for SSL sockets and
        # non-mmap files even if sendfile is supported by OS
        raise exceptions.SendfileNotAvailableError(
            f"syscall sendfile is not available for socket {sock!r} "
            f"and file {file!r} combination")

    async def _sock_sendfile_fallback(self, sock, file, offset, count):
        if offset:
            file.seek(offset)
        blocksize = (
            min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
            if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
        )
        buf = bytearray(blocksize)
        total_sent = 0
        try:
            while True:
                if count:
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        break
                view = memoryview(buf)[:blocksize]
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    break  # EOF
                await self.sock_sendall(sock, view[:read])
                total_sent += read
            return total_sent
        finally:
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset + total_sent)

    def _check_sendfile_params(self, sock, file, offset, count):
        if 'b' not in getattr(file, 'mode', 'b'):
            raise ValueError("file should be opened in binary mode")
        if not sock.type == socket.SOCK_STREAM:
            raise ValueError("only SOCK_STREAM type sockets are supported")
        if count is not None:
            if not isinstance(count, int):
                raise TypeError(
                    "count must be a positive integer (got {!r})".format(count))
            if count <= 0:
                raise ValueError(
                    "count must be a positive integer (got {!r})".format(count))
        if not isinstance(offset, int):
            raise TypeError(
                "offset must be a non-negative integer (got {!r})".format(
                    offset))
        if offset < 0:
            raise ValueError(
                "offset must be a non-negative integer (got {!r})".format(
                    offset))

    async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
        """Create, bind and connect one socket."""
        my_exceptions = []
        exceptions.append(my_exceptions)
        family, type_, proto, _, address = addr_info
        sock = None
        try:
            sock = socket.socket(family=family, type=type_, proto=proto)
            sock.setblocking(False)
            if local_addr_infos is not None:
                for lfamily, _, _, _, laddr in local_addr_infos:
                    # skip local addresses of different family
                    if lfamily != family:
                        continue
                    try:
                        sock.bind(laddr)
                        break
                    except OSError as exc:
                        msg = (
                            f'error while attempting to bind on '
                            f'address {laddr!r}: {str(exc).lower()}'
                        )
                        exc = OSError(exc.errno, msg)
                        my_exceptions.append(exc)
                else:  # all bind attempts failed
                    if my_exceptions:
                        raise my_exceptions.pop()
                    else:
                        raise OSError(
                            f"no matching local address with {family=} found")
            await self.sock_connect(sock, address)
            return sock
        except OSError as exc:
            my_exceptions.append(exc)
            if sock is not None:
                sock.close()
            raise
        except:
            if sock is not None:
                sock.close()
            raise
        finally:
            exceptions = my_exceptions = None

    async def create_connection(
            self, protocol_factory, host=None, port=None,
            *, ssl=None, family=0,
            proto=0, flags=0, sock=None,
            local_addr=None, server_hostname=None,
            ssl_handshake_timeout=None,
            ssl_shutdown_timeout=None,
            happy_eyeballs_delay=None, interleave=None,
            all_errors=False):
        """Connect to a TCP server.

        Create a streaming transport connection to a given internet host and
        port: socket family AF_INET or socket.AF_INET6 depending on host (or
        family if specified), socket type SOCK_STREAM.  protocol_factory must
        be a callable returning a protocol instance.

        This method is a coroutine which will try to establish the connection
        in the background.  When successful, the coroutine returns a
        (transport, protocol) pair.
        """
        if server_hostname is not None and not ssl:
            raise ValueError('server_hostname is only meaningful with ssl')

        if server_hostname is None and ssl:
            # Use host as default for server_hostname.  It is an error
            # if host is empty or not set, e.g. when an
            # already-connected socket was passed or when only a port
            # is given.  To avoid this error, you can pass
            # server_hostname='' -- this will bypass the hostname
            # check.  (This also means that if host is a numeric
            # IP/IPv6 address, we will attempt to verify that exact
            # address; this will probably fail, but it is possible to
            # create a certificate for a specific IP address, so we
            # don't judge it here.)
            if not host:
                raise ValueError('You must set server_hostname '
                                 'when using ssl without a host')
            server_hostname = host

        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')

        if ssl_shutdown_timeout is not None and not ssl:
            raise ValueError(
                'ssl_shutdown_timeout is only meaningful with ssl')

        if sock is not None:
            _check_ssl_socket(sock)

        if happy_eyeballs_delay is not None and interleave is None:
            # If using happy eyeballs, default to interleave addresses by
            # family
            interleave = 1

        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')

            infos = await self._ensure_resolved(
                (host, port), family=family,
                type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
            if not infos:
                raise OSError('getaddrinfo() returned empty list')

            if local_addr is not None:
                laddr_infos = await self._ensure_resolved(
                    local_addr, family=family,
                    type=socket.SOCK_STREAM, proto=proto,
                    flags=flags, loop=self)
                if not laddr_infos:
                    raise OSError('getaddrinfo() returned empty list')
            else:
                laddr_infos = None

            if interleave:
                infos = _interleave_addrinfos(infos, interleave)

            exceptions = []
            if happy_eyeballs_delay is None:
                # not using happy eyeballs
                for addrinfo in infos:
                    try:
                        sock = await self._connect_sock(
                            exceptions, addrinfo, laddr_infos)
                        break
                    except OSError:
                        continue
            else:  # using happy eyeballs
                sock = (await staggered.staggered_race(
                    (
                        # can't use functools.partial as it keeps a reference
                        # to exceptions
                        lambda addrinfo=addrinfo: self._connect_sock(
                            exceptions, addrinfo, laddr_infos
                        )
                        for addrinfo in infos
                    ),
                    happy_eyeballs_delay,
                    loop=self,
                ))[0]  # can't use sock, _, _ as it keeps a reference to exceptions

            if sock is None:
                exceptions = [exc for sub in exceptions for exc in sub]
                try:
                    if all_errors:
                        raise ExceptionGroup("create_connection failed",
                                             exceptions)
                    if len(exceptions) == 1:
                        raise exceptions[0]
                    else:
                        # If they all have the same str(), raise one.
                        model = str(exceptions[0])
                        if all(str(exc) == model for exc in exceptions):
                            raise exceptions[0]
                        # Raise a combined exception so the user can see all
                        # the various error messages.
                        raise OSError('Multiple exceptions: {}'.format(
                            ', '.join(str(exc) for exc in exceptions)))
                finally:
                    exceptions = None

        else:
            if sock is None:
                raise ValueError(
                    'host and port was not specified and no sock specified')
            if sock.type != socket.SOCK_STREAM:
                # We allow AF_INET, AF_INET6, AF_UNIX as long as they
                # are SOCK_STREAM.
                # We support passing AF_UNIX sockets even though we have
                # a dedicated API for that: create_unix_connection.
                # Disallowing AF_UNIX in this method would break backwards
                # compatibility.
                raise ValueError(
                    f'A Stream Socket was expected, got {sock!r}')

        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout,
            ssl_shutdown_timeout=ssl_shutdown_timeout)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r connected to %s:%r: (%r, %r)",
                         sock, host, port, transport, protocol)
        return transport, protocol
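
    # A hedged usage sketch (user code; MyProtocol is hypothetical): the
    # coroutine resolves the host, iterates or races candidate addresses via
    # _connect_sock(), and hands the connected socket to a transport:
    #
    #     transport, protocol = await loop.create_connection(
    #         MyProtocol, 'example.com', 443, ssl=True)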
|
|
|
|
|
2017-12-19 15:45:42 -04:00
|
|
|
async def _create_connection_transport(
|
|
|
|
self, sock, protocol_factory, ssl,
|
|
|
|
server_hostname, server_side=False,
|
2022-02-15 09:04:00 -04:00
|
|
|
ssl_handshake_timeout=None,
|
|
|
|
ssl_shutdown_timeout=None):
|
2016-07-12 19:23:10 -03:00
|
|
|
|
|
|
|
sock.setblocking(False)
|
|
|
|
|
2013-10-17 17:40:50 -03:00
|
|
|
protocol = protocol_factory()
|
2016-05-16 16:38:39 -03:00
|
|
|
waiter = self.create_future()
|
2013-10-17 17:40:50 -03:00
|
|
|
if ssl:
|
|
|
|
sslcontext = None if isinstance(ssl, bool) else ssl
|
|
|
|
transport = self._make_ssl_transport(
|
|
|
|
sock, protocol, sslcontext, waiter,
|
2017-12-19 15:45:42 -04:00
|
|
|
server_side=server_side, server_hostname=server_hostname,
|
2022-02-15 09:04:00 -04:00
|
|
|
ssl_handshake_timeout=ssl_handshake_timeout,
|
|
|
|
ssl_shutdown_timeout=ssl_shutdown_timeout)
|
2013-10-17 17:40:50 -03:00
|
|
|
else:
|
|
|
|
transport = self._make_socket_transport(sock, protocol, waiter)
|
|
|
|
|
2015-01-14 19:04:21 -04:00
|
|
|
try:
|
2017-12-08 18:23:48 -04:00
|
|
|
await waiter
|
2015-01-21 19:17:41 -04:00
|
|
|
except:
|
2015-01-14 19:04:21 -04:00
|
|
|
transport.close()
|
|
|
|
raise
|
|
|
|
|
2013-10-17 17:40:50 -03:00
|
|
|
return transport, protocol
|
|
|
|
|
2018-01-27 15:22:47 -04:00
|
|
|
async def sendfile(self, transport, file, offset=0, count=None,
|
|
|
|
*, fallback=True):
|
|
|
|
"""Send a file to transport.
|
|
|
|
|
|
|
|
Return the total number of bytes which were sent.
|
|
|
|
|
|
|
|
The method uses high-performance os.sendfile if available.
|
|
|
|
|
|
|
|
file must be a regular file object opened in binary mode.
|
|
|
|
|
|
|
|
offset tells from where to start reading the file. If specified,
|
|
|
|
count is the total number of bytes to transmit, as opposed to
|
|
|
|
sending the file until EOF is reached. The file position is updated
|
|
|
|
on return, including when an error occurs, in which case
|
|
|
|
file.tell() can be used to figure out the number of bytes
|
|
|
|
which were sent.
|
|
|
|
|
|
|
|
fallback set to True makes asyncio manually read and send
|
|
|
|
the file when the platform does not support the sendfile syscall
|
|
|
|
(e.g. Windows or SSL socket on Unix).
|
|
|
|
|
|
|
|
Raise SendfileNotAvailableError if the system does not support
|
|
|
|
the sendfile syscall and fallback is False.
|
|
|
|
"""
|
|
|
|
if transport.is_closing():
|
|
|
|
raise RuntimeError("Transport is closing")
|
|
|
|
mode = getattr(transport, '_sendfile_compatible',
|
|
|
|
constants._SendfileMode.UNSUPPORTED)
|
|
|
|
if mode is constants._SendfileMode.UNSUPPORTED:
|
|
|
|
raise RuntimeError(
|
|
|
|
f"sendfile is not supported for transport {transport!r}")
|
|
|
|
if mode is constants._SendfileMode.TRY_NATIVE:
|
|
|
|
try:
|
|
|
|
return await self._sendfile_native(transport, file,
|
|
|
|
offset, count)
|
2018-09-11 14:13:04 -03:00
|
|
|
except exceptions.SendfileNotAvailableError as exc:
|
2018-01-27 15:22:47 -04:00
|
|
|
if not fallback:
|
|
|
|
raise
|
2018-01-27 16:52:52 -04:00
|
|
|
|
|
|
|
if not fallback:
|
|
|
|
raise RuntimeError(
|
|
|
|
f"fallback is disabled and native sendfile is not "
|
|
|
|
f"supported for transport {transport!r}")
|
|
|
|
|
2018-01-27 15:22:47 -04:00
|
|
|
return await self._sendfile_fallback(transport, file,
|
|
|
|
offset, count)
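# Example (illustrative sketch, not part of this module): sending a
# file over an existing transport; `loop`, `transport` and the path
# are assumed to be set up elsewhere.
#
#     async def send_file(loop, transport, path):
#         with open(path, 'rb') as f:
#             # Uses the native sendfile path when possible; with the
#             # default fallback=True it degrades to a read/write loop.
#             return await loop.sendfile(transport, f)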
|
|
|
|
|
|
|
|
async def _sendfile_native(self, transp, file, offset, count):
|
2018-09-11 14:13:04 -03:00
|
|
|
raise exceptions.SendfileNotAvailableError(
|
2018-01-27 15:22:47 -04:00
|
|
|
"sendfile syscall is not supported")
|
|
|
|
|
|
|
|
async def _sendfile_fallback(self, transp, file, offset, count):
|
|
|
|
if offset:
|
|
|
|
file.seek(offset)
|
|
|
|
blocksize = min(count, 16384) if count else 16384
|
|
|
|
buf = bytearray(blocksize)
|
|
|
|
total_sent = 0
|
|
|
|
proto = _SendfileFallbackProtocol(transp)
|
|
|
|
try:
|
|
|
|
while True:
|
|
|
|
if count:
|
|
|
|
blocksize = min(count - total_sent, blocksize)
|
|
|
|
if blocksize <= 0:
|
|
|
|
return total_sent
|
|
|
|
view = memoryview(buf)[:blocksize]
|
2019-06-15 08:05:35 -03:00
|
|
|
read = await self.run_in_executor(None, file.readinto, view)
|
2018-01-27 15:22:47 -04:00
|
|
|
if not read:
|
|
|
|
return total_sent # EOF
|
|
|
|
await proto.drain()
|
2019-06-15 08:05:08 -03:00
|
|
|
transp.write(view[:read])
|
2018-01-27 15:22:47 -04:00
|
|
|
total_sent += read
|
|
|
|
finally:
|
|
|
|
if total_sent > 0 and hasattr(file, 'seek'):
|
|
|
|
file.seek(offset + total_sent)
|
|
|
|
await proto.restore()
|
|
|
|
|
2017-12-30 01:35:36 -04:00
|
|
|
async def start_tls(self, transport, protocol, sslcontext, *,
|
|
|
|
server_side=False,
|
|
|
|
server_hostname=None,
|
2022-02-15 09:04:00 -04:00
|
|
|
ssl_handshake_timeout=None,
|
|
|
|
ssl_shutdown_timeout=None):
|
2017-12-30 01:35:36 -04:00
|
|
|
"""Upgrade transport to TLS.
|
|
|
|
|
|
|
|
Return a new transport that *protocol* should start using
|
|
|
|
immediately.
|
|
|
|
"""
|
|
|
|
if ssl is None:
|
|
|
|
raise RuntimeError('Python ssl module is not available')
|
|
|
|
|
|
|
|
if not isinstance(sslcontext, ssl.SSLContext):
|
|
|
|
raise TypeError(
|
|
|
|
f'sslcontext is expected to be an instance of ssl.SSLContext, '
|
|
|
|
f'got {sslcontext!r}')
|
|
|
|
|
|
|
|
if not getattr(transport, '_start_tls_compatible', False):
|
|
|
|
raise TypeError(
|
2018-06-05 09:59:58 -03:00
|
|
|
f'transport {transport!r} is not supported by start_tls()')
|
2017-12-30 01:35:36 -04:00
|
|
|
|
|
|
|
waiter = self.create_future()
|
|
|
|
ssl_protocol = sslproto.SSLProtocol(
|
|
|
|
self, protocol, sslcontext, waiter,
|
|
|
|
server_side, server_hostname,
|
|
|
|
ssl_handshake_timeout=ssl_handshake_timeout,
|
2022-02-15 09:04:00 -04:00
|
|
|
ssl_shutdown_timeout=ssl_shutdown_timeout,
|
2017-12-30 01:35:36 -04:00
|
|
|
call_connection_made=False)
|
|
|
|
|
2018-05-29 02:00:12 -03:00
|
|
|
# Pause early so that "ssl_protocol.data_received()" doesn't
|
|
|
|
# have a chance to get called before "ssl_protocol.connection_made()".
|
|
|
|
transport.pause_reading()
|
|
|
|
|
2017-12-30 01:35:36 -04:00
|
|
|
transport.set_protocol(ssl_protocol)
|
2018-06-05 09:59:58 -03:00
|
|
|
conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
|
|
|
|
resume_cb = self.call_soon(transport.resume_reading)
|
2017-12-30 01:35:36 -04:00
|
|
|
|
2018-06-04 12:32:35 -03:00
|
|
|
try:
|
|
|
|
await waiter
|
2019-05-27 09:45:12 -03:00
|
|
|
except BaseException:
|
2018-06-04 12:32:35 -03:00
|
|
|
transport.close()
|
2018-06-05 09:59:58 -03:00
|
|
|
conmade_cb.cancel()
|
|
|
|
resume_cb.cancel()
|
2018-06-04 12:32:35 -03:00
|
|
|
raise
|
|
|
|
|
2017-12-30 01:35:36 -04:00
|
|
|
return ssl_protocol._app_transport
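# Example (illustrative sketch, not part of this module): upgrading an
# established plaintext connection after a STARTTLS-style negotiation;
# `loop`, `transport` and `protocol` come from an earlier
# create_connection() call.
#
#     import ssl
#
#     async def upgrade(loop, transport, protocol, hostname):
#         ctx = ssl.create_default_context()
#         new_transport = await loop.start_tls(
#             transport, protocol, ctx, server_hostname=hostname)
#         # The protocol must use new_transport from here on.
#         return new_transport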
|
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
async def create_datagram_endpoint(self, protocol_factory,
|
|
|
|
local_addr=None, remote_addr=None, *,
|
|
|
|
family=0, proto=0, flags=0,
|
2021-09-08 13:58:43 -03:00
|
|
|
reuse_port=None,
|
2017-12-08 18:23:48 -04:00
|
|
|
allow_broadcast=None, sock=None):
|
2013-10-17 17:40:50 -03:00
|
|
|
"""Create datagram connection."""
|
2015-10-05 13:15:28 -03:00
|
|
|
if sock is not None:
|
2024-02-03 13:14:02 -04:00
|
|
|
if sock.type == socket.SOCK_STREAM:
|
2016-11-09 16:47:00 -04:00
|
|
|
raise ValueError(
|
2024-02-03 13:14:02 -04:00
|
|
|
f'A datagram socket was expected, got {sock!r}')
|
2015-10-05 13:15:28 -03:00
|
|
|
if (local_addr or remote_addr or
|
|
|
|
family or proto or flags or
|
2019-12-09 10:21:10 -04:00
|
|
|
reuse_port or allow_broadcast):
|
2015-10-05 13:15:28 -03:00
|
|
|
# show the problematic kwargs in exception msg
|
|
|
|
opts = dict(local_addr=local_addr, remote_addr=remote_addr,
|
|
|
|
family=family, proto=proto, flags=flags,
|
2021-09-08 13:58:43 -03:00
|
|
|
reuse_port=reuse_port,
|
2015-10-05 13:15:28 -03:00
|
|
|
allow_broadcast=allow_broadcast)
|
2017-12-10 19:36:12 -04:00
|
|
|
problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
|
2015-10-05 13:15:28 -03:00
|
|
|
raise ValueError(
|
2017-12-10 19:36:12 -04:00
|
|
|
f'socket modifier keyword arguments can not be used '
|
|
|
|
f'when sock is specified. ({problems})')
|
2015-10-05 13:15:28 -03:00
|
|
|
sock.setblocking(False)
|
2013-10-17 17:40:50 -03:00
|
|
|
r_addr = None
|
|
|
|
else:
|
2015-10-05 13:15:28 -03:00
|
|
|
if not (local_addr or remote_addr):
|
|
|
|
if family == 0:
|
|
|
|
raise ValueError('unexpected address family')
|
|
|
|
addr_pairs_info = (((family, proto), (None, None)),)
|
2017-10-30 10:43:02 -03:00
|
|
|
elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
|
|
|
|
for addr in (local_addr, remote_addr):
|
2017-11-27 19:34:08 -04:00
|
|
|
if addr is not None and not isinstance(addr, str):
|
2017-10-30 10:43:02 -03:00
|
|
|
raise TypeError('string is expected')
|
2019-04-09 10:40:59 -03:00
|
|
|
|
|
|
|
if local_addr and local_addr[0] not in (0, '\x00'):
|
|
|
|
try:
|
|
|
|
if stat.S_ISSOCK(os.stat(local_addr).st_mode):
|
|
|
|
os.remove(local_addr)
|
|
|
|
except FileNotFoundError:
|
|
|
|
pass
|
|
|
|
except OSError as err:
|
|
|
|
# Directory may have permissions only to create socket.
|
|
|
|
logger.error('Unable to check or remove stale UNIX '
|
|
|
|
'socket %r: %r',
|
|
|
|
local_addr, err)
|
|
|
|
|
2017-10-30 10:43:02 -03:00
|
|
|
addr_pairs_info = (((family, proto),
|
|
|
|
(local_addr, remote_addr)), )
|
2015-10-05 13:15:28 -03:00
|
|
|
else:
|
|
|
|
# join address by (family, protocol)
|
2019-02-05 04:04:40 -04:00
|
|
|
addr_infos = {} # Using order preserving dict
|
2015-10-05 13:15:28 -03:00
|
|
|
for idx, addr in ((0, local_addr), (1, remote_addr)):
|
|
|
|
if addr is not None:
|
2021-12-26 07:13:14 -04:00
|
|
|
if not (isinstance(addr, tuple) and len(addr) == 2):
|
|
|
|
raise TypeError('2-tuple is expected')
|
2015-10-05 13:15:28 -03:00
|
|
|
|
2017-12-14 21:53:26 -04:00
|
|
|
infos = await self._ensure_resolved(
|
2016-06-08 13:33:31 -03:00
|
|
|
addr, family=family, type=socket.SOCK_DGRAM,
|
|
|
|
proto=proto, flags=flags, loop=self)
|
2015-10-05 13:15:28 -03:00
|
|
|
if not infos:
|
|
|
|
raise OSError('getaddrinfo() returned empty list')
|
|
|
|
|
|
|
|
for fam, _, pro, _, address in infos:
|
|
|
|
key = (fam, pro)
|
|
|
|
if key not in addr_infos:
|
|
|
|
addr_infos[key] = [None, None]
|
|
|
|
addr_infos[key][idx] = address
|
|
|
|
|
|
|
|
# each addr has to have info for each (family, proto) pair
|
|
|
|
addr_pairs_info = [
|
|
|
|
(key, addr_pair) for key, addr_pair in addr_infos.items()
|
|
|
|
if not ((local_addr and addr_pair[0] is None) or
|
|
|
|
(remote_addr and addr_pair[1] is None))]
|
|
|
|
|
|
|
|
if not addr_pairs_info:
|
|
|
|
raise ValueError('can not get address information')
|
|
|
|
|
|
|
|
exceptions = []
|
|
|
|
|
|
|
|
for ((family, proto),
|
|
|
|
(local_address, remote_address)) in addr_pairs_info:
|
|
|
|
sock = None
|
|
|
|
r_addr = None
|
|
|
|
try:
|
|
|
|
sock = socket.socket(
|
|
|
|
family=family, type=socket.SOCK_DGRAM, proto=proto)
|
|
|
|
if reuse_port:
|
2016-09-15 16:45:07 -03:00
|
|
|
_set_reuseport(sock)
|
2015-10-05 13:15:28 -03:00
|
|
|
if allow_broadcast:
|
|
|
|
sock.setsockopt(
|
|
|
|
socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
|
|
|
|
sock.setblocking(False)
|
|
|
|
|
|
|
|
if local_addr:
|
|
|
|
sock.bind(local_address)
|
|
|
|
if remote_addr:
|
2019-05-07 14:18:49 -03:00
|
|
|
if not allow_broadcast:
|
|
|
|
await self.sock_connect(sock, remote_address)
|
2015-10-05 13:15:28 -03:00
|
|
|
r_addr = remote_address
|
|
|
|
except OSError as exc:
|
|
|
|
if sock is not None:
|
|
|
|
sock.close()
|
|
|
|
exceptions.append(exc)
|
|
|
|
except:
|
|
|
|
if sock is not None:
|
|
|
|
sock.close()
|
|
|
|
raise
|
|
|
|
else:
|
|
|
|
break
|
|
|
|
else:
|
|
|
|
raise exceptions[0]
|
2013-10-17 17:40:50 -03:00
|
|
|
|
|
|
|
protocol = protocol_factory()
|
2016-05-16 16:38:39 -03:00
|
|
|
waiter = self.create_future()
|
2015-10-05 13:15:28 -03:00
|
|
|
transport = self._make_datagram_transport(
|
|
|
|
sock, protocol, r_addr, waiter)
|
2014-07-11 22:11:53 -03:00
|
|
|
if self._debug:
|
|
|
|
if local_addr:
|
|
|
|
logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
|
|
|
|
"created: (%r, %r)",
|
|
|
|
local_addr, remote_addr, transport, protocol)
|
|
|
|
else:
|
|
|
|
logger.debug("Datagram endpoint remote_addr=%r created: "
|
|
|
|
"(%r, %r)",
|
|
|
|
remote_addr, transport, protocol)
|
2015-01-26 06:02:18 -04:00
|
|
|
|
|
|
|
try:
|
2017-12-08 18:23:48 -04:00
|
|
|
await waiter
|
2015-01-26 06:02:18 -04:00
|
|
|
except:
|
|
|
|
transport.close()
|
|
|
|
raise
|
|
|
|
|
2013-10-17 17:40:50 -03:00
|
|
|
return transport, protocol
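# Example (illustrative sketch, not part of this module): a UDP echo
# listener; the local address is a hypothetical placeholder.
#
#     class EchoDatagram(asyncio.DatagramProtocol):
#         def connection_made(self, transport):
#             self.transport = transport
#         def datagram_received(self, data, addr):
#             self.transport.sendto(data, addr)
#
#     transport, protocol = await loop.create_datagram_endpoint(
#         EchoDatagram, local_addr=('127.0.0.1', 9999))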
|
|
|
|
|
2017-12-14 21:53:26 -04:00
|
|
|
async def _ensure_resolved(self, address, *,
|
|
|
|
family=0, type=socket.SOCK_STREAM,
|
|
|
|
proto=0, flags=0, loop):
|
|
|
|
host, port = address[:2]
|
2019-05-17 05:28:39 -03:00
|
|
|
info = _ipaddr_info(host, port, family, type, proto, *address[2:])
|
2017-12-14 21:53:26 -04:00
|
|
|
if info is not None:
|
|
|
|
# "host" is already a resolved IP.
|
|
|
|
return [info]
|
|
|
|
else:
|
|
|
|
return await loop.getaddrinfo(host, port, family=family, type=type,
|
|
|
|
proto=proto, flags=flags)
|
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
async def _create_server_getaddrinfo(self, host, port, family, flags):
|
2017-12-14 21:53:26 -04:00
|
|
|
infos = await self._ensure_resolved((host, port), family=family,
|
|
|
|
type=socket.SOCK_STREAM,
|
|
|
|
flags=flags, loop=self)
|
2015-09-21 13:33:43 -03:00
|
|
|
if not infos:
|
2017-12-10 19:36:12 -04:00
|
|
|
raise OSError(f'getaddrinfo({host!r}) returned empty list')
|
2015-09-21 13:33:43 -03:00
|
|
|
return infos
|
|
|
|
|
2017-12-19 15:45:42 -04:00
|
|
|
async def create_server(
|
|
|
|
self, protocol_factory, host=None, port=None,
|
|
|
|
*,
|
|
|
|
family=socket.AF_UNSPEC,
|
|
|
|
flags=socket.AI_PASSIVE,
|
|
|
|
sock=None,
|
|
|
|
backlog=100,
|
|
|
|
ssl=None,
|
2021-12-12 05:47:01 -04:00
|
|
|
reuse_address=None,
|
2017-12-19 15:45:42 -04:00
|
|
|
reuse_port=None,
|
2023-12-12 23:23:29 -04:00
|
|
|
keep_alive=None,
|
2018-01-25 19:08:09 -04:00
|
|
|
ssl_handshake_timeout=None,
|
2022-02-15 09:04:00 -04:00
|
|
|
ssl_shutdown_timeout=None,
|
2018-01-25 19:08:09 -04:00
|
|
|
start_serving=True):
|
2015-09-21 13:33:43 -03:00
|
|
|
"""Create a TCP server.
|
|
|
|
|
2017-12-10 19:36:12 -04:00
|
|
|
The host parameter can be a string; in that case the TCP server is
|
|
|
|
bound to host and port.
|
2015-09-21 13:33:43 -03:00
|
|
|
|
|
|
|
The host parameter can also be a sequence of strings and in that case
|
2016-03-02 12:17:01 -04:00
|
|
|
the TCP server is bound to all hosts of the sequence. If a host
|
|
|
|
appears multiple times (possibly indirectly e.g. when hostnames
|
|
|
|
resolve to the same IP address), the server is only bound once to that
|
|
|
|
host.
|
2014-06-19 12:11:49 -03:00
|
|
|
|
2014-07-14 13:33:40 -03:00
|
|
|
Return a Server object which can be used to stop the service.
|
2014-06-19 12:11:49 -03:00
|
|
|
|
|
|
|
This method is a coroutine.
|
|
|
|
"""
|
2013-11-01 18:22:30 -03:00
|
|
|
if isinstance(ssl, bool):
|
|
|
|
raise TypeError('ssl argument must be an SSLContext or None')
|
2017-12-20 14:24:43 -04:00
|
|
|
|
|
|
|
if ssl_handshake_timeout is not None and ssl is None:
|
|
|
|
raise ValueError(
|
|
|
|
'ssl_handshake_timeout is only meaningful with ssl')
|
|
|
|
|
2022-02-15 09:04:00 -04:00
|
|
|
if ssl_shutdown_timeout is not None and ssl is None:
|
|
|
|
raise ValueError(
|
|
|
|
'ssl_shutdown_timeout is only meaningful with ssl')
|
|
|
|
|
2022-02-20 08:17:15 -04:00
|
|
|
if sock is not None:
|
|
|
|
_check_ssl_socket(sock)
|
|
|
|
|
2013-10-17 17:40:50 -03:00
|
|
|
if host is not None or port is not None:
|
|
|
|
if sock is not None:
|
|
|
|
raise ValueError(
|
|
|
|
'host/port and sock can not be specified at the same time')
|
|
|
|
|
2021-12-12 05:47:01 -04:00
|
|
|
if reuse_address is None:
|
|
|
|
reuse_address = os.name == "posix" and sys.platform != "cygwin"
|
2013-10-17 17:40:50 -03:00
|
|
|
sockets = []
|
|
|
|
if host == '':
|
2015-09-21 13:33:43 -03:00
|
|
|
hosts = [None]
|
|
|
|
elif (isinstance(host, str) or
|
2017-04-24 03:05:00 -03:00
|
|
|
not isinstance(host, collections.abc.Iterable)):
|
2015-09-21 13:33:43 -03:00
|
|
|
hosts = [host]
|
|
|
|
else:
|
|
|
|
hosts = host
|
2013-10-17 17:40:50 -03:00
|
|
|
|
2015-09-21 13:33:43 -03:00
|
|
|
fs = [self._create_server_getaddrinfo(host, port, family=family,
|
|
|
|
flags=flags)
|
|
|
|
for host in hosts]
|
2020-11-28 04:21:17 -04:00
|
|
|
infos = await tasks.gather(*fs)
|
2016-03-02 12:17:01 -04:00
|
|
|
infos = set(itertools.chain.from_iterable(infos))
|
2013-10-17 17:40:50 -03:00
|
|
|
|
|
|
|
completed = False
|
|
|
|
try:
|
|
|
|
for res in infos:
|
|
|
|
af, socktype, proto, canonname, sa = res
|
2013-10-19 21:04:25 -03:00
|
|
|
try:
|
|
|
|
sock = socket.socket(af, socktype, proto)
|
|
|
|
except socket.error:
|
|
|
|
# Assume it's a bad family/type/protocol combination.
|
2014-08-25 18:20:52 -03:00
|
|
|
if self._debug:
|
|
|
|
logger.warning('create_server() failed to create '
|
|
|
|
'socket.socket(%r, %r, %r)',
|
|
|
|
af, socktype, proto, exc_info=True)
|
2013-10-19 21:04:25 -03:00
|
|
|
continue
|
2013-10-17 17:40:50 -03:00
|
|
|
sockets.append(sock)
|
2021-12-12 05:47:01 -04:00
|
|
|
if reuse_address:
|
|
|
|
sock.setsockopt(
|
|
|
|
socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
|
2015-10-05 13:15:28 -03:00
|
|
|
if reuse_port:
|
2016-09-15 16:45:07 -03:00
|
|
|
_set_reuseport(sock)
|
2023-12-12 23:23:29 -04:00
|
|
|
if keep_alive:
|
|
|
|
sock.setsockopt(
|
|
|
|
socket.SOL_SOCKET, socket.SO_KEEPALIVE, True)
|
2013-10-17 17:40:50 -03:00
|
|
|
# Disable IPv4/IPv6 dual stack support (enabled by
|
|
|
|
# default on Linux) which makes a single socket
|
|
|
|
# listen on both address families.
|
2018-06-28 22:59:32 -03:00
|
|
|
if (_HAS_IPv6 and
|
|
|
|
af == socket.AF_INET6 and
|
|
|
|
hasattr(socket, 'IPPROTO_IPV6')):
|
2013-10-17 17:40:50 -03:00
|
|
|
sock.setsockopt(socket.IPPROTO_IPV6,
|
|
|
|
socket.IPV6_V6ONLY,
|
|
|
|
True)
|
|
|
|
try:
|
|
|
|
sock.bind(sa)
|
|
|
|
except OSError as err:
|
2024-01-22 12:40:35 -04:00
|
|
|
msg = ('error while attempting '
|
|
|
|
'to bind on address %r: %s'
|
2024-07-25 08:56:04 -03:00
|
|
|
% (sa, str(err).lower()))
|
2024-01-22 12:40:35 -04:00
|
|
|
if err.errno == errno.EADDRNOTAVAIL:
|
|
|
|
# Assume the family is not enabled (bpo-30945)
|
|
|
|
sockets.pop()
|
|
|
|
sock.close()
|
|
|
|
if self._debug:
|
|
|
|
logger.warning(msg)
|
|
|
|
continue
|
|
|
|
raise OSError(err.errno, msg) from None
|
|
|
|
|
|
|
|
if not sockets:
|
|
|
|
raise OSError('could not bind on any address out of %r'
|
|
|
|
% ([info[4] for info in infos],))
|
|
|
|
|
2013-10-17 17:40:50 -03:00
|
|
|
completed = True
|
|
|
|
finally:
|
|
|
|
if not completed:
|
|
|
|
for sock in sockets:
|
|
|
|
sock.close()
|
|
|
|
else:
|
|
|
|
if sock is None:
|
2014-07-14 13:33:40 -03:00
|
|
|
raise ValueError('Neither host/port nor sock was specified')
|
2017-12-19 07:44:37 -04:00
|
|
|
if sock.type != socket.SOCK_STREAM:
|
2017-12-10 19:36:12 -04:00
|
|
|
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
|
2013-10-17 17:40:50 -03:00
|
|
|
sockets = [sock]
|
|
|
|
|
|
|
|
for sock in sockets:
|
|
|
|
sock.setblocking(False)
|
2018-01-25 19:08:09 -04:00
|
|
|
|
|
|
|
server = Server(self, sockets, protocol_factory,
|
2022-02-15 09:04:00 -04:00
|
|
|
ssl, backlog, ssl_handshake_timeout,
|
|
|
|
ssl_shutdown_timeout)
|
2018-01-25 19:08:09 -04:00
|
|
|
if start_serving:
|
|
|
|
server._start_serving()
|
2018-05-28 15:31:28 -03:00
|
|
|
# Skip one loop iteration so that all 'loop.add_reader'
|
|
|
|
# calls go through.
|
2020-11-28 04:21:17 -04:00
|
|
|
await tasks.sleep(0)
|
2018-01-25 19:08:09 -04:00
|
|
|
|
2014-07-11 22:11:53 -03:00
|
|
|
if self._debug:
|
|
|
|
logger.info("%r is serving", server)
|
2013-10-17 17:40:50 -03:00
|
|
|
return server
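# Example (illustrative sketch, not part of this module): serving the
# hypothetical EchoProtocol on a local address, deferring accept until
# serve_forever() runs.
#
#     server = await loop.create_server(
#         EchoProtocol, host='127.0.0.1', port=8888,
#         start_serving=False)
#     async with server:
#         await server.serve_forever()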
|
|
|
|
|
2017-12-19 15:45:42 -04:00
|
|
|
async def connect_accepted_socket(
|
|
|
|
self, protocol_factory, sock,
|
|
|
|
*, ssl=None,
|
2022-02-15 09:04:00 -04:00
|
|
|
ssl_handshake_timeout=None,
|
|
|
|
ssl_shutdown_timeout=None):
|
2017-12-19 07:44:37 -04:00
|
|
|
if sock.type != socket.SOCK_STREAM:
|
2017-12-10 19:36:12 -04:00
|
|
|
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
|
2016-11-09 16:47:00 -04:00
|
|
|
|
2017-12-20 14:24:43 -04:00
|
|
|
if ssl_handshake_timeout is not None and not ssl:
|
|
|
|
raise ValueError(
|
|
|
|
'ssl_handshake_timeout is only meaningful with ssl')
|
|
|
|
|
2022-02-15 09:04:00 -04:00
|
|
|
if ssl_shutdown_timeout is not None and not ssl:
|
|
|
|
raise ValueError(
|
|
|
|
'ssl_shutdown_timeout is only meaningful with ssl')
|
|
|
|
|
2022-02-20 08:17:15 -04:00
|
|
|
if sock is not None:
|
|
|
|
_check_ssl_socket(sock)
|
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
transport, protocol = await self._create_connection_transport(
|
2017-12-19 15:45:42 -04:00
|
|
|
sock, protocol_factory, ssl, '', server_side=True,
|
2022-02-15 09:04:00 -04:00
|
|
|
ssl_handshake_timeout=ssl_handshake_timeout,
|
|
|
|
ssl_shutdown_timeout=ssl_shutdown_timeout)
|
2016-07-12 19:23:10 -03:00
|
|
|
if self._debug:
|
|
|
|
# Get the socket from the transport because SSL transport closes
|
|
|
|
# the old socket and creates a new SSL socket
|
|
|
|
sock = transport.get_extra_info('socket')
|
|
|
|
logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
|
|
|
|
return transport, protocol
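# Example (illustrative sketch, not part of this module): handing a
# socket accepted outside asyncio (e.g. by a legacy accept() loop) to
# the event loop; `listening_sock` is hypothetical.
#
#     conn, _addr = listening_sock.accept()
#     transport, protocol = await loop.connect_accepted_socket(
#         EchoProtocol, conn)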
|
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
async def connect_read_pipe(self, protocol_factory, pipe):
|
2013-10-17 17:40:50 -03:00
|
|
|
protocol = protocol_factory()
|
2016-05-16 16:38:39 -03:00
|
|
|
waiter = self.create_future()
|
2013-10-17 17:40:50 -03:00
|
|
|
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
|
2015-01-26 06:02:18 -04:00
|
|
|
|
|
|
|
try:
|
2017-12-08 18:23:48 -04:00
|
|
|
await waiter
|
2015-01-26 06:02:18 -04:00
|
|
|
except:
|
|
|
|
transport.close()
|
|
|
|
raise
|
|
|
|
|
2014-07-14 13:33:40 -03:00
|
|
|
if self._debug:
|
|
|
|
logger.debug('Read pipe %r connected: (%r, %r)',
|
|
|
|
pipe.fileno(), transport, protocol)
|
2013-10-17 17:40:50 -03:00
|
|
|
return transport, protocol
|
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
async def connect_write_pipe(self, protocol_factory, pipe):
|
2013-10-17 17:40:50 -03:00
|
|
|
protocol = protocol_factory()
|
2016-05-16 16:38:39 -03:00
|
|
|
waiter = self.create_future()
|
2013-10-17 17:40:50 -03:00
|
|
|
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
|
2015-01-26 06:02:18 -04:00
|
|
|
|
|
|
|
try:
|
2017-12-08 18:23:48 -04:00
|
|
|
await waiter
|
2015-01-26 06:02:18 -04:00
|
|
|
except:
|
|
|
|
transport.close()
|
|
|
|
raise
|
|
|
|
|
2014-07-14 13:33:40 -03:00
|
|
|
if self._debug:
|
|
|
|
logger.debug('Write pipe %r connected: (%r, %r)',
|
|
|
|
pipe.fileno(), transport, protocol)
|
2013-10-17 17:40:50 -03:00
|
|
|
return transport, protocol
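# Example (illustrative sketch, not part of this module): wiring both
# ends of an os.pipe() into the loop (Unix selector event loop only).
#
#     import os
#     rfd, wfd = os.pipe()
#     read_transport, _ = await loop.connect_read_pipe(
#         asyncio.Protocol, os.fdopen(rfd, 'rb'))
#     write_transport, _ = await loop.connect_write_pipe(
#         asyncio.BaseProtocol, os.fdopen(wfd, 'wb'))
#     write_transport.write(b'data')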
|
|
|
|
|
2014-07-14 13:33:40 -03:00
|
|
|
def _log_subprocess(self, msg, stdin, stdout, stderr):
|
|
|
|
info = [msg]
|
|
|
|
if stdin is not None:
|
2017-12-10 19:36:12 -04:00
|
|
|
info.append(f'stdin={_format_pipe(stdin)}')
|
2014-07-14 13:33:40 -03:00
|
|
|
if stdout is not None and stderr == subprocess.STDOUT:
|
2017-12-10 19:36:12 -04:00
|
|
|
info.append(f'stdout=stderr={_format_pipe(stdout)}')
|
2014-07-14 13:33:40 -03:00
|
|
|
else:
|
|
|
|
if stdout is not None:
|
2017-12-10 19:36:12 -04:00
|
|
|
info.append(f'stdout={_format_pipe(stdout)}')
|
2014-07-14 13:33:40 -03:00
|
|
|
if stderr is not None:
|
2017-12-10 19:36:12 -04:00
|
|
|
info.append(f'stderr={_format_pipe(stderr)}')
|
2014-07-14 13:33:40 -03:00
|
|
|
logger.debug(' '.join(info))
|
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
async def subprocess_shell(self, protocol_factory, cmd, *,
|
|
|
|
stdin=subprocess.PIPE,
|
|
|
|
stdout=subprocess.PIPE,
|
|
|
|
stderr=subprocess.PIPE,
|
|
|
|
universal_newlines=False,
|
|
|
|
shell=True, bufsize=0,
|
2019-05-27 20:51:19 -03:00
|
|
|
encoding=None, errors=None, text=None,
|
2017-12-08 18:23:48 -04:00
|
|
|
**kwargs):
|
2014-02-11 06:44:56 -04:00
|
|
|
if not isinstance(cmd, (bytes, str)):
|
2014-01-29 18:35:15 -04:00
|
|
|
raise ValueError("cmd must be a string")
|
|
|
|
if universal_newlines:
|
|
|
|
raise ValueError("universal_newlines must be False")
|
|
|
|
if not shell:
|
2014-01-31 07:28:30 -04:00
|
|
|
raise ValueError("shell must be True")
|
2014-01-29 18:35:15 -04:00
|
|
|
if bufsize != 0:
|
|
|
|
raise ValueError("bufsize must be 0")
|
2019-05-27 20:51:19 -03:00
|
|
|
if text:
|
|
|
|
raise ValueError("text must be False")
|
|
|
|
if encoding is not None:
|
|
|
|
raise ValueError("encoding must be None")
|
|
|
|
if errors is not None:
|
|
|
|
raise ValueError("errors must be None")
|
|
|
|
|
2013-10-17 17:40:50 -03:00
|
|
|
protocol = protocol_factory()
|
2018-06-08 19:24:37 -03:00
|
|
|
debug_log = None
|
2014-07-14 13:33:40 -03:00
|
|
|
if self._debug:
|
|
|
|
# don't log parameters: they may contain sensitive information
|
|
|
|
# (password) and may be too long
|
|
|
|
debug_log = f'run shell command {cmd!r}'
|
|
|
|
self._log_subprocess(debug_log, stdin, stdout, stderr)
|
2017-12-08 18:23:48 -04:00
|
|
|
transport = await self._make_subprocess_transport(
|
2013-10-17 17:40:50 -03:00
|
|
|
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
|
2018-06-08 19:24:37 -03:00
|
|
|
if self._debug and debug_log is not None:
|
2016-08-31 04:22:29 -03:00
|
|
|
logger.info('%s: %r', debug_log, transport)
|
2013-10-17 17:40:50 -03:00
|
|
|
return transport, protocol
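# Example (illustrative sketch, not part of this module): running a
# shell pipeline; PrintProtocol is a hypothetical protocol factory.
#
#     class PrintProtocol(asyncio.SubprocessProtocol):
#         def pipe_data_received(self, fd, data):
#             print(fd, data)
#
#     transport, protocol = await loop.subprocess_shell(
#         PrintProtocol, 'ls | wc -l')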
|
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
async def subprocess_exec(self, protocol_factory, program, *args,
|
|
|
|
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
|
|
|
|
stderr=subprocess.PIPE, universal_newlines=False,
|
2019-05-27 20:51:19 -03:00
|
|
|
shell=False, bufsize=0,
|
|
|
|
encoding=None, errors=None, text=None,
|
|
|
|
**kwargs):
|
2014-01-29 18:35:15 -04:00
|
|
|
if universal_newlines:
|
|
|
|
raise ValueError("universal_newlines must be False")
|
|
|
|
if shell:
|
|
|
|
raise ValueError("shell must be False")
|
|
|
|
if bufsize != 0:
|
|
|
|
raise ValueError("bufsize must be 0")
|
2019-05-27 20:51:19 -03:00
|
|
|
if text:
|
|
|
|
raise ValueError("text must be False")
|
|
|
|
if encoding is not None:
|
|
|
|
raise ValueError("encoding must be None")
|
|
|
|
if errors is not None:
|
|
|
|
raise ValueError("errors must be None")
|
|
|
|
|
2014-02-11 06:44:56 -04:00
|
|
|
popen_args = (program,) + args
|
2013-10-17 17:40:50 -03:00
|
|
|
protocol = protocol_factory()
|
2018-06-08 19:24:37 -03:00
|
|
|
debug_log = None
|
2014-07-14 13:33:40 -03:00
|
|
|
if self._debug:
|
|
|
|
# don't log parameters: they may contain sensitive information
|
|
|
|
# (password) and may be too long
|
2017-12-10 19:36:12 -04:00
|
|
|
debug_log = f'execute program {program!r}'
|
2014-07-14 13:33:40 -03:00
|
|
|
self._log_subprocess(debug_log, stdin, stdout, stderr)
|
2017-12-08 18:23:48 -04:00
|
|
|
transport = await self._make_subprocess_transport(
|
2014-02-18 23:56:15 -04:00
|
|
|
protocol, popen_args, False, stdin, stdout, stderr,
|
|
|
|
bufsize, **kwargs)
|
2018-06-08 19:24:37 -03:00
|
|
|
if self._debug and debug_log is not None:
|
2016-08-31 04:22:29 -03:00
|
|
|
logger.info('%s: %r', debug_log, transport)
|
2013-10-17 17:40:50 -03:00
|
|
|
return transport, protocol
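# Example (illustrative sketch, not part of this module): like
# subprocess_shell() above but with an argument vector and no shell;
# PrintProtocol is the hypothetical protocol from the previous sketch.
#
#     transport, protocol = await loop.subprocess_exec(
#         PrintProtocol, 'echo', 'hello')
#     transport.close()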
|
|
|
|
|
2016-05-16 16:20:38 -03:00
|
|
|
def get_exception_handler(self):
|
|
|
|
"""Return an exception handler, or None if the default one is in use.
|
|
|
|
"""
|
|
|
|
return self._exception_handler
|
|
|
|
|
2014-02-18 19:02:19 -04:00
|
|
|
def set_exception_handler(self, handler):
|
|
|
|
"""Set handler as the new event loop exception handler.
|
|
|
|
|
|
|
|
If handler is None, the default exception handler will
|
|
|
|
be set.
|
|
|
|
|
|
|
|
If handler is a callable object, it should have a
|
2014-07-14 13:33:40 -03:00
|
|
|
signature matching '(loop, context)', where 'loop'
|
2014-02-18 19:02:19 -04:00
|
|
|
will be a reference to the active event loop, 'context'
|
|
|
|
will be a dict object (see `call_exception_handler()`
|
|
|
|
documentation for details about context).
|
|
|
|
"""
|
|
|
|
if handler is not None and not callable(handler):
|
2017-12-10 19:36:12 -04:00
|
|
|
raise TypeError(f'A callable object or None is expected, '
|
|
|
|
f'got {handler!r}')
|
2014-02-18 19:02:19 -04:00
|
|
|
self._exception_handler = handler
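# Example (illustrative sketch, not part of this module): a custom
# handler that logs and then defers to the default behaviour.
#
#     def handler(loop, context):
#         print('caught:', context.get('message'))
#         loop.default_exception_handler(context)
#
#     loop.set_exception_handler(handler)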
|
|
|
|
|
|
|
|
def default_exception_handler(self, context):
|
|
|
|
"""Default exception handler.
|
|
|
|
|
|
|
|
This is called when an exception occurs and no exception
|
|
|
|
handler is set, and can be called by a custom exception
|
|
|
|
handler that wants to defer to the default behavior.
|
|
|
|
|
2017-11-07 12:23:29 -04:00
|
|
|
This default handler logs the error message and other
|
|
|
|
context-dependent information. In debug mode, a truncated
|
|
|
|
stack trace is also appended showing where the given object
|
|
|
|
(e.g. a handle or future or task) was created, if any.
|
|
|
|
|
2014-07-14 13:33:40 -03:00
|
|
|
The context parameter has the same meaning as in
|
2014-02-18 19:02:19 -04:00
|
|
|
`call_exception_handler()`.
|
|
|
|
"""
|
|
|
|
message = context.get('message')
|
|
|
|
if not message:
|
|
|
|
message = 'Unhandled exception in event loop'
|
|
|
|
|
|
|
|
exception = context.get('exception')
|
|
|
|
if exception is not None:
|
|
|
|
exc_info = (type(exception), exception, exception.__traceback__)
|
|
|
|
else:
|
|
|
|
exc_info = False
|
|
|
|
|
2017-12-10 19:36:12 -04:00
|
|
|
if ('source_traceback' not in context and
|
|
|
|
self._current_handle is not None and
|
|
|
|
self._current_handle._source_traceback):
|
|
|
|
context['handle_traceback'] = (
|
|
|
|
self._current_handle._source_traceback)
|
2015-01-26 06:05:12 -04:00
|
|
|
|
2014-02-18 19:02:19 -04:00
|
|
|
log_lines = [message]
|
|
|
|
for key in sorted(context):
|
|
|
|
if key in {'message', 'exception'}:
|
|
|
|
continue
|
2014-06-27 08:52:20 -03:00
|
|
|
value = context[key]
|
|
|
|
if key == 'source_traceback':
|
|
|
|
tb = ''.join(traceback.format_list(value))
|
|
|
|
value = 'Object created at (most recent call last):\n'
|
|
|
|
value += tb.rstrip()
|
2015-01-26 06:05:12 -04:00
|
|
|
elif key == 'handle_traceback':
|
|
|
|
tb = ''.join(traceback.format_list(value))
|
|
|
|
value = 'Handle created at (most recent call last):\n'
|
|
|
|
value += tb.rstrip()
|
2014-06-27 08:52:20 -03:00
|
|
|
else:
|
|
|
|
value = repr(value)
|
2017-12-10 19:36:12 -04:00
|
|
|
log_lines.append(f'{key}: {value}')
|
2014-02-18 19:02:19 -04:00
|
|
|
|
|
|
|
logger.error('\n'.join(log_lines), exc_info=exc_info)
|
|
|
|
|
|
|
|
def call_exception_handler(self, context):
|
2014-07-14 13:33:40 -03:00
|
|
|
"""Call the current event loop's exception handler.
|
|
|
|
|
|
|
|
The context argument is a dict containing the following keys:
|
2014-02-18 19:02:19 -04:00
|
|
|
|
|
|
|
- 'message': Error message;
|
|
|
|
- 'exception' (optional): Exception object;
|
|
|
|
- 'future' (optional): Future instance;
|
2018-01-21 15:56:59 -04:00
|
|
|
- 'task' (optional): Task instance;
|
2014-02-18 19:02:19 -04:00
|
|
|
- 'handle' (optional): Handle instance;
|
|
|
|
- 'protocol' (optional): Protocol instance;
|
|
|
|
- 'transport' (optional): Transport instance;
|
2016-09-09 02:01:51 -03:00
|
|
|
- 'socket' (optional): Socket instance;
|
|
|
|
- 'asyncgen' (optional): Asynchronous generator that caused
|
|
|
|
the exception.
|
2014-02-18 19:02:19 -04:00
|
|
|
|
2014-07-14 13:33:40 -03:00
|
|
|
New keys may be introduced in the future.
|
|
|
|
|
|
|
|
Note: do not overload this method in an event loop subclass.
|
|
|
|
For custom exception handling, use the
|
2014-02-18 19:02:19 -04:00
|
|
|
`set_exception_handler()` method.
|
|
|
|
"""
|
|
|
|
if self._exception_handler is None:
|
|
|
|
try:
|
|
|
|
self.default_exception_handler(context)
|
2019-05-27 09:45:12 -03:00
|
|
|
except (SystemExit, KeyboardInterrupt):
|
|
|
|
raise
|
|
|
|
except BaseException:
|
2014-02-18 19:02:19 -04:00
|
|
|
# Second protection layer for unexpected errors
|
|
|
|
# in the default implementation, as well as for subclassed
|
|
|
|
# event loops with overloaded "default_exception_handler".
|
|
|
|
logger.error('Exception in default exception handler',
|
|
|
|
exc_info=True)
|
|
|
|
else:
|
|
|
|
try:
|
2022-10-05 03:49:10 -03:00
|
|
|
ctx = None
|
|
|
|
thing = context.get("task")
|
|
|
|
if thing is None:
|
|
|
|
# Even though Futures don't have a context,
|
|
|
|
# Task is a subclass of Future,
|
|
|
|
# and sometimes the 'future' key holds a Task.
|
|
|
|
thing = context.get("future")
|
|
|
|
if thing is None:
|
|
|
|
# Handles also have a context.
|
|
|
|
thing = context.get("handle")
|
|
|
|
if thing is not None and hasattr(thing, "get_context"):
|
|
|
|
ctx = thing.get_context()
|
|
|
|
if ctx is not None and hasattr(ctx, "run"):
|
|
|
|
ctx.run(self._exception_handler, self, context)
|
|
|
|
else:
|
|
|
|
self._exception_handler(self, context)
|
2019-05-27 09:45:12 -03:00
|
|
|
except (SystemExit, KeyboardInterrupt):
|
|
|
|
raise
|
|
|
|
except BaseException as exc:
|
2014-02-18 19:02:19 -04:00
|
|
|
# Exception in the user set custom exception handler.
|
|
|
|
try:
|
|
|
|
# Let's try default handler.
|
|
|
|
self.default_exception_handler({
|
|
|
|
'message': 'Unhandled error in exception handler',
|
|
|
|
'exception': exc,
|
|
|
|
'context': context,
|
|
|
|
})
|
2019-05-27 09:45:12 -03:00
|
|
|
except (SystemExit, KeyboardInterrupt):
|
|
|
|
raise
|
|
|
|
except BaseException:
|
2014-07-14 13:33:40 -03:00
|
|
|
# Guard 'default_exception_handler' in case it is
|
2014-02-18 19:02:19 -04:00
|
|
|
# overloaded.
|
|
|
|
logger.error('Exception in default exception handler '
|
|
|
|
'while handling an unexpected error '
|
|
|
|
'in custom exception handler',
|
|
|
|
exc_info=True)
|
|
|
|
|
2013-10-17 17:40:50 -03:00
|
|
|
def _add_callback(self, handle):
|
2023-01-21 05:16:07 -04:00
|
|
|
"""Add a Handle to _ready."""
|
|
|
|
if not handle._cancelled:
|
|
|
|
self._ready.append(handle)
|
2013-10-17 17:40:50 -03:00
|
|
|
|
|
|
|
def _add_callback_signalsafe(self, handle):
|
|
|
|
"""Like _add_callback() but called from a signal handler."""
|
|
|
|
self._add_callback(handle)
|
|
|
|
self._write_to_self()
|
|
|
|
|
2014-09-25 13:07:56 -03:00
|
|
|
def _timer_handle_cancelled(self, handle):
|
|
|
|
"""Notification that a TimerHandle has been cancelled."""
|
|
|
|
if handle._scheduled:
|
|
|
|
self._timer_cancelled_count += 1
|
|
|
|
|
2013-10-17 17:40:50 -03:00
|
|
|
def _run_once(self):
|
|
|
|
"""Run one full iteration of the event loop.
|
|
|
|
|
|
|
|
This calls all currently ready callbacks, polls for I/O,
|
|
|
|
schedules the resulting callbacks, and finally schedules
|
|
|
|
'call_later' callbacks.
|
|
|
|
"""
|
2014-09-25 13:07:56 -03:00
|
|
|
|
|
|
|
sched_count = len(self._scheduled)
|
|
|
|
if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
|
|
|
|
self._timer_cancelled_count / sched_count >
|
|
|
|
_MIN_CANCELLED_TIMER_HANDLES_FRACTION):
|
2014-09-30 13:08:36 -03:00
|
|
|
# Remove delayed calls that were cancelled if their number
|
|
|
|
# is too high
|
|
|
|
new_scheduled = []
|
2014-09-25 13:07:56 -03:00
|
|
|
for handle in self._scheduled:
|
|
|
|
if handle._cancelled:
|
|
|
|
handle._scheduled = False
|
2014-09-30 13:08:36 -03:00
|
|
|
else:
|
|
|
|
new_scheduled.append(handle)
|
2014-09-25 13:07:56 -03:00
|
|
|
|
2014-09-30 13:08:36 -03:00
|
|
|
heapq.heapify(new_scheduled)
|
|
|
|
self._scheduled = new_scheduled
|
2014-09-25 13:07:56 -03:00
|
|
|
self._timer_cancelled_count = 0
|
|
|
|
else:
|
|
|
|
# Remove delayed calls that were cancelled from head of queue.
|
|
|
|
while self._scheduled and self._scheduled[0]._cancelled:
|
|
|
|
self._timer_cancelled_count -= 1
|
|
|
|
handle = heapq.heappop(self._scheduled)
|
|
|
|
handle._scheduled = False
|
2013-10-17 17:40:50 -03:00
|
|
|
|
|
|
|
timeout = None
|
2015-11-19 17:28:47 -04:00
|
|
|
if self._ready or self._stopping:
|
2013-10-17 17:40:50 -03:00
|
|
|
timeout = 0
|
|
|
|
elif self._scheduled:
|
|
|
|
# Compute the desired timeout.
|
2023-10-11 17:59:27 -03:00
|
|
|
timeout = self._scheduled[0]._when - self.time()
|
|
|
|
if timeout > MAXIMUM_SELECT_TIMEOUT:
|
|
|
|
timeout = MAXIMUM_SELECT_TIMEOUT
|
|
|
|
elif timeout < 0:
|
|
|
|
timeout = 0
|
2013-10-17 17:40:50 -03:00
|
|
|
|
2018-09-30 02:28:40 -03:00
|
|
|
event_list = self._selector.select(timeout)
|
2013-10-17 17:40:50 -03:00
|
|
|
self._process_events(event_list)
|
2022-11-22 11:06:20 -04:00
|
|
|
# Needed to break cycles when an exception occurs.
|
|
|
|
event_list = None
|
2013-10-17 17:40:50 -03:00
|
|
|
|
|
|
|
# Handle 'later' callbacks that are ready.
|
2014-02-10 18:42:32 -04:00
|
|
|
end_time = self.time() + self._clock_resolution
|
2013-10-17 17:40:50 -03:00
|
|
|
while self._scheduled:
|
|
|
|
handle = self._scheduled[0]
|
2014-02-10 18:42:32 -04:00
|
|
|
if handle._when >= end_time:
|
2013-10-17 17:40:50 -03:00
|
|
|
break
|
|
|
|
handle = heapq.heappop(self._scheduled)
|
2014-09-25 13:07:56 -03:00
|
|
|
handle._scheduled = False
|
2013-10-17 17:40:50 -03:00
|
|
|
self._ready.append(handle)
|
|
|
|
|
|
|
|
# This is the only place where callbacks are actually *called*.
|
|
|
|
# All other places just add them to ready.
|
|
|
|
# Note: We run all currently scheduled callbacks, but not any
|
|
|
|
# callbacks scheduled by callbacks run this time around --
|
|
|
|
# they will be run the next time (after another I/O poll).
|
2014-07-14 13:33:40 -03:00
|
|
|
# Use an idiom that is thread-safe without using locks.
|
2013-10-17 17:40:50 -03:00
|
|
|
ntodo = len(self._ready)
|
|
|
|
for i in range(ntodo):
|
|
|
|
handle = self._ready.popleft()
|
2014-06-20 12:34:15 -03:00
|
|
|
if handle._cancelled:
|
|
|
|
continue
|
|
|
|
if self._debug:
|
2015-01-26 06:05:12 -04:00
|
|
|
try:
|
|
|
|
self._current_handle = handle
|
|
|
|
t0 = self.time()
|
|
|
|
handle._run()
|
|
|
|
dt = self.time() - t0
|
|
|
|
if dt >= self.slow_callback_duration:
|
|
|
|
logger.warning('Executing %s took %.3f seconds',
|
|
|
|
_format_handle(handle), dt)
|
|
|
|
finally:
|
|
|
|
self._current_handle = None
|
2014-06-20 12:34:15 -03:00
|
|
|
else:
|
2013-10-17 17:40:50 -03:00
|
|
|
handle._run()
|
|
|
|
handle = None # Needed to break cycles when an exception occurs.
|
2014-02-19 18:15:02 -04:00
|
|
|
|
2018-01-21 10:44:07 -04:00
|
|
|
def _set_coroutine_origin_tracking(self, enabled):
|
|
|
|
if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
|
2015-05-12 12:43:04 -03:00
|
|
|
return
|
|
|
|
|
|
|
|
if enabled:
|
2018-01-21 10:44:07 -04:00
|
|
|
self._coroutine_origin_tracking_saved_depth = (
|
|
|
|
sys.get_coroutine_origin_tracking_depth())
|
|
|
|
sys.set_coroutine_origin_tracking_depth(
|
|
|
|
constants.DEBUG_STACK_DEPTH)
|
2015-05-12 12:43:04 -03:00
|
|
|
else:
|
2018-01-21 10:44:07 -04:00
|
|
|
sys.set_coroutine_origin_tracking_depth(
|
|
|
|
self._coroutine_origin_tracking_saved_depth)
|
|
|
|
|
|
|
|
self._coroutine_origin_tracking_enabled = enabled
|
2015-05-12 12:43:04 -03:00
|
|
|
|
2014-02-19 18:15:02 -04:00
|
|
|
def get_debug(self):
|
|
|
|
return self._debug
|
|
|
|
|
|
|
|
def set_debug(self, enabled):
|
|
|
|
self._debug = enabled
|
2015-05-11 23:27:25 -03:00
|
|
|
|
2015-05-12 12:43:04 -03:00
|
|
|
if self.is_running():
|
2018-01-21 10:44:07 -04:00
|
|
|
self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
|