2017-12-10 19:36:12 -04:00
|
|
|
__all__ = (
|
2019-09-30 01:59:55 -03:00
|
|
|
'StreamReader', 'StreamWriter', 'StreamReaderProtocol',
|
|
|
|
'open_connection', 'start_server')
|
2013-10-17 17:40:50 -03:00
|
|
|
|
2022-08-29 15:31:11 -03:00
|
|
|
import collections
|
2014-02-18 13:15:06 -04:00
|
|
|
import socket
|
2018-09-12 15:43:04 -03:00
|
|
|
import sys
|
2023-08-05 09:18:15 -03:00
|
|
|
import warnings
|
2018-09-12 15:43:04 -03:00
|
|
|
import weakref
|
2014-02-18 13:15:06 -04:00
|
|
|
|
2014-02-18 14:24:30 -04:00
|
|
|
if hasattr(socket, 'AF_UNIX'):
|
2019-09-30 01:59:55 -03:00
|
|
|
__all__ += ('open_unix_connection', 'start_unix_server')
|
2014-02-18 14:24:30 -04:00
|
|
|
|
2014-06-28 19:46:45 -03:00
|
|
|
from . import coroutines
|
2013-10-17 17:40:50 -03:00
|
|
|
from . import events
|
2018-09-11 14:13:04 -03:00
|
|
|
from . import exceptions
|
2018-09-12 15:43:04 -03:00
|
|
|
from . import format_helpers
|
2013-10-17 17:40:50 -03:00
|
|
|
from . import protocols
|
2014-07-14 13:33:40 -03:00
|
|
|
from .log import logger
|
2019-09-30 01:59:55 -03:00
|
|
|
from .tasks import sleep
|
2013-10-17 17:40:50 -03:00
|
|
|
|
|
|
|
|
2018-05-29 11:02:07 -03:00
|
|
|
_DEFAULT_LIMIT = 2 ** 16 # 64 KiB
|
2013-10-17 17:40:50 -03:00
|
|
|
|
2014-01-30 20:05:28 -04:00
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
async def open_connection(host=None, port=None, *,
                          limit=_DEFAULT_LIMIT, **kwds):
    """Connect to (host, port) and return a (reader, writer) stream pair.

    A thin convenience wrapper around loop.create_connection(): the
    reader is a StreamReader whose buffer limit is set from *limit*,
    and the writer is a StreamWriter wrapping the resulting transport.

    All remaining keyword arguments are forwarded unchanged to
    create_connection().  Must be called from a running event loop.

    (If you want to customize the StreamReader and/or
    StreamReaderProtocol classes, just copy the code -- there's
    really nothing special here except some convenience.)
    """
    running_loop = events.get_running_loop()
    stream_reader = StreamReader(limit=limit, loop=running_loop)
    stream_protocol = StreamReaderProtocol(stream_reader, loop=running_loop)
    conn_transport, _ = await running_loop.create_connection(
        lambda: stream_protocol, host, port, **kwds)
    stream_writer = StreamWriter(conn_transport, stream_protocol,
                                 stream_reader, running_loop)
    return stream_reader, stream_writer
|
2013-10-17 17:40:50 -03:00
|
|
|
|
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
async def start_server(client_connected_cb, host=None, port=None, *,
                       limit=_DEFAULT_LIMIT, **kwds):
    """Start a TCP server, invoking a callback for each client connected.

    `client_connected_cb` is called with two arguments for every
    accepted connection: a StreamReader and a StreamWriter for that
    client.  It may be a plain callable or a coroutine function; a
    coroutine is automatically scheduled as a Task.

    *limit* sets the StreamReader buffer limit used for each
    connection; every other keyword argument is passed straight
    through to loop.create_server().

    The return value is the same as loop.create_server(), i.e. a
    Server object which can be used to stop the service.
    """
    running_loop = events.get_running_loop()

    def protocol_factory():
        # One fresh reader/protocol pair per accepted connection.
        per_conn_reader = StreamReader(limit=limit, loop=running_loop)
        return StreamReaderProtocol(per_conn_reader, client_connected_cb,
                                    loop=running_loop)

    return await running_loop.create_server(
        protocol_factory, host, port, **kwds)
|
2013-11-19 15:43:38 -04:00
|
|
|
|
|
|
|
|
2014-02-18 13:15:06 -04:00
|
|
|
if hasattr(socket, 'AF_UNIX'):
    # UNIX domain sockets are available on this platform.

    async def open_unix_connection(path=None, *,
                                   limit=_DEFAULT_LIMIT, **kwds):
        """Similar to `open_connection` but works with UNIX Domain Sockets."""
        running_loop = events.get_running_loop()

        unix_reader = StreamReader(limit=limit, loop=running_loop)
        unix_protocol = StreamReaderProtocol(unix_reader, loop=running_loop)
        unix_transport, _ = await running_loop.create_unix_connection(
            lambda: unix_protocol, path, **kwds)
        unix_writer = StreamWriter(unix_transport, unix_protocol,
                                   unix_reader, running_loop)
        return unix_reader, unix_writer

    async def start_unix_server(client_connected_cb, path=None, *,
                                limit=_DEFAULT_LIMIT, **kwds):
        """Similar to `start_server` but works with UNIX Domain Sockets."""
        running_loop = events.get_running_loop()

        def protocol_factory():
            # One fresh reader/protocol pair per accepted connection.
            per_conn_reader = StreamReader(limit=limit, loop=running_loop)
            return StreamReaderProtocol(per_conn_reader, client_connected_cb,
                                        loop=running_loop)

        return await running_loop.create_unix_server(
            protocol_factory, path, **kwds)
|
2014-02-18 13:15:06 -04:00
|
|
|
|
|
|
|
|
2014-01-29 18:24:45 -04:00
|
|
|
class FlowControlMixin(protocols.Protocol):
    """Reusable flow control logic for StreamWriter.drain().

    Provides the transport flow-control callbacks pause_writing(),
    resume_writing() and connection_lost(); a subclass that overrides
    any of them must still call the base implementation.

    StreamWriter.drain() is expected to await the _drain_helper()
    coroutine defined here.
    """

    def __init__(self, loop=None):
        # Fall back to the current event loop when none is supplied.
        self._loop = events.get_event_loop() if loop is None else loop
        self._paused = False
        # Futures for every coroutine currently blocked in _drain_helper().
        self._drain_waiters = collections.deque()
        self._connection_lost = False

    def pause_writing(self):
        # The transport's write buffer went over the high-water mark:
        # make subsequent _drain_helper() calls block.
        assert not self._paused
        self._paused = True
        if self._loop.get_debug():
            logger.debug("%r pauses writing", self)

    def resume_writing(self):
        # Buffer drained below the low-water mark: release every
        # coroutine waiting in _drain_helper().
        assert self._paused
        self._paused = False
        if self._loop.get_debug():
            logger.debug("%r resumes writing", self)

        for fut in self._drain_waiters:
            if fut.done():
                continue
            fut.set_result(None)

    def connection_lost(self, exc):
        self._connection_lost = True
        # Wake up the writer(s) if currently paused.
        if not self._paused:
            return

        for fut in self._drain_waiters:
            if fut.done():
                continue
            if exc is None:
                fut.set_result(None)
            else:
                fut.set_exception(exc)

    async def _drain_helper(self):
        if self._connection_lost:
            raise ConnectionResetError('Connection lost')
        if not self._paused:
            # Writing is not paused; nothing to wait for.
            return
        fut = self._loop.create_future()
        self._drain_waiters.append(fut)
        try:
            # Completed (or failed) by resume_writing()/connection_lost().
            await fut
        finally:
            self._drain_waiters.remove(fut)

    def _get_close_waiter(self, stream):
        # Subclasses must supply the future that completes on close.
        raise NotImplementedError
|
|
|
|
|
2014-01-29 18:24:45 -04:00
|
|
|
|
|
|
|
class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
    """Helper class to adapt between Protocol and StreamReader.

    (This is a helper class instead of making StreamReader itself a
    Protocol subclass, because the StreamReader has other potential
    uses, and to prevent the user of the StreamReader to accidentally
    call inappropriate methods of the protocol.)
    """

    # Traceback captured when the reader was created (debug mode only);
    # copied from the StreamReader in __init__.
    _source_traceback = None

    def __init__(self, stream_reader, client_connected_cb=None, loop=None):
        super().__init__(loop=loop)
        if stream_reader is not None:
            # Hold the reader only weakly so that an abandoned stream
            # can be garbage collected (see connection_made's reject path).
            self._stream_reader_wr = weakref.ref(stream_reader)
            self._source_traceback = stream_reader._source_traceback
        else:
            self._stream_reader_wr = None
        if client_connected_cb is not None:
            # This is a stream created by the `create_server()` function.
            # Keep a strong reference to the reader until a connection
            # is established.
            self._strong_reader = stream_reader
        self._reject_connection = False
        self._task = None  # Task wrapping a coroutine client_connected_cb.
        self._transport = None
        self._client_connected_cb = client_connected_cb
        self._over_ssl = False
        self._closed = self._loop.create_future()  # Done on connection_lost().

    @property
    def _stream_reader(self):
        # Dereference the weakref; None once the reader was collected
        # or after connection_lost() cleared the reference.
        if self._stream_reader_wr is None:
            return None
        return self._stream_reader_wr()

    def _replace_transport(self, transport):
        # Called by StreamWriter.start_tls() to swap in the TLS transport.
        # NOTE(review): `loop` is assigned but never used in this method.
        loop = self._loop
        self._transport = transport
        self._over_ssl = transport.get_extra_info('sslcontext') is not None

    def connection_made(self, transport):
        if self._reject_connection:
            # The owning stream was garbage collected before the
            # connection completed; report it and abort the transport.
            context = {
                'message': ('An open stream was garbage collected prior to '
                            'establishing network connection; '
                            'call "stream.close()" explicitly.')
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
            transport.abort()
            return
        self._transport = transport
        reader = self._stream_reader
        if reader is not None:
            reader.set_transport(transport)
        self._over_ssl = transport.get_extra_info('sslcontext') is not None
        if self._client_connected_cb is not None:
            # Server-side stream: build the writer and hand both ends
            # to the user callback.
            writer = StreamWriter(transport, self, reader, self._loop)
            res = self._client_connected_cb(reader, writer)
            if coroutines.iscoroutine(res):
                def callback(task):
                    # Close the transport when the callback task is
                    # cancelled or fails, reporting unhandled errors.
                    if task.cancelled():
                        transport.close()
                        return
                    exc = task.exception()
                    if exc is not None:
                        self._loop.call_exception_handler({
                            'message': 'Unhandled exception in client_connected_cb',
                            'exception': exc,
                            'transport': transport,
                        })
                        transport.close()

                self._task = self._loop.create_task(res)
                self._task.add_done_callback(callback)

            # Connection established; the weakref alone now keeps the
            # reader reachable from this protocol.
            self._strong_reader = None

    def connection_lost(self, exc):
        # Propagate EOF or the error to the reader, complete the close
        # waiter, then drop all references so the stream can be collected.
        reader = self._stream_reader
        if reader is not None:
            if exc is None:
                reader.feed_eof()
            else:
                reader.set_exception(exc)
        if not self._closed.done():
            if exc is None:
                self._closed.set_result(None)
            else:
                self._closed.set_exception(exc)
        super().connection_lost(exc)
        self._stream_reader_wr = None
        # NOTE(review): _stream_writer is not assigned anywhere else in
        # this file; this line looks like a leftover — confirm.
        self._stream_writer = None
        self._task = None
        self._transport = None

    def data_received(self, data):
        # Forward incoming bytes to the reader, if it is still alive.
        reader = self._stream_reader
        if reader is not None:
            reader.feed_data(data)

    def eof_received(self):
        reader = self._stream_reader
        if reader is not None:
            reader.feed_eof()
        if self._over_ssl:
            # Prevent a warning in SSLProtocol.eof_received:
            # "returning true from eof_received()
            # has no effect when using ssl"
            return False
        return True

    def _get_close_waiter(self, stream):
        # Future awaited by StreamWriter.wait_closed().
        return self._closed

    def __del__(self):
        # Prevent reports about unhandled exceptions.
        # Better than self._closed._log_traceback = False hack
        try:
            closed = self._closed
        except AttributeError:
            pass  # failed constructor
        else:
            if closed.done() and not closed.cancelled():
                # Retrieve the exception so the future does not log it.
                closed.exception()
|
2018-01-24 18:30:30 -04:00
|
|
|
|
2013-10-18 19:17:11 -03:00
|
|
|
|
|
|
|
class StreamWriter:
    """Wraps a Transport.

    This exposes write(), writelines(), [can_]write_eof(),
    get_extra_info() and close().  It adds drain() which returns an
    optional Future on which you can wait for flow control.  It also
    adds a transport property which references the Transport
    directly.
    """

    def __init__(self, transport, protocol, reader, loop):
        self._transport = transport
        self._protocol = protocol
        # drain() expects that the reader has an exception() method
        assert reader is None or isinstance(reader, StreamReader)
        self._reader = reader
        self._loop = loop
        # A pre-completed future.
        # NOTE(review): _complete_fut is not referenced elsewhere in
        # this file — confirm whether it is still needed.
        self._complete_fut = self._loop.create_future()
        self._complete_fut.set_result(None)

    def __repr__(self):
        info = [self.__class__.__name__, f'transport={self._transport!r}']
        if self._reader is not None:
            info.append(f'reader={self._reader!r}')
        return '<{}>'.format(' '.join(info))

    @property
    def transport(self):
        # Direct access to the wrapped Transport.
        return self._transport

    def write(self, data):
        self._transport.write(data)

    def writelines(self, data):
        self._transport.writelines(data)

    def write_eof(self):
        return self._transport.write_eof()

    def can_write_eof(self):
        return self._transport.can_write_eof()

    def close(self):
        return self._transport.close()

    def is_closing(self):
        return self._transport.is_closing()

    async def wait_closed(self):
        # Blocks until the protocol signals connection_lost().
        await self._protocol._get_close_waiter(self)

    def get_extra_info(self, name, default=None):
        return self._transport.get_extra_info(name, default)

    async def drain(self):
        """Flush the write buffer.

        The intended use is to write

          w.write(data)
          await w.drain()

        Raises the reader's pending exception (if any), or a connection
        error when the transport is closing.
        """
        if self._reader is not None:
            exc = self._reader.exception()
            if exc is not None:
                raise exc
        if self._transport.is_closing():
            # Wait for protocol.connection_lost() call
            # Raise connection closing error if any,
            # ConnectionResetError otherwise
            # Yield to the event loop so connection_lost() may be
            # called.  Without this, _drain_helper() would return
            # immediately, and code that calls
            #     write(...); await drain()
            # in a loop would never call connection_lost(), so it
            # would not see an error when the socket is closed.
            await sleep(0)
        await self._protocol._drain_helper()

    async def start_tls(self, sslcontext, *,
                        server_hostname=None,
                        ssl_handshake_timeout=None,
                        ssl_shutdown_timeout=None):
        """Upgrade an existing stream-based connection to TLS."""
        # A server-created stream has a client_connected_cb set.
        server_side = self._protocol._client_connected_cb is not None
        protocol = self._protocol
        # Flush pending data before handing the transport to TLS.
        await self.drain()
        new_transport = await self._loop.start_tls(  # type: ignore
            self._transport, protocol, sslcontext,
            server_side=server_side, server_hostname=server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout,
            ssl_shutdown_timeout=ssl_shutdown_timeout)
        self._transport = new_transport
        # Keep the protocol's view of the transport in sync.
        protocol._replace_transport(new_transport)

    # `warnings` is bound as a default argument so the module stays
    # reachable even if __del__ runs during interpreter shutdown.
    def __del__(self, warnings=warnings):
        if not self._transport.is_closing():
            if self._loop.is_closed():
                warnings.warn("loop is closed", ResourceWarning)
            else:
                self.close()
                warnings.warn(f"unclosed {self!r}", ResourceWarning)
|
2013-10-17 17:40:50 -03:00
|
|
|
|
|
|
|
class StreamReader:
|
|
|
|
|
2019-09-30 01:59:55 -03:00
|
|
|
_source_traceback = None
|
|
|
|
|
2019-05-27 16:56:22 -03:00
|
|
|
    def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
        """Initialize the reader.

        *limit* is the buffer-size threshold used by the read*()
        methods (must be > 0); *loop* is the event loop to use,
        defaulting to the current one.
        """
        # The line length limit is a security feature;
        # it also doubles as half the buffer limit.

        if limit <= 0:
            raise ValueError('Limit cannot be <= 0')

        self._limit = limit
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._buffer = bytearray()  # Incoming bytes not yet consumed.
        self._eof = False    # Whether we're done.
        self._waiter = None  # A future used by _wait_for_data()
        self._exception = None  # Set by set_exception(); re-raised by readers.
        self._transport = None  # Attached later via set_transport().
        self._paused = False  # True while the transport's reading is paused.
        if self._loop.get_debug():
            # Remember where the reader was created, for debug reports.
            self._source_traceback = format_helpers.extract_stack(
                sys._getframe(1))
|
2013-10-17 17:40:50 -03:00
|
|
|
|
2015-07-24 21:40:40 -03:00
|
|
|
def __repr__(self):
|
|
|
|
info = ['StreamReader']
|
|
|
|
if self._buffer:
|
2017-12-10 19:36:12 -04:00
|
|
|
info.append(f'{len(self._buffer)} bytes')
|
2015-07-24 21:40:40 -03:00
|
|
|
if self._eof:
|
|
|
|
info.append('eof')
|
|
|
|
if self._limit != _DEFAULT_LIMIT:
|
2017-12-10 19:36:12 -04:00
|
|
|
info.append(f'limit={self._limit}')
|
2015-07-24 21:40:40 -03:00
|
|
|
if self._waiter:
|
2017-12-10 19:36:12 -04:00
|
|
|
info.append(f'waiter={self._waiter!r}')
|
2015-07-24 21:40:40 -03:00
|
|
|
if self._exception:
|
2017-12-10 19:36:12 -04:00
|
|
|
info.append(f'exception={self._exception!r}')
|
2015-07-24 21:40:40 -03:00
|
|
|
if self._transport:
|
2017-12-10 19:36:12 -04:00
|
|
|
info.append(f'transport={self._transport!r}')
|
2015-07-24 21:40:40 -03:00
|
|
|
if self._paused:
|
|
|
|
info.append('paused')
|
2017-12-10 19:36:12 -04:00
|
|
|
return '<{}>'.format(' '.join(info))
|
2015-07-24 21:40:40 -03:00
|
|
|
|
2013-10-17 17:40:50 -03:00
|
|
|
def exception(self):
|
|
|
|
return self._exception
|
|
|
|
|
|
|
|
def set_exception(self, exc):
|
|
|
|
self._exception = exc
|
|
|
|
|
2013-10-18 19:17:11 -03:00
|
|
|
waiter = self._waiter
|
2013-10-17 17:40:50 -03:00
|
|
|
if waiter is not None:
|
2013-10-18 19:17:11 -03:00
|
|
|
self._waiter = None
|
2013-10-17 17:40:50 -03:00
|
|
|
if not waiter.cancelled():
|
|
|
|
waiter.set_exception(exc)
|
|
|
|
|
2015-01-13 19:53:37 -04:00
|
|
|
def _wakeup_waiter(self):
|
2016-01-11 13:28:19 -04:00
|
|
|
"""Wakeup read*() functions waiting for data or EOF."""
|
2015-01-13 19:53:37 -04:00
|
|
|
waiter = self._waiter
|
|
|
|
if waiter is not None:
|
|
|
|
self._waiter = None
|
|
|
|
if not waiter.cancelled():
|
|
|
|
waiter.set_result(None)
|
|
|
|
|
2013-10-17 17:40:50 -03:00
|
|
|
def set_transport(self, transport):
|
|
|
|
assert self._transport is None, 'Transport already set'
|
|
|
|
self._transport = transport
|
|
|
|
|
|
|
|
def _maybe_resume_transport(self):
|
2014-02-05 19:11:13 -04:00
|
|
|
if self._paused and len(self._buffer) <= self._limit:
|
2013-10-17 17:40:50 -03:00
|
|
|
self._paused = False
|
2013-10-18 11:58:20 -03:00
|
|
|
self._transport.resume_reading()
|
2013-10-17 17:40:50 -03:00
|
|
|
|
|
|
|
def feed_eof(self):
|
2013-10-18 19:17:11 -03:00
|
|
|
self._eof = True
|
2015-01-13 19:53:37 -04:00
|
|
|
self._wakeup_waiter()
|
2013-10-17 17:40:50 -03:00
|
|
|
|
2014-02-06 01:14:30 -04:00
|
|
|
def at_eof(self):
|
|
|
|
"""Return True if the buffer is empty and 'feed_eof' was called."""
|
|
|
|
return self._eof and not self._buffer
|
|
|
|
|
2013-10-17 17:40:50 -03:00
|
|
|
def feed_data(self, data):
|
2014-02-05 19:11:13 -04:00
|
|
|
assert not self._eof, 'feed_data after feed_eof'
|
|
|
|
|
2013-10-17 17:40:50 -03:00
|
|
|
if not data:
|
|
|
|
return
|
|
|
|
|
2014-02-05 19:11:13 -04:00
|
|
|
self._buffer.extend(data)
|
2015-01-13 19:53:37 -04:00
|
|
|
self._wakeup_waiter()
|
2013-10-17 17:40:50 -03:00
|
|
|
|
|
|
|
if (self._transport is not None and
|
2016-05-16 17:32:38 -03:00
|
|
|
not self._paused and
|
|
|
|
len(self._buffer) > 2 * self._limit):
|
2013-10-17 17:40:50 -03:00
|
|
|
try:
|
2013-10-18 11:58:20 -03:00
|
|
|
self._transport.pause_reading()
|
2013-10-17 17:40:50 -03:00
|
|
|
except NotImplementedError:
|
|
|
|
# The transport can't be paused.
|
|
|
|
# We'll just have to buffer all data.
|
|
|
|
# Forget the transport so we don't keep trying.
|
|
|
|
self._transport = None
|
|
|
|
else:
|
|
|
|
self._paused = True
|
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
    async def _wait_for_data(self, func_name):
        """Wait until feed_data() or feed_eof() is called.

        If stream was paused, automatically resume it.

        *func_name* is the caller's name, used only in the error
        message when two readers wait concurrently.
        """
        # StreamReader uses a future to link the protocol feed_data() method
        # to a read coroutine. Running two read coroutines at the same time
        # would have an unexpected behaviour. It would not possible to know
        # which coroutine would get the next data.
        if self._waiter is not None:
            raise RuntimeError(
                f'{func_name}() called while another coroutine is '
                f'already waiting for incoming data')

        assert not self._eof, '_wait_for_data after EOF'

        # Waiting for data while paused will make deadlock, so prevent it.
        # This is essential for readexactly(n) for case when n > self._limit.
        if self._paused:
            self._paused = False
            self._transport.resume_reading()

        self._waiter = self._loop.create_future()
        try:
            await self._waiter
        finally:
            # Always clear the waiter, even on cancellation or error.
            self._waiter = None
|
2014-01-23 12:40:03 -04:00
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
async def readline(self):
|
2016-01-11 13:28:19 -04:00
|
|
|
"""Read chunk of data from the stream until newline (b'\n') is found.
|
|
|
|
|
|
|
|
On success, return chunk that ends with newline. If only partial
|
|
|
|
line can be read due to EOF, return incomplete line without
|
|
|
|
terminating newline. When EOF was reached while no bytes read, empty
|
|
|
|
bytes object is returned.
|
|
|
|
|
|
|
|
If limit is reached, ValueError will be raised. In that case, if
|
|
|
|
newline was found, complete line including newline will be removed
|
|
|
|
from internal buffer. Else, internal buffer will be cleared. Limit is
|
|
|
|
compared against part of the line without newline.
|
|
|
|
|
|
|
|
If stream was paused, this function will automatically resume it if
|
|
|
|
needed.
|
|
|
|
"""
|
|
|
|
sep = b'\n'
|
|
|
|
seplen = len(sep)
|
|
|
|
try:
|
2017-12-08 18:23:48 -04:00
|
|
|
line = await self.readuntil(sep)
|
2018-09-11 14:13:04 -03:00
|
|
|
except exceptions.IncompleteReadError as e:
|
2016-01-11 13:28:19 -04:00
|
|
|
return e.partial
|
2018-09-11 14:13:04 -03:00
|
|
|
except exceptions.LimitOverrunError as e:
|
2016-01-11 13:28:19 -04:00
|
|
|
if self._buffer.startswith(sep, e.consumed):
|
|
|
|
del self._buffer[:e.consumed + seplen]
|
|
|
|
else:
|
|
|
|
self._buffer.clear()
|
|
|
|
self._maybe_resume_transport()
|
|
|
|
raise ValueError(e.args[0])
|
|
|
|
return line
|
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
    async def readuntil(self, separator=b'\n'):
        """Read data from the stream until ``separator`` is found.

        On success, the data and separator will be removed from the
        internal buffer (consumed). Returned data will include the
        separator at the end.

        Configured stream limit is used to check result. Limit sets the
        maximal length of data that can be returned, not counting the
        separator.

        If an EOF occurs and the complete separator is still not found,
        an IncompleteReadError exception will be raised, and the internal
        buffer will be reset.  The IncompleteReadError.partial attribute
        may contain the separator partially.

        If the data cannot be read because of over limit, a
        LimitOverrunError exception will be raised, and the data
        will be left in the internal buffer, so it can be read again.

        The ``separator`` may also be a tuple of separators. In this
        case the return value will be the shortest possible that has any
        separator as the suffix. For the purposes of LimitOverrunError,
        the shortest possible separator is considered to be the one that
        matched.
        """
        if isinstance(separator, tuple):
            # Makes sure shortest matches wins
            separator = sorted(separator, key=len)
        else:
            # Normalize to a list so the search loop below handles the
            # single-separator case uniformly.
            separator = [separator]
        if not separator:
            raise ValueError('Separator should contain at least one element')
        min_seplen = len(separator[0])
        max_seplen = len(separator[-1])
        if min_seplen == 0:
            raise ValueError('Separator should be at least one-byte string')

        if self._exception is not None:
            raise self._exception

        # Consume whole buffer except last bytes, which length is
        # one less than max_seplen. Let's check corner cases with
        # separator[-1]='SEPARATOR':
        # * we have received almost complete separator (without last
        #   byte). i.e buffer='some textSEPARATO'. In this case we
        #   can safely consume max_seplen - 1 bytes.
        # * last byte of buffer is first byte of separator, i.e.
        #   buffer='abcdefghijklmnopqrS'. We may safely consume
        #   everything except that last byte, but this require to
        #   analyze bytes of buffer that match partial separator.
        #   This is slow and/or require FSM. For this case our
        #   implementation is not optimal, since require rescanning
        #   of data that is known to not belong to separator. In
        #   real world, separator will not be so long to notice
        #   performance problems. Even when reading MIME-encoded
        #   messages :)

        # `offset` is the number of bytes from the beginning of the buffer
        # where there is no occurrence of any `separator`.
        offset = 0

        # Loop until we find a `separator` in the buffer, exceed the buffer size,
        # or an EOF has happened.
        while True:
            buflen = len(self._buffer)

            # Check if we now have enough data in the buffer for shortest
            # separator to fit.
            if buflen - offset >= min_seplen:
                match_start = None
                match_end = None
                for sep in separator:
                    isep = self._buffer.find(sep, offset)

                    if isep != -1:
                        # `separator` is in the buffer. `match_start` and
                        # `match_end` will be used later to retrieve the
                        # data.
                        end = isep + len(sep)
                        if match_end is None or end < match_end:
                            match_end = end
                            match_start = isep

                if match_end is not None:
                    break

                # see upper comment for explanation.
                offset = max(0, buflen + 1 - max_seplen)
                if offset > self._limit:
                    raise exceptions.LimitOverrunError(
                        'Separator is not found, and chunk exceed the limit',
                        offset)

            # Complete message (with full separator) may be present in buffer
            # even when EOF flag is set. This may happen when the last chunk
            # adds data which makes separator be found. That's why we check for
            # EOF *after* inspecting the buffer.
            if self._eof:
                chunk = bytes(self._buffer)
                self._buffer.clear()
                raise exceptions.IncompleteReadError(chunk, None)

            # _wait_for_data() will resume reading if stream was paused.
            await self._wait_for_data('readuntil')

        if match_start > self._limit:
            raise exceptions.LimitOverrunError(
                'Separator is found, but chunk is longer than limit', match_start)

        # Consume the matched span (data + separator) from the buffer.
        chunk = self._buffer[:match_end]
        del self._buffer[:match_end]
        self._maybe_resume_transport()
        return bytes(chunk)
|
2013-10-17 17:40:50 -03:00
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
async def read(self, n=-1):
|
2016-01-11 13:28:19 -04:00
|
|
|
"""Read up to `n` bytes from the stream.
|
|
|
|
|
2023-02-17 17:01:26 -04:00
|
|
|
If `n` is not provided or set to -1,
|
|
|
|
read until EOF, then return all read bytes.
|
|
|
|
If EOF was received and the internal buffer is empty,
|
|
|
|
return an empty bytes object.
|
2016-01-11 13:28:19 -04:00
|
|
|
|
2023-02-17 17:01:26 -04:00
|
|
|
If `n` is 0, return an empty bytes object immediately.
|
2016-01-11 13:28:19 -04:00
|
|
|
|
2023-02-17 17:01:26 -04:00
|
|
|
If `n` is positive, return at most `n` available bytes
|
|
|
|
as soon as at least 1 byte is available in the internal buffer.
|
|
|
|
If EOF is received before any byte is read, return an empty
|
|
|
|
bytes object.
|
2016-01-11 13:28:19 -04:00
|
|
|
|
2016-05-16 17:32:38 -03:00
|
|
|
Returned value is not limited with limit, configured at stream
|
|
|
|
creation.
|
2016-01-11 13:28:19 -04:00
|
|
|
|
|
|
|
If stream was paused, this function will automatically resume it if
|
|
|
|
needed.
|
|
|
|
"""
|
|
|
|
|
2013-10-17 17:40:50 -03:00
|
|
|
if self._exception is not None:
|
|
|
|
raise self._exception
|
|
|
|
|
2016-01-11 13:28:19 -04:00
|
|
|
if n == 0:
|
2013-10-17 17:40:50 -03:00
|
|
|
return b''
|
|
|
|
|
|
|
|
if n < 0:
|
2014-05-12 14:04:37 -03:00
|
|
|
# This used to just loop creating a new waiter hoping to
|
|
|
|
# collect everything in self._buffer, but that would
|
|
|
|
# deadlock if the subprocess sends more than self.limit
|
|
|
|
# bytes. So just call self.read(self._limit) until EOF.
|
|
|
|
blocks = []
|
|
|
|
while True:
|
2017-12-08 18:23:48 -04:00
|
|
|
block = await self.read(self._limit)
|
2014-05-12 14:04:37 -03:00
|
|
|
if not block:
|
|
|
|
break
|
|
|
|
blocks.append(block)
|
|
|
|
return b''.join(blocks)
|
2013-10-17 17:40:50 -03:00
|
|
|
|
2016-01-11 13:28:19 -04:00
|
|
|
if not self._buffer and not self._eof:
|
2017-12-08 18:23:48 -04:00
|
|
|
await self._wait_for_data('read')
|
2016-01-11 13:28:19 -04:00
|
|
|
|
|
|
|
# This will work right even if buffer is less than n bytes
|
2022-12-10 19:07:02 -04:00
|
|
|
data = bytes(memoryview(self._buffer)[:n])
|
2016-01-11 13:28:19 -04:00
|
|
|
del self._buffer[:n]
|
2014-02-05 19:11:13 -04:00
|
|
|
|
|
|
|
self._maybe_resume_transport()
|
|
|
|
return data
|
2013-10-17 17:40:50 -03:00
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
async def readexactly(self, n):
|
2016-01-11 13:28:19 -04:00
|
|
|
"""Read exactly `n` bytes.
|
|
|
|
|
2016-05-16 17:32:38 -03:00
|
|
|
Raise an IncompleteReadError if EOF is reached before `n` bytes can be
|
|
|
|
read. The IncompleteReadError.partial attribute of the exception will
|
2016-01-11 13:28:19 -04:00
|
|
|
contain the partial read bytes.
|
|
|
|
|
|
|
|
if n is zero, return empty bytes object.
|
|
|
|
|
2016-05-16 17:32:38 -03:00
|
|
|
Returned value is not limited with limit, configured at stream
|
|
|
|
creation.
|
2016-01-11 13:28:19 -04:00
|
|
|
|
|
|
|
If stream was paused, this function will automatically resume it if
|
|
|
|
needed.
|
|
|
|
"""
|
2015-12-11 12:32:59 -04:00
|
|
|
if n < 0:
|
|
|
|
raise ValueError('readexactly size can not be less than zero')
|
|
|
|
|
2013-10-17 17:40:50 -03:00
|
|
|
if self._exception is not None:
|
|
|
|
raise self._exception
|
|
|
|
|
2016-01-11 13:28:19 -04:00
|
|
|
if n == 0:
|
|
|
|
return b''
|
|
|
|
|
2016-10-05 19:01:12 -03:00
|
|
|
while len(self._buffer) < n:
|
|
|
|
if self._eof:
|
|
|
|
incomplete = bytes(self._buffer)
|
|
|
|
self._buffer.clear()
|
2018-09-11 14:13:04 -03:00
|
|
|
raise exceptions.IncompleteReadError(incomplete, n)
|
2016-10-05 19:01:12 -03:00
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
await self._wait_for_data('readexactly')
|
2016-10-05 19:01:12 -03:00
|
|
|
|
|
|
|
if len(self._buffer) == n:
|
|
|
|
data = bytes(self._buffer)
|
|
|
|
self._buffer.clear()
|
|
|
|
else:
|
2022-12-10 19:07:02 -04:00
|
|
|
data = bytes(memoryview(self._buffer)[:n])
|
2016-10-05 19:01:12 -03:00
|
|
|
del self._buffer[:n]
|
|
|
|
self._maybe_resume_transport()
|
|
|
|
return data
|
2015-05-13 16:15:56 -03:00
|
|
|
|
2017-10-06 03:08:57 -03:00
|
|
|
def __aiter__(self):
|
|
|
|
return self
|
|
|
|
|
2017-12-08 18:23:48 -04:00
|
|
|
async def __anext__(self):
|
|
|
|
val = await self.readline()
|
2017-10-06 03:08:57 -03:00
|
|
|
if val == b'':
|
|
|
|
raise StopAsyncIteration
|
|
|
|
return val
|