cpython/Lib/multiprocessing/connection.py


#
# A higher level module for using sockets (or Windows named pipes)
#
# multiprocessing/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ]
import io
import os
import sys
import socket
import struct
import time
import tempfile
import itertools
import _multiprocessing
from . import util
from . import AuthenticationError, BufferTooShort
from .context import reduction
_ForkingPickler = reduction.ForkingPickler
try:
import _winapi
from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE
except ImportError:
if sys.platform == 'win32':
raise
_winapi = None
#
#
#
BUFSIZE = 8192
# A very generous timeout when it comes to local connections...
CONNECTION_TIMEOUT = 20.
_mmap_counter = itertools.count()
default_family = 'AF_INET'
families = ['AF_INET']
if hasattr(socket, 'AF_UNIX'):
default_family = 'AF_UNIX'
families += ['AF_UNIX']
if sys.platform == 'win32':
default_family = 'AF_PIPE'
families += ['AF_PIPE']
def _init_timeout(timeout=CONNECTION_TIMEOUT):
return time.monotonic() + timeout
def _check_timeout(t):
return time.monotonic() > t
#
#
#
def arbitrary_address(family):
'''
Return an arbitrary free address for the given family
'''
if family == 'AF_INET':
return ('localhost', 0)
elif family == 'AF_UNIX':
return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir())
elif family == 'AF_PIPE':
return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
(os.getpid(), next(_mmap_counter)), dir="")
else:
raise ValueError('unrecognized family')
def _validate_family(family):
'''
Checks if the family is valid for the current environment.
'''
if sys.platform != 'win32' and family == 'AF_PIPE':
raise ValueError('Family %s is not recognized.' % family)
if sys.platform == 'win32' and family == 'AF_UNIX':
# double check
if not hasattr(socket, family):
raise ValueError('Family %s is not recognized.' % family)
def address_type(address):
'''
Return the type of the address
This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
'''
if type(address) == tuple:
return 'AF_INET'
elif type(address) is str and address.startswith('\\\\'):
return 'AF_PIPE'
elif type(address) is str or util.is_abstract_socket_namespace(address):
return 'AF_UNIX'
else:
raise ValueError('address type of %r unrecognized' % address)
#
# Connection classes
#
class _ConnectionBase:
_handle = None
def __init__(self, handle, readable=True, writable=True):
handle = handle.__index__()
if handle < 0:
raise ValueError("invalid handle")
if not readable and not writable:
raise ValueError(
"at least one of `readable` and `writable` must be True")
self._handle = handle
self._readable = readable
self._writable = writable
# XXX should we use util.Finalize instead of a __del__?
def __del__(self):
if self._handle is not None:
self._close()
def _check_closed(self):
if self._handle is None:
raise OSError("handle is closed")
def _check_readable(self):
if not self._readable:
raise OSError("connection is write-only")
def _check_writable(self):
if not self._writable:
raise OSError("connection is read-only")
def _bad_message_length(self):
if self._writable:
self._readable = False
else:
self.close()
raise OSError("bad message length")
@property
def closed(self):
"""True if the connection is closed"""
return self._handle is None
@property
def readable(self):
"""True if the connection is readable"""
return self._readable
@property
def writable(self):
"""True if the connection is writable"""
return self._writable
def fileno(self):
"""File descriptor or handle of the connection"""
self._check_closed()
return self._handle
def close(self):
"""Close the connection"""
if self._handle is not None:
try:
self._close()
finally:
self._handle = None
def send_bytes(self, buf, offset=0, size=None):
"""Send the bytes data from a bytes-like object"""
self._check_closed()
self._check_writable()
m = memoryview(buf)
if m.itemsize > 1:
m = m.cast('B')
n = m.nbytes
if offset < 0:
raise ValueError("offset is negative")
if n < offset:
raise ValueError("buffer length < offset")
if size is None:
size = n - offset
elif size < 0:
raise ValueError("size is negative")
elif offset + size > n:
raise ValueError("buffer length < offset + size")
self._send_bytes(m[offset:offset + size])
def send(self, obj):
"""Send a (picklable) object"""
self._check_closed()
self._check_writable()
self._send_bytes(_ForkingPickler.dumps(obj))
def recv_bytes(self, maxlength=None):
"""
Receive bytes data as a bytes object.
"""
self._check_closed()
self._check_readable()
if maxlength is not None and maxlength < 0:
raise ValueError("negative maxlength")
buf = self._recv_bytes(maxlength)
if buf is None:
self._bad_message_length()
return buf.getvalue()
def recv_bytes_into(self, buf, offset=0):
"""
Receive bytes data into a writeable bytes-like object.
Return the number of bytes read.
"""
self._check_closed()
self._check_readable()
with memoryview(buf) as m:
# Get bytesize of arbitrary buffer
itemsize = m.itemsize
bytesize = itemsize * len(m)
if offset < 0:
raise ValueError("negative offset")
elif offset > bytesize:
raise ValueError("offset too large")
result = self._recv_bytes()
size = result.tell()
if bytesize < offset + size:
raise BufferTooShort(result.getvalue())
# Message can fit in dest
result.seek(0)
result.readinto(m[offset // itemsize :
(offset + size) // itemsize])
return size
def recv(self):
"""Receive a (picklable) object"""
self._check_closed()
self._check_readable()
buf = self._recv_bytes()
return _ForkingPickler.loads(buf.getbuffer())
def poll(self, timeout=0.0):
"""Whether there is any input available to be read"""
self._check_closed()
self._check_readable()
return self._poll(timeout)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
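# Illustrative sketch (not part of the module): how the byte-oriented API of
# _ConnectionBase above is typically used over a Pipe().  The names `reader`
# and `writer` are hypothetical.
#
#     reader, writer = Pipe(duplex=False)
#     writer.send_bytes(b'0123456789', offset=2, size=4)   # sends b'2345'
#     buf = bytearray(8)
#     n = reader.recv_bytes_into(buf, offset=1)             # n == 4
#     assert bytes(buf[1:1 + n]) == b'2345'
#     # If buf were too small for the message, BufferTooShort would be raised
#     # with the full message available as e.args[0].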
if _winapi:
class PipeConnection(_ConnectionBase):
"""
Connection class based on a Windows named pipe.
Overlapped I/O is used, so the handles must have been created
with FILE_FLAG_OVERLAPPED.
"""
_got_empty_message = False
def _close(self, _CloseHandle=_winapi.CloseHandle):
_CloseHandle(self._handle)
def _send_bytes(self, buf):
ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True)
try:
if err == _winapi.ERROR_IO_PENDING:
waitres = _winapi.WaitForMultipleObjects(
[ov.event], False, INFINITE)
assert waitres == WAIT_OBJECT_0
except:
ov.cancel()
raise
finally:
nwritten, err = ov.GetOverlappedResult(True)
assert err == 0
assert nwritten == len(buf)
def _recv_bytes(self, maxsize=None):
if self._got_empty_message:
self._got_empty_message = False
return io.BytesIO()
else:
bsize = 128 if maxsize is None else min(maxsize, 128)
try:
ov, err = _winapi.ReadFile(self._handle, bsize,
overlapped=True)
try:
if err == _winapi.ERROR_IO_PENDING:
waitres = _winapi.WaitForMultipleObjects(
[ov.event], False, INFINITE)
assert waitres == WAIT_OBJECT_0
except:
ov.cancel()
raise
finally:
nread, err = ov.GetOverlappedResult(True)
if err == 0:
f = io.BytesIO()
f.write(ov.getbuffer())
return f
elif err == _winapi.ERROR_MORE_DATA:
return self._get_more_data(ov, maxsize)
except OSError as e:
if e.winerror == _winapi.ERROR_BROKEN_PIPE:
raise EOFError
else:
raise
raise RuntimeError("shouldn't get here; expected KeyboardInterrupt")
def _poll(self, timeout):
if (self._got_empty_message or
_winapi.PeekNamedPipe(self._handle)[0] != 0):
return True
return bool(wait([self], timeout))
def _get_more_data(self, ov, maxsize):
buf = ov.getbuffer()
f = io.BytesIO()
f.write(buf)
left = _winapi.PeekNamedPipe(self._handle)[1]
assert left > 0
if maxsize is not None and len(buf) + left > maxsize:
self._bad_message_length()
ov, err = _winapi.ReadFile(self._handle, left, overlapped=True)
rbytes, err = ov.GetOverlappedResult(True)
assert err == 0
assert rbytes == left
f.write(ov.getbuffer())
return f
class Connection(_ConnectionBase):
"""
Connection class based on an arbitrary file descriptor (Unix only), or
a socket handle (Windows).
"""
if _winapi:
def _close(self, _close=_multiprocessing.closesocket):
_close(self._handle)
_write = _multiprocessing.send
_read = _multiprocessing.recv
else:
def _close(self, _close=os.close):
_close(self._handle)
_write = os.write
_read = os.read
def _send(self, buf, write=_write):
remaining = len(buf)
while True:
n = write(self._handle, buf)
remaining -= n
if remaining == 0:
break
buf = buf[n:]
def _recv(self, size, read=_read):
buf = io.BytesIO()
handle = self._handle
remaining = size
while remaining > 0:
chunk = read(handle, remaining)
n = len(chunk)
if n == 0:
if remaining == size:
raise EOFError
else:
raise OSError("got end of file during message")
buf.write(chunk)
remaining -= n
return buf
def _send_bytes(self, buf):
n = len(buf)
if n > 0x7fffffff:
pre_header = struct.pack("!i", -1)
header = struct.pack("!Q", n)
self._send(pre_header)
self._send(header)
self._send(buf)
else:
# For wire compatibility with 3.7 and lower
header = struct.pack("!i", n)
if n > 16384:
# The payload is large so Nagle's algorithm won't be triggered
# and we'd better avoid the cost of concatenation.
self._send(header)
self._send(buf)
else:
# Issue #20540: concatenate before sending, to avoid delays due
# to Nagle's algorithm on a TCP socket.
# Also note we want to avoid sending a 0-length buffer separately,
# to avoid "broken pipe" errors if the other end closed the pipe.
self._send(header + buf)
def _recv_bytes(self, maxsize=None):
buf = self._recv(4)
size, = struct.unpack("!i", buf.getvalue())
if size == -1:
buf = self._recv(8)
size, = struct.unpack("!Q", buf.getvalue())
if maxsize is not None and size > maxsize:
return None
return self._recv(size)
def _poll(self, timeout):
r = wait([self], timeout)
return bool(r)
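# Framing sketch for Connection._send_bytes()/_recv_bytes() above (illustrative,
# not part of the module): each message is prefixed with a signed 32-bit
# big-endian length; payloads larger than 0x7fffffff are escaped with a -1
# marker followed by an unsigned 64-bit length, as implemented above.
#
#     import struct
#     payload = b'hello'
#     frame = struct.pack("!i", len(payload)) + payload
#     # frame == b'\x00\x00\x00\x05hello'
#     # For payloads larger than 0x7fffffff:
#     #   struct.pack("!i", -1) + struct.pack("!Q", len(payload)) + payload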
#
# Public functions
#
class Listener(object):
'''
Returns a listener object.
This is a wrapper for a bound socket which is 'listening' for
connections, or for a Windows named pipe.
'''
def __init__(self, address=None, family=None, backlog=1, authkey=None):
family = family or (address and address_type(address)) \
or default_family
address = address or arbitrary_address(family)
_validate_family(family)
if family == 'AF_PIPE':
self._listener = PipeListener(address, backlog)
else:
self._listener = SocketListener(address, family, backlog)
if authkey is not None and not isinstance(authkey, bytes):
raise TypeError('authkey should be a byte string')
self._authkey = authkey
def accept(self):
'''
Accept a connection on the bound socket or named pipe of `self`.
Returns a `Connection` object.
'''
if self._listener is None:
raise OSError('listener is closed')
c = self._listener.accept()
if self._authkey:
deliver_challenge(c, self._authkey)
answer_challenge(c, self._authkey)
return c
def close(self):
'''
Close the bound socket or named pipe of `self`.
'''
listener = self._listener
if listener is not None:
self._listener = None
listener.close()
@property
def address(self):
return self._listener._address
@property
def last_accepted(self):
return self._listener._last_accepted
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def Client(address, family=None, authkey=None):
'''
Returns a connection to the address of a `Listener`
'''
family = family or address_type(address)
_validate_family(family)
if family == 'AF_PIPE':
c = PipeClient(address)
else:
c = SocketClient(address)
if authkey is not None and not isinstance(authkey, bytes):
raise TypeError('authkey should be a byte string')
if authkey is not None:
answer_challenge(c, authkey)
deliver_challenge(c, authkey)
return c
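# Usage sketch (illustrative, not part of the module): a listener and a client
# authenticating each other with a shared key.  The address and authkey shown
# are hypothetical.
#
#     # In the serving process:
#     with Listener(('localhost', 6000), authkey=b'secret password') as listener:
#         with listener.accept() as conn:
#             conn.send({'status': 'ok'})
#
#     # In the client process:
#     with Client(('localhost', 6000), authkey=b'secret password') as conn:
#         print(conn.recv())                    # -> {'status': 'ok'}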
if sys.platform != 'win32':
def Pipe(duplex=True):
'''
Returns pair of connection objects at either end of a pipe
'''
if duplex:
s1, s2 = socket.socketpair()
s1.setblocking(True)
s2.setblocking(True)
c1 = Connection(s1.detach())
c2 = Connection(s2.detach())
else:
fd1, fd2 = os.pipe()
c1 = Connection(fd1, writable=False)
c2 = Connection(fd2, readable=False)
return c1, c2
else:
def Pipe(duplex=True):
'''
Returns pair of connection objects at either end of a pipe
'''
address = arbitrary_address('AF_PIPE')
if duplex:
openmode = _winapi.PIPE_ACCESS_DUPLEX
access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
obsize, ibsize = BUFSIZE, BUFSIZE
else:
openmode = _winapi.PIPE_ACCESS_INBOUND
access = _winapi.GENERIC_WRITE
obsize, ibsize = 0, BUFSIZE
h1 = _winapi.CreateNamedPipe(
address, openmode | _winapi.FILE_FLAG_OVERLAPPED |
_winapi.FILE_FLAG_FIRST_PIPE_INSTANCE,
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
_winapi.PIPE_WAIT,
1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER,
# default security descriptor: the handle cannot be inherited
_winapi.NULL
)
h2 = _winapi.CreateFile(
address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
_winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
)
_winapi.SetNamedPipeHandleState(
h2, _winapi.PIPE_READMODE_MESSAGE, None, None
)
overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True)
_, err = overlapped.GetOverlappedResult(True)
assert err == 0
c1 = PipeConnection(h1, writable=duplex)
c2 = PipeConnection(h2, readable=duplex)
return c1, c2
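# Usage sketch for Pipe() (illustrative, not part of the module):
#
#     parent_conn, child_conn = Pipe()          # duplex by default
#     parent_conn.send([1, 'two', None])        # any picklable object
#     assert child_conn.recv() == [1, 'two', None]
#
#     reader, writer = Pipe(duplex=False)       # reader is read-only,
#                                               # writer is write-only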
#
# Definitions for connections based on sockets
#
class SocketListener(object):
'''
Representation of a socket which is bound to an address and listening
'''
def __init__(self, address, family, backlog=1):
self._socket = socket.socket(getattr(socket, family))
try:
# SO_REUSEADDR has different semantics on Windows (issue #2550).
if os.name == 'posix':
self._socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self._socket.setblocking(True)
self._socket.bind(address)
self._socket.listen(backlog)
self._address = self._socket.getsockname()
except OSError:
self._socket.close()
raise
self._family = family
self._last_accepted = None
if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address):
# Linux abstract socket namespaces do not need to be explicitly unlinked
self._unlink = util.Finalize(
self, os.unlink, args=(address,), exitpriority=0
)
else:
self._unlink = None
def accept(self):
s, self._last_accepted = self._socket.accept()
s.setblocking(True)
return Connection(s.detach())
def close(self):
try:
self._socket.close()
finally:
unlink = self._unlink
if unlink is not None:
self._unlink = None
unlink()
def SocketClient(address):
'''
Return a connection object connected to the socket given by `address`
'''
family = address_type(address)
with socket.socket( getattr(socket, family) ) as s:
s.setblocking(True)
s.connect(address)
return Connection(s.detach())
#
# Definitions for connections based on named pipes
#
if sys.platform == 'win32':
class PipeListener(object):
'''
Representation of a named pipe
'''
def __init__(self, address, backlog=None):
self._address = address
self._handle_queue = [self._new_handle(first=True)]
self._last_accepted = None
util.sub_debug('listener created with address=%r', self._address)
self.close = util.Finalize(
self, PipeListener._finalize_pipe_listener,
args=(self._handle_queue, self._address), exitpriority=0
)
def _new_handle(self, first=False):
flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
if first:
flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
return _winapi.CreateNamedPipe(
self._address, flags,
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
_winapi.PIPE_WAIT,
_winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
_winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
)
def accept(self):
self._handle_queue.append(self._new_handle())
handle = self._handle_queue.pop(0)
try:
ov = _winapi.ConnectNamedPipe(handle, overlapped=True)
except OSError as e:
if e.winerror != _winapi.ERROR_NO_DATA:
raise
# ERROR_NO_DATA can occur if a client has already connected,
# written data and then disconnected -- see Issue 14725.
else:
try:
res = _winapi.WaitForMultipleObjects(
[ov.event], False, INFINITE)
except:
ov.cancel()
_winapi.CloseHandle(handle)
raise
finally:
_, err = ov.GetOverlappedResult(True)
assert err == 0
return PipeConnection(handle)
@staticmethod
def _finalize_pipe_listener(queue, address):
util.sub_debug('closing listener with address=%r', address)
for handle in queue:
_winapi.CloseHandle(handle)
def PipeClient(address):
'''
Return a connection object connected to the pipe given by `address`
'''
t = _init_timeout()
while 1:
try:
_winapi.WaitNamedPipe(address, 1000)
h = _winapi.CreateFile(
address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE,
0, _winapi.NULL, _winapi.OPEN_EXISTING,
_winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
)
except OSError as e:
if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT,
_winapi.ERROR_PIPE_BUSY) or _check_timeout(t):
raise
else:
break
else:
raise
_winapi.SetNamedPipeHandleState(
h, _winapi.PIPE_READMODE_MESSAGE, None, None
)
return PipeConnection(h)
#
# Authentication stuff
#
MESSAGE_LENGTH = 40 # MUST be > 20
_CHALLENGE = b'#CHALLENGE#'
_WELCOME = b'#WELCOME#'
_FAILURE = b'#FAILURE#'
# multiprocessing.connection Authentication Handshake Protocol Description
# (as documented for reference after reading the existing code)
# =============================================================================
#
# On Windows: native pipes with "overlapped IO" are used to send the bytes,
# instead of the length prefix SIZE scheme described below. (ie: the OS deals
# with message sizes for us)
#
# Protocol error behaviors:
#
# On POSIX, any failure to receive the length prefix into SIZE, a SIZE greater
# than the requested maxsize to receive, or receiving fewer than SIZE bytes
# results in the connection being closed and authentication failing.
#
# On Windows, receiving too few bytes is never a low level _recv_bytes read
# error, receiving too many will trigger an error only if the receive maxsize
# value was larger than 128 OR if the data arrived in smaller pieces.
#
# Serving side                        Client side
# ------------------------------      ---------------------------------------
#                                      0. Open a connection on the pipe.
# 1. Accept connection.
# 2. Random 20+ bytes -> MESSAGE
#    Modern servers always send
#    more than 20 bytes and include
#    a {digest} prefix on it with
#    their preferred HMAC digest.
#    Legacy ones send ==20 bytes.
# 3. send 4 byte length (net order)
#    prefix followed by:
#    b'#CHALLENGE#' + MESSAGE
#                                      4. Receive 4 bytes, parse as network byte
#                                         order integer. If it is -1, receive an
#                                         additional 8 bytes, parse that as network
#                                         byte order. The result is the length of
#                                         the data that follows -> SIZE.
#                                      5. Receive min(SIZE, 256) bytes -> M1
#                                      6. Assert that M1 starts with:
#                                         b'#CHALLENGE#'
#                                      7. Strip that prefix from M1 into -> M2
#                                      7.1. Parse M2: if it is exactly 20 bytes in
#                                           length this indicates a legacy server
#                                           supporting only HMAC-MD5. Otherwise the
#                                      7.2. preferred digest is looked up from an
#                                           expected "{digest}" prefix on M2. No prefix
#                                           or unsupported digest? <- AuthenticationError
#                                      7.3. Put divined algorithm name in -> D_NAME
#                                      8. Compute HMAC-D_NAME of AUTHKEY, M2 -> C_DIGEST
#                                      9. Send 4 byte length prefix (net order)
#                                         followed by C_DIGEST bytes.
# 10. Receive 4 or 4+8 byte length
#     prefix (#4 dance) -> SIZE.
# 11. Receive min(SIZE, 256) -> C_D.
# 11.1. Parse C_D: legacy servers
#       accept it as is, "md5" -> D_NAME
# 11.2. modern servers check the length
#       of C_D, IF it is 16 bytes?
# 11.2.1. "md5" -> D_NAME
#         and skip to step 12.
# 11.3. longer? expect and parse a "{digest}"
#       prefix into -> D_NAME.
#       Strip the prefix and store remaining
#       bytes in -> C_D.
# 11.4. Don't like D_NAME? <- AuthenticationError
# 12. Compute HMAC-D_NAME of AUTHKEY,
#     MESSAGE into -> M_DIGEST.
# 13. Compare M_DIGEST == C_D:
# 14a: Match? Send length prefix &
#      b'#WELCOME#'
#      <- RETURN
# 14b: Mismatch? Send len prefix &
#      b'#FAILURE#'
#      <- CLOSE & AuthenticationError
#                                      15. Receive 4 or 4+8 byte length prefix (net
#                                          order) again as in #4 into -> SIZE.
#                                      16. Receive min(SIZE, 256) bytes -> M3.
#                                      17. Compare M3 == b'#WELCOME#':
#                                      17a. Match? <- RETURN
#                                      17b. Mismatch? <- CLOSE & AuthenticationError
#
# If this RETURNed, the connection remains open: it has been authenticated.
#
# Length prefixes are used consistently. Even on the legacy protocol, this
# was good fortune and allowed us to evolve the protocol by using the length
# of the opening challenge or length of the returned digest as a signal as
# to which protocol the other end supports.
_ALLOWED_DIGESTS = frozenset(
{b'md5', b'sha256', b'sha384', b'sha3_256', b'sha3_384'})
_MAX_DIGEST_LEN = max(len(_) for _ in _ALLOWED_DIGESTS)
# Old hmac-md5 only server versions from Python <=3.11 sent a message of this
# length. It happens to not match the length of any supported digest so we can
# use a message of this length to indicate that we should work in backwards
# compatible md5-only mode without a {digest_name} prefix on our response.
_MD5ONLY_MESSAGE_LENGTH = 20
_MD5_DIGEST_LEN = 16
_LEGACY_LENGTHS = (_MD5ONLY_MESSAGE_LENGTH, _MD5_DIGEST_LEN)
def _get_digest_name_and_payload(message: bytes) -> (str, bytes):
"""Returns a digest name and the payload for a response hash.
If a legacy protocol is detected based on the message length
or contents the digest name returned will be empty to indicate
legacy mode where MD5 and no digest prefix should be sent.
"""
# modern message format: b"{digest}payload" longer than 20 bytes
# legacy message format: 16 or 20 byte b"payload"
if len(message) in _LEGACY_LENGTHS:
# Either this was a legacy server challenge, or we're processing
# a reply from a legacy client that sent an unprefixed 16-byte
# HMAC-MD5 response. All messages using the modern protocol will
# be longer than either of these lengths.
return '', message
if (message.startswith(b'{') and
(curly := message.find(b'}', 1, _MAX_DIGEST_LEN+2)) > 0):
digest = message[1:curly]
if digest in _ALLOWED_DIGESTS:
payload = message[curly+1:]
return digest.decode('ascii'), payload
raise AuthenticationError(
'unsupported message length, missing digest prefix, '
f'or unsupported digest: {message=}')
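# Illustrative parses (not part of the module):
#
#     _get_digest_name_and_payload(b'{sha256}' + b'x' * 32)   # ('sha256', b'x' * 32)
#     _get_digest_name_and_payload(b'y' * 20)                 # ('', b'y' * 20)  legacy challenge
#     _get_digest_name_and_payload(b'z' * 16)                 # ('', b'z' * 16)  legacy md5 reply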
def _create_response(authkey, message):
"""Create a MAC based on authkey and message
The MAC algorithm defaults to HMAC-MD5, unless MD5 is not available or
the message has a '{digest_name}' prefix. For legacy HMAC-MD5, the response
is the raw MAC, otherwise the response is prefixed with '{digest_name}',
e.g. b'{sha256}abcdefg...'
Note: The MAC protects the entire message including the digest_name prefix.
"""
import hmac
digest_name = _get_digest_name_and_payload(message)[0]
# The MAC protects the entire message: digest header and payload.
if not digest_name:
# Legacy server without a {digest} prefix on message.
# Generate a legacy non-prefixed HMAC-MD5 reply.
try:
return hmac.new(authkey, message, 'md5').digest()
except ValueError:
# HMAC-MD5 is not available (FIPS mode?), fall back to
# HMAC-SHA2-256 modern protocol. The legacy server probably
# doesn't support it and will reject us anyways. :shrug:
digest_name = 'sha256'
# Modern protocol, indicate the digest used in the reply.
response = hmac.new(authkey, message, digest_name).digest()
return b'{%s}%s' % (digest_name.encode('ascii'), response)
def _verify_challenge(authkey, message, response):
"""Verify MAC challenge
If our message did not include a digest_name prefix, the client is allowed
to select a stronger digest_name from _ALLOWED_DIGESTS.
In case our message is prefixed, a client cannot downgrade to a weaker
algorithm, because the MAC is calculated over the entire message
including the '{digest_name}' prefix.
"""
import hmac
response_digest, response_mac = _get_digest_name_and_payload(response)
response_digest = response_digest or 'md5'
try:
expected = hmac.new(authkey, message, response_digest).digest()
except ValueError:
raise AuthenticationError(f'{response_digest=} unsupported')
if len(expected) != len(response_mac):
raise AuthenticationError(
f'expected {response_digest!r} of length {len(expected)} '
f'got {len(response_mac)}')
if not hmac.compare_digest(expected, response_mac):
raise AuthenticationError('digest received was wrong')
def deliver_challenge(connection, authkey: bytes, digest_name='sha256'):
if not isinstance(authkey, bytes):
raise ValueError(
"Authkey must be bytes, not {0!s}".format(type(authkey)))
assert MESSAGE_LENGTH > _MD5ONLY_MESSAGE_LENGTH, "protocol constraint"
message = os.urandom(MESSAGE_LENGTH)
message = b'{%s}%s' % (digest_name.encode('ascii'), message)
# Even when sending a challenge to a legacy client that does not support
# digest prefixes, they'll take the entire thing as a challenge and
# respond to it with a raw HMAC-MD5.
connection.send_bytes(_CHALLENGE + message)
response = connection.recv_bytes(256) # reject large message
try:
_verify_challenge(authkey, message, response)
except AuthenticationError:
connection.send_bytes(_FAILURE)
raise
else:
connection.send_bytes(_WELCOME)
def answer_challenge(connection, authkey: bytes):
if not isinstance(authkey, bytes):
raise ValueError(
"Authkey must be bytes, not {0!s}".format(type(authkey)))
message = connection.recv_bytes(256) # reject large message
if not message.startswith(_CHALLENGE):
raise AuthenticationError(
f'Protocol error, expected challenge: {message=}')
message = message[len(_CHALLENGE):]
if len(message) < _MD5ONLY_MESSAGE_LENGTH:
raise AuthenticationError(f'challenge too short: {len(message)} bytes')
digest = _create_response(authkey, message)
connection.send_bytes(digest)
response = connection.recv_bytes(256) # reject large message
if response != _WELCOME:
raise AuthenticationError('digest sent was rejected')
#
# Support for using xmlrpclib for serialization
#
class ConnectionWrapper(object):
def __init__(self, conn, dumps, loads):
self._conn = conn
self._dumps = dumps
self._loads = loads
for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
obj = getattr(conn, attr)
setattr(self, attr, obj)
def send(self, obj):
s = self._dumps(obj)
self._conn.send_bytes(s)
def recv(self):
s = self._conn.recv_bytes()
return self._loads(s)
def _xml_dumps(obj):
return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8')
def _xml_loads(s):
(obj,), method = xmlrpclib.loads(s.decode('utf-8'))
return obj
class XmlListener(Listener):
def accept(self):
global xmlrpclib
import xmlrpc.client as xmlrpclib
obj = Listener.accept(self)
return ConnectionWrapper(obj, _xml_dumps, _xml_loads)
def XmlClient(*args, **kwds):
global xmlrpclib
import xmlrpc.client as xmlrpclib
return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)
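# Usage sketch (illustrative, not part of the module): XmlListener/XmlClient
# behave like Listener/Client but serialize with xmlrpc.client instead of
# pickle, so only XML-RPC representable values (ints, strings, lists, dicts
# with string keys, ...) can be exchanged.  The address is hypothetical.
#
#     with XmlListener(('localhost', 6000)) as listener:
#         conn = listener.accept()
#         conn.send(['a', {'b': 1}])
#         conn.close()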
#
# Wait
#
if sys.platform == 'win32':
def _exhaustive_wait(handles, timeout):
# Return ALL handles which are currently signalled. (Only
# returning the first signalled might create starvation issues.)
L = list(handles)
ready = []
while L:
res = _winapi.WaitForMultipleObjects(L, False, timeout)
if res == WAIT_TIMEOUT:
break
elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L):
res -= WAIT_OBJECT_0
elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L):
res -= WAIT_ABANDONED_0
else:
raise RuntimeError('Should not get here')
ready.append(L[res])
L = L[res+1:]
timeout = 0
return ready
_ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED}
def wait(object_list, timeout=None):
'''
Wait till an object in object_list is ready/readable.
Returns list of those objects in object_list which are ready/readable.
'''
if timeout is None:
timeout = INFINITE
elif timeout < 0:
timeout = 0
else:
timeout = int(timeout * 1000 + 0.5)
object_list = list(object_list)
waithandle_to_obj = {}
ov_list = []
ready_objects = set()
ready_handles = set()
try:
for o in object_list:
try:
fileno = getattr(o, 'fileno')
except AttributeError:
waithandle_to_obj[o.__index__()] = o
else:
# start an overlapped read of length zero
try:
ov, err = _winapi.ReadFile(fileno(), 0, True)
except OSError as e:
ov, err = None, e.winerror
if err not in _ready_errors:
raise
if err == _winapi.ERROR_IO_PENDING:
ov_list.append(ov)
waithandle_to_obj[ov.event] = o
else:
# If o.fileno() is an overlapped pipe handle and
# err == 0 then there is a zero length message
# in the pipe, but it HAS NOT been consumed...
if ov and sys.getwindowsversion()[:2] >= (6, 2):
# ... except on Windows 8 and later, where
# the message HAS been consumed.
try:
_, err = ov.GetOverlappedResult(False)
except OSError as e:
err = e.winerror
if not err and hasattr(o, '_got_empty_message'):
o._got_empty_message = True
ready_objects.add(o)
timeout = 0
ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout)
finally:
# request that overlapped reads stop
for ov in ov_list:
ov.cancel()
# wait for all overlapped reads to stop
for ov in ov_list:
try:
_, err = ov.GetOverlappedResult(True)
except OSError as e:
err = e.winerror
if err not in _ready_errors:
raise
if err != _winapi.ERROR_OPERATION_ABORTED:
o = waithandle_to_obj[ov.event]
ready_objects.add(o)
if err == 0:
# If o.fileno() is an overlapped pipe handle then
# a zero length message HAS been consumed.
if hasattr(o, '_got_empty_message'):
o._got_empty_message = True
ready_objects.update(waithandle_to_obj[h] for h in ready_handles)
return [o for o in object_list if o in ready_objects]
else:
import selectors
# poll/select have the advantage of not requiring any extra file
# descriptor, unlike epoll/kqueue (also, they require a single
# syscall).
if hasattr(selectors, 'PollSelector'):
_WaitSelector = selectors.PollSelector
else:
_WaitSelector = selectors.SelectSelector
def wait(object_list, timeout=None):
'''
Wait till an object in object_list is ready/readable.
Returns list of those objects in object_list which are ready/readable.
'''
with _WaitSelector() as selector:
for obj in object_list:
selector.register(obj, selectors.EVENT_READ)
if timeout is not None:
deadline = time.monotonic() + timeout
while True:
ready = selector.select(timeout)
if ready:
return [key.fileobj for (key, events) in ready]
else:
if timeout is not None:
timeout = deadline - time.monotonic()
if timeout < 0:
return ready
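# Usage sketch for wait() (illustrative, not part of the module):
#
#     r1, w1 = Pipe(duplex=False)
#     r2, w2 = Pipe(duplex=False)
#     w2.send('ready')
#     for conn in wait([r1, r2], timeout=1.0):   # -> [r2]
#         print(conn.recv())                     # -> 'ready'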
#
# Make connection and socket objects shareable if possible
#
if sys.platform == 'win32':
def reduce_connection(conn):
handle = conn.fileno()
with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s:
from . import resource_sharer
ds = resource_sharer.DupSocket(s)
return rebuild_connection, (ds, conn.readable, conn.writable)
def rebuild_connection(ds, readable, writable):
sock = ds.detach()
return Connection(sock.detach(), readable, writable)
reduction.register(Connection, reduce_connection)
def reduce_pipe_connection(conn):
access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
(_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
dh = reduction.DupHandle(conn.fileno(), access)
return rebuild_pipe_connection, (dh, conn.readable, conn.writable)
def rebuild_pipe_connection(dh, readable, writable):
handle = dh.detach()
return PipeConnection(handle, readable, writable)
reduction.register(PipeConnection, reduce_pipe_connection)
else:
def reduce_connection(conn):
df = reduction.DupFd(conn.fileno())
return rebuild_connection, (df, conn.readable, conn.writable)
def rebuild_connection(df, readable, writable):
fd = df.detach()
return Connection(fd, readable, writable)
reduction.register(Connection, reduce_connection)