mirror of https://github.com/python/cpython
bpo-45975: Simplify some while-loops with walrus operator (GH-29347)
parent 25bc115df9
commit 024ac542d7
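Every hunk below is the same mechanical rewrite: a `while True:` (or `while 1:`) loop that reads a value, tests it, and breaks is collapsed into a single `while` condition using an assignment expression (the PEP 572 walrus operator, available since Python 3.8). A minimal runnable sketch of the before/after pattern, using a hypothetical io.BytesIO stream rather than any object actually touched by this commit:

    import io

    # Hypothetical stand-in for the file, socket, or parser objects read below.
    stream = io.BytesIO(b"payload read in fixed-size chunks")
    chunks = []

    # Before: read, test, break.
    #     while True:
    #         data = stream.read(8)
    #         if not data:
    #             break
    #         chunks.append(data)

    # After: bind and test in one condition.
    while data := stream.read(8):
        chunks.append(data)

    assert b"".join(chunks) == b"payload read in fixed-size chunks"

The rewrite is behaviour-preserving as long as the loop stops exactly when the value just read is falsy; hunks where the original tested an explicit sentinel keep that comparison instead (see the note after the last hunk).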
@@ -638,10 +638,7 @@ class RawIOBase(IOBase):
     def readall(self):
         """Read until EOF, using multiple read() call."""
         res = bytearray()
-        while True:
-            data = self.read(DEFAULT_BUFFER_SIZE)
-            if not data:
-                break
+        while data := self.read(DEFAULT_BUFFER_SIZE):
             res += data
         if res:
             return bytes(res)
@@ -508,14 +508,8 @@ MAXBINSIZE = (MAXLINESIZE//4)*3
 
 def encode(input, output):
     """Encode a file; input and output are binary files."""
-    while True:
-        s = input.read(MAXBINSIZE)
-        if not s:
-            break
-        while len(s) < MAXBINSIZE:
-            ns = input.read(MAXBINSIZE-len(s))
-            if not ns:
-                break
+    while s := input.read(MAXBINSIZE):
+        while len(s) < MAXBINSIZE and (ns := input.read(MAXBINSIZE-len(s))):
             s += ns
         line = binascii.b2a_base64(s)
         output.write(line)
@@ -523,10 +517,7 @@ def encode(input, output):
 
 def decode(input, output):
     """Decode a file; input and output are binary files."""
-    while True:
-        line = input.readline()
-        if not line:
-            break
+    while line := input.readline():
         s = binascii.a2b_base64(line)
         output.write(s)
 
@@ -108,12 +108,8 @@ def get_ld_headers(file):
     p = Popen(["/usr/bin/dump", f"-X{AIX_ABI}", "-H", file],
               universal_newlines=True, stdout=PIPE, stderr=DEVNULL)
     # be sure to read to the end-of-file - getting all entries
-    while True:
-        ld_header = get_ld_header(p)
-        if ld_header:
-            ldr_headers.append((ld_header, get_ld_header_info(p)))
-        else:
-            break
+    while ld_header := get_ld_header(p):
+        ldr_headers.append((ld_header, get_ld_header_info(p)))
     p.stdout.close()
     p.wait()
     return ldr_headers
@@ -49,10 +49,7 @@ class Parser:
         feedparser = FeedParser(self._class, policy=self.policy)
         if headersonly:
             feedparser._set_headersonly()
-        while True:
-            data = fp.read(8192)
-            if not data:
-                break
+        while data := fp.read(8192):
             feedparser.feed(data)
         return feedparser.close()
 
@@ -434,10 +434,7 @@ class FTP:
         """
         self.voidcmd('TYPE I')
         with self.transfercmd(cmd, rest) as conn:
-            while 1:
-                data = conn.recv(blocksize)
-                if not data:
-                    break
+            while data := conn.recv(blocksize):
                 callback(data)
             # shutdown ssl layer
             if _SSLSocket is not None and isinstance(conn, _SSLSocket):
@@ -496,10 +493,7 @@ class FTP:
         """
         self.voidcmd('TYPE I')
         with self.transfercmd(cmd, rest) as conn:
-            while 1:
-                buf = fp.read(blocksize)
-                if not buf:
-                    break
+            while buf := fp.read(blocksize):
                 conn.sendall(buf)
                 if callback:
                     callback(buf)
@@ -578,11 +578,7 @@ class HTTPResponse(io.BufferedIOBase):
         assert self.chunked != _UNKNOWN
         value = []
         try:
-            while True:
-                chunk_left = self._get_chunk_left()
-                if chunk_left is None:
-                    break
-
+            while (chunk_left := self._get_chunk_left()) is not None:
                 if amt is not None and amt <= chunk_left:
                     value.append(self._safe_read(amt))
                     self.chunk_left = chunk_left - amt
@@ -998,10 +994,7 @@ class HTTPConnection:
             encode = self._is_textIO(data)
             if encode and self.debuglevel > 0:
                 print("encoding file using iso-8859-1")
-            while 1:
-                datablock = data.read(self.blocksize)
-                if not datablock:
-                    break
+            while datablock := data.read(self.blocksize):
                 if encode:
                     datablock = datablock.encode("iso-8859-1")
                 sys.audit("http.client.send", self, datablock)
@@ -1031,10 +1024,7 @@ class HTTPConnection:
         encode = self._is_textIO(readable)
         if encode and self.debuglevel > 0:
             print("encoding file using iso-8859-1")
-        while True:
-            datablock = readable.read(self.blocksize)
-            if not datablock:
-                break
+        while datablock := readable.read(self.blocksize):
             if encode:
                 datablock = datablock.encode("iso-8859-1")
             yield datablock
@@ -1915,9 +1915,7 @@ class LWPCookieJar(FileCookieJar):
                        "comment", "commenturl")
 
         try:
-            while 1:
-                line = f.readline()
-                if line == "": break
+            while (line := f.readline()) != "":
                 if not line.startswith(header):
                     continue
                 line = line[len(header):].strip()
@@ -2017,12 +2015,9 @@ class MozillaCookieJar(FileCookieJar):
                 filename)
 
         try:
-            while 1:
-                line = f.readline()
+            while (line := f.readline()) != "":
                 rest = {}
 
-                if line == "": break
-
                 # httponly is a cookie flag as defined in rfc6265
                 # when encoded in a netscape cookie file,
                 # the line is prepended with "#HttpOnly_"
@@ -1956,10 +1956,7 @@ class _ProxyFile:
 
     def __iter__(self):
         """Iterate over lines."""
-        while True:
-            line = self.readline()
-            if not line:
-                return
+        while line := self.readline():
             yield line
 
     def tell(self):
@@ -90,9 +90,7 @@ def _readmailcapfile(fp, lineno):
     the viewing command is stored with the key "view".
     """
     caps = {}
-    while 1:
-        line = fp.readline()
-        if not line: break
+    while line := fp.readline():
         # Ignore comments and blank lines
         if line[0] == '#' or line.strip() == '':
             continue
@@ -217,10 +217,7 @@ class MimeTypes:
         list of standard types, else to the list of non-standard
         types.
         """
-        while 1:
-            line = fp.readline()
-            if not line:
-                break
+        while line := fp.readline():
             words = line.split()
             for i in range(len(words)):
                 if words[i][0] == '#':
@@ -223,8 +223,6 @@ class Stats:
             for word, tup in self.sort_arg_dict_default.items():
                 fragment = word
                 while fragment:
-                    if not fragment:
-                        break
                     if fragment in dict:
                         bad_list[fragment] = 0
                         break
@@ -686,9 +686,7 @@ class HTMLDoc(Doc):
                                 r'RFC[- ]?(\d+)|'
                                 r'PEP[- ]?(\d+)|'
                                 r'(self\.)?(\w+))')
-        while True:
-            match = pattern.search(text, here)
-            if not match: break
+        while match := pattern.search(text, here):
             start, end = match.span()
             results.append(escape(text[here:start]))
 
@@ -67,10 +67,7 @@ def encode(input, output, quotetabs, header=False):
             output.write(s + lineEnd)
 
     prevline = None
-    while 1:
-        line = input.readline()
-        if not line:
-            break
+    while line := input.readline():
         outline = []
         # Strip off any readline induced trailing newline
         stripped = b''
@@ -126,9 +123,7 @@ def decode(input, output, header=False):
         return
 
     new = b''
-    while 1:
-        line = input.readline()
-        if not line: break
+    while line := input.readline():
         i, n = 0, len(line)
         if n > 0 and line[n-1:n] == b'\n':
             partial = 0; n = n-1
@@ -333,10 +333,7 @@ def quote(s):
 
 
 def _print_tokens(lexer):
-    while 1:
-        tt = lexer.get_token()
-        if not tt:
-            break
+    while tt := lexer.get_token():
         print("Token: " + repr(tt))
 
 if __name__ == '__main__':
@@ -194,10 +194,7 @@ def copyfileobj(fsrc, fdst, length=0):
     # Localize variable access to minimize overhead.
     fsrc_read = fsrc.read
    fdst_write = fdst.write
-    while True:
-        buf = fsrc_read(length)
-        if not buf:
-            break
+    while buf := fsrc_read(length):
         fdst_write(buf)
 
 def _samefile(src, dst):
@@ -1099,10 +1099,7 @@ if __name__ == '__main__':
     toaddrs = prompt("To").split(',')
     print("Enter message, end with ^D:")
     msg = ''
-    while 1:
-        line = sys.stdin.readline()
-        if not line:
-            break
+    while line := sys.stdin.readline():
         msg = msg + line
     print("Message length is %d" % len(msg))
 
@@ -292,8 +292,7 @@ class BaseServer:
             selector.register(self, selectors.EVENT_READ)
 
             while True:
-                ready = selector.select(timeout)
-                if ready:
+                if selector.select(timeout):
                     return self._handle_request_noblock()
                 else:
                     if timeout is not None:
@@ -1262,11 +1262,7 @@ class TarInfo(object):
         # the newline. keyword and value are both UTF-8 encoded strings.
         regex = re.compile(br"(\d+) ([^=]+)=")
         pos = 0
-        while True:
-            match = regex.match(buf, pos)
-            if not match:
-                break
-
+        while match := regex.match(buf, pos):
             length, keyword = match.groups()
             length = int(length)
             if length == 0:
@@ -2418,10 +2414,8 @@ class TarFile(object):
         """Read through the entire archive file and look for readable
            members.
         """
-        while True:
-            tarinfo = self.next()
-            if tarinfo is None:
-                break
+        while self.next() is not None:
+            pass
         self._loaded = True
 
     def _check(self, mode=None):
@@ -825,10 +825,7 @@ class FileTestCase(unittest.TestCase):
     def test_read_10(self):
         with LZMAFile(BytesIO(COMPRESSED_XZ)) as f:
             chunks = []
-            while True:
-                result = f.read(10)
-                if not result:
-                    break
+            while result := f.read(10):
                 self.assertLessEqual(len(result), 10)
                 chunks.append(result)
             self.assertEqual(b"".join(chunks), INPUT)
@@ -911,10 +908,7 @@ class FileTestCase(unittest.TestCase):
     def test_read1(self):
         with LZMAFile(BytesIO(COMPRESSED_XZ)) as f:
             blocks = []
-            while True:
-                result = f.read1()
-                if not result:
-                    break
+            while result := f.read1():
                 blocks.append(result)
             self.assertEqual(b"".join(blocks), INPUT)
             self.assertEqual(f.read1(), b"")
@@ -926,10 +920,7 @@ class FileTestCase(unittest.TestCase):
     def test_read1_10(self):
         with LZMAFile(BytesIO(COMPRESSED_XZ)) as f:
             blocks = []
-            while True:
-                result = f.read1(10)
-                if not result:
-                    break
+            while result := f.read1(10):
                 blocks.append(result)
             self.assertEqual(b"".join(blocks), INPUT)
             self.assertEqual(f.read1(), b"")
@@ -937,10 +928,7 @@ class FileTestCase(unittest.TestCase):
     def test_read1_multistream(self):
         with LZMAFile(BytesIO(COMPRESSED_XZ * 5)) as f:
             blocks = []
-            while True:
-                result = f.read1()
-                if not result:
-                    break
+            while result := f.read1():
                 blocks.append(result)
             self.assertEqual(b"".join(blocks), INPUT * 5)
             self.assertEqual(f.read1(), b"")
@@ -265,10 +265,7 @@ def urlretrieve(url, filename=None, reporthook=None, data=None):
             if reporthook:
                 reporthook(blocknum, bs, size)
 
-            while True:
-                block = fp.read(bs)
-                if not block:
-                    break
+            while block := fp.read(bs):
                 read += len(block)
                 tfp.write(block)
                 blocknum += 1
@@ -1847,10 +1844,7 @@ class URLopener:
                 size = int(headers["Content-Length"])
             if reporthook:
                 reporthook(blocknum, bs, size)
-            while 1:
-                block = fp.read(bs)
-                if not block:
-                    break
+            while block := fp.read(bs):
                 read += len(block)
                 tfp.write(block)
                 blocknum += 1
@@ -475,10 +475,7 @@ class SimpleHandler(BaseHandler):
         from warnings import warn
         warn("SimpleHandler.stdout.write() should not do partial writes",
             DeprecationWarning)
-        while True:
-            data = data[result:]
-            if not data:
-                break
+        while data := data[result:]:
             result = self.stdout.write(data)
 
     def _flush(self):
@@ -214,10 +214,7 @@ class InputWrapper:
         return lines
 
    def __iter__(self):
-        while 1:
-            line = self.readline()
-            if not line:
-                return
+        while line := self.readline():
             yield line
 
     def close(self):
@@ -224,9 +224,7 @@ class Unpacker:
 
     def unpack_list(self, unpack_item):
         list = []
-        while 1:
-            x = self.unpack_uint()
-            if x == 0: break
+        while (x := self.unpack_uint()) != 0:
             if x != 1:
                 raise ConversionError('0 or 1 expected, got %r' % (x,))
             item = unpack_item()
@@ -200,10 +200,7 @@ class ExpatBuilder:
         parser = self.getParser()
         first_buffer = True
         try:
-            while 1:
-                buffer = file.read(16*1024)
-                if not buffer:
-                    break
+            while buffer := file.read(16*1024):
                 parser.Parse(buffer, False)
                 if first_buffer and self.document.documentElement:
                     self._setup_subset(buffer)
@@ -566,10 +566,7 @@ class ElementTree:
                     # it with chunks.
                     self._root = parser._parse_whole(source)
                     return self._root
-            while True:
-                data = source.read(65536)
-                if not data:
-                    break
+            while data := source.read(65536):
                 parser.feed(data)
             self._root = parser.close()
             return self._root
@@ -120,10 +120,8 @@ class IncrementalParser(XMLReader):
         file = source.getCharacterStream()
         if file is None:
             file = source.getByteStream()
-        buffer = file.read(self._bufsize)
-        while buffer:
+        while buffer := file.read(self._bufsize):
             self.feed(buffer)
-            buffer = file.read(self._bufsize)
         self.close()
 
     def feed(self, data):
@@ -1339,10 +1339,7 @@ class Transport:
 
         p, u = self.getparser()
 
-        while 1:
-            data = stream.read(1024)
-            if not data:
-                break
+        while data := stream.read(1024):
             if self.verbose:
                 print("body:", repr(data))
             p.feed(data)
@@ -720,9 +720,7 @@ class ServerHTMLDoc(pydoc.HTMLDoc):
                                 r'RFC[- ]?(\d+)|'
                                 r'PEP[- ]?(\d+)|'
                                 r'(self\.)?((?:\w|\.)+))\b')
-        while 1:
-            match = pattern.search(text, here)
-            if not match: break
+        while match := pattern.search(text, here):
             start, end = match.span()
             results.append(escape(text[here:start]))
 
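Note that not every loop in this commit tests plain truthiness. Where the original code compared against an explicit sentinel, the rewritten condition keeps that comparison, e.g. `while (chunk_left := self._get_chunk_left()) is not None:` in http.client, `while (line := f.readline()) != "":` in http.cookiejar, and `while (x := self.unpack_uint()) != 0:` in xdrlib, so a falsy-but-valid value does not end the loop early. A self-contained sketch of that variant; the queue and the next_chunk_size() helper are hypothetical, loosely modelled on _get_chunk_left():

    from collections import deque

    # Hypothetical source that can yield a falsy-but-valid value (0)
    # before signalling exhaustion with None.
    pending = deque([3, 0, 7])

    def next_chunk_size():
        return pending.popleft() if pending else None

    sizes = []
    while (chunk_left := next_chunk_size()) is not None:
        sizes.append(chunk_left)

    assert sizes == [3, 0, 7]  # the 0 is kept; only None ends the loop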