Issue #23681: Fixed Python 2 to 3 porting bugs.
Indexing bytes returns an integer, not bytes.

commit ee4c0b9dcf

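The recurring bug pattern behind every hunk below: in Python 3, indexing a bytes object yields an int, while slicing yields bytes, so Python 2 comparisons against one-byte strings silently become always-false. A minimal illustration (not from the patch itself):

    data = b'.abc'
    data[0]               # 46, an int, in Python 3 (b'.' in Python 2)
    data[0] == b'.'       # always False in Python 3
    data[:1] == b'.'      # True in both versions: slicing preserves the type
    data[0] == ord(b'.')  # True: compare int to int instead
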
@@ -136,7 +136,7 @@ class POP3:
         # so only possibilities are ...LF, ...CRLF, CR...LF
         if line[-2:] == CRLF:
             return line[:-2], octets
-        if line[0] == CR:
+        if line[:1] == CR:
             return line[1:-1], octets
         return line[:-1], octets
 

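This first hunk appears to be poplib's line-reading helper: `line[0]` is an int in Python 3, so comparing it to the one-byte constant `CR` could never succeed and a leading CR was never stripped. The one-character slice `line[:1]` restores the Python 2 comparison. A sketch, assuming `CR = b'\r'` as in the module:

    CR = b'\r'
    line = b'\rhello\n'
    line[0] == CR    # False: 13 (an int) never equals b'\r'
    line[:1] == CR   # True: b'\r' == b'\r'
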
@@ -145,7 +145,7 @@ def decode(input, output, header=False):
                 new = new + c; i = i+1
             elif i+1 == n and not partial:
                 partial = 1; break
-            elif i+1 < n and line[i+1] == ESCAPE:
+            elif i+1 < n and line[i+1:i+2] == ESCAPE:
                 new = new + ESCAPE; i = i+2
             elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]):
                 new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3

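The same fix in quopri's decode loop: `ESCAPE` is the one-byte constant `b'='`, so the branch decoding a doubled escape character never fired and such input fell through to the fallback handling. Slicing `line[i+1:i+2]` keeps the comparison bytes-to-bytes. For example:

    ESCAPE = b'='
    line = b'a==b'
    i = 1
    line[i+1] == ESCAPE      # False in Python 3: 61 vs b'='
    line[i+1:i+2] == ESCAPE  # True
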
@@ -154,7 +154,7 @@ class SMTPChannel(asynchat.async_chat):
         else:
             self._emptystring = b''
             self._linesep = b'\r\n'
-            self._dotsep = b'.'
+            self._dotsep = ord(b'.')
             self._newline = b'\n'
         self._set_rset_state()
         self.seen_greeting = ''

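The SMTPChannel fix goes the other direction: instead of slicing at each use, `_dotsep` is stored as the integer `ord(b'.')`, presumably because the consuming code compares it against an indexed byte, which is already an int in Python 3:

    dotsep = ord(b'.')   # 46
    line = b'.a line with a leading dot'
    line[0] == dotsep    # True: int-to-int comparison
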
@@ -210,12 +210,9 @@ class Au_read:
         self._framesize = self._framesize * self._nchannels
         if self._hdr_size > 24:
             self._info = file.read(self._hdr_size - 24)
-            for i in range(len(self._info)):
-                if self._info[i] == b'\0':
-                    self._info = self._info[:i]
-                    break
+            self._info, _, _ = self._info.partition(b'\0')
         else:
-            self._info = ''
+            self._info = b''
         try:
             self._data_pos = file.tell()
         except (AttributeError, OSError):

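In Au_read the byte-by-byte scan for the NUL terminator never matched (`self._info[i]` is an int, `b'\0'` is bytes), so trailing padding was never trimmed from the info chunk. `bytes.partition` does the truncation in one step, and the no-info default becomes `b''` so the attribute keeps a consistent type. How partition behaves here:

    info = b'author\0padding after the terminator'
    info, _, _ = info.partition(b'\0')
    # info is now b'author'; if no NUL is present, partition
    # returns the whole string plus two empty tails
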
@@ -150,15 +150,15 @@ def randrange_fmt(mode, char, obj):
     format character."""
     x = randrange(*fmtdict[mode][char])
     if char == 'c':
-        x = bytes(chr(x), 'latin1')
+        x = bytes([x])
+        if obj == 'numpy' and x == b'\x00':
+            # http://projects.scipy.org/numpy/ticket/1925
+            x = b'\x01'
     if char == '?':
         x = bool(x)
     if char == 'f' or char == 'd':
         x = struct.pack(char, x)
         x = struct.unpack(char, x)[0]
-    if obj == 'numpy' and x == b'\x00':
-        # http://projects.scipy.org/numpy/ticket/1925
-        x = b'\x01'
     return x
 
 def gen_item(fmt, obj):

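In the buffer test, `bytes([x])` is the idiomatic Python 3 spelling of a one-byte string built from an integer, replacing the `chr` plus latin-1 round trip, and the numpy NUL workaround moves inside the `'c'` branch because `x == b'\x00'` is only meaningful while `x` is still bytes, before the bool and float branches below rebind `x`. For instance:

    bytes([65])               # b'A'
    bytes(chr(65), 'latin1')  # b'A' as well, the long way round
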
@@ -1068,7 +1068,7 @@ class TestTokenize(TestCase):
         encoding = object()
         encoding_used = None
         def mock_detect_encoding(readline):
-            return encoding, ['first', 'second']
+            return encoding, [b'first', b'second']
 
         def mock__tokenize(readline, encoding):
             nonlocal encoding_used

@@ -1087,7 +1087,7 @@ class TestTokenize(TestCase):
             counter += 1
             if counter == 5:
                 return b''
-            return counter
+            return str(counter).encode()
 
         orig_detect_encoding = tokenize_module.detect_encoding
         orig__tokenize = tokenize_module._tokenize

@@ -1095,7 +1095,8 @@ class TestTokenize(TestCase):
         tokenize_module._tokenize = mock__tokenize
         try:
             results = tokenize(mock_readline)
-            self.assertEqual(list(results), ['first', 'second', 1, 2, 3, 4])
+            self.assertEqual(list(results),
+                             [b'first', b'second', b'1', b'2', b'3', b'4'])
         finally:
             tokenize_module.detect_encoding = orig_detect_encoding
             tokenize_module._tokenize = orig__tokenize

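The three test hunks above fix the test's mocks rather than the library: `tokenize()` consumes a bytes-producing readline in Python 3, so `mock_detect_encoding` and `mock_readline` must hand back bytes (`str(counter).encode()` rather than a bare int), and the expected values in the assertion become bytes to match. A self-contained sketch of the mocked readline contract:

    def make_readline():
        counter = 0
        def mock_readline():
            nonlocal counter
            counter += 1
            if counter == 5:
                return b''                    # EOF sentinel must be bytes
            return str(counter).encode()      # b'1', b'2', b'3', b'4'
        return mock_readline
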