mirror of https://github.com/python/cpython
bpo-43882 - urllib.parse should sanitize urls containing ASCII newline and tabs. (GH-25595)
* issue43882 - urllib.parse should sanitize urls containing ASCII newline and tabs. Co-authored-by: Gregory P. Smith <greg@krypto.org> Co-authored-by: Serhiy Storchaka <storchaka@gmail.com>
This commit is contained in:
parent 14fc2bdfab
commit 76cd81d603
@ -312,6 +312,9 @@ or on combining URL components into a URL string.
   ``#``, ``@``, or ``:`` will raise a :exc:`ValueError`. If the URL is
   decomposed before parsing, no error will be raised.

   Following the `WHATWG spec`_ that updates RFC 3986, ASCII newline
   ``\n``, ``\r`` and tab ``\t`` characters are stripped from the URL.

   .. versionchanged:: 3.6
      Out-of-range port numbers now raise :exc:`ValueError`, instead of
      returning :const:`None`.
@ -320,6 +323,10 @@ or on combining URL components into a URL string.
      Characters that affect netloc parsing under NFKC normalization will
      now raise :exc:`ValueError`.

   .. versionchanged:: 3.10
      ASCII newline and tab characters are stripped from the URL.

.. _WHATWG spec: https://url.spec.whatwg.org/#concept-basic-url-parser


.. function:: urlunsplit(parts)
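As a quick illustrative check (ours, not from the commit): :func:`~urllib.parse.urlparse` is implemented on top of :func:`~urllib.parse.urlsplit`, so it is covered by the same stripping:

>>> from urllib.parse import urlparse
>>> urlparse("http://www.python\t.org/\r\n")
ParseResult(scheme='http', netloc='www.python.org', path='/', params='', query='', fragment='')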
@ -674,6 +681,10 @@ task isn't already covered by the URL parsing functions above.

.. seealso::

   `WHATWG`_ - URL Living standard
      Working Group for the URL Standard that defines URLs, domains, IP addresses, the
      application/x-www-form-urlencoded format, and their API.

   :rfc:`3986` - Uniform Resource Identifiers
      This is the current standard (STD66). Any changes to urllib.parse module
      should conform to this. Certain deviations could be observed, which are

@ -697,3 +708,5 @@ task isn't already covered by the URL parsing functions above.

   :rfc:`1738` - Uniform Resource Locators (URL)
      This specifies the formal syntax and semantics of absolute URLs.

.. _WHATWG: https://url.spec.whatwg.org/
@ -612,6 +612,35 @@ class UrlParseTestCase(unittest.TestCase):
        with self.assertRaisesRegex(ValueError, "out of range"):
            p.port

    def test_urlsplit_remove_unsafe_bytes(self):
        # Remove ASCII tabs and newlines from input
        url = "http://www.python.org/java\nscript:\talert('msg\r\n')/#frag"
        p = urllib.parse.urlsplit(url)
        self.assertEqual(p.scheme, "http")
        self.assertEqual(p.netloc, "www.python.org")
        self.assertEqual(p.path, "/javascript:alert('msg')/")
        self.assertEqual(p.query, "")
        self.assertEqual(p.fragment, "frag")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, "www.python.org")
        self.assertEqual(p.port, None)
        self.assertEqual(p.geturl(), "http://www.python.org/javascript:alert('msg')/#frag")

        # Remove ASCII tabs and newlines from input as bytes.
        url = b"http://www.python.org/java\nscript:\talert('msg\r\n')/#frag"
        p = urllib.parse.urlsplit(url)
        self.assertEqual(p.scheme, b"http")
        self.assertEqual(p.netloc, b"www.python.org")
        self.assertEqual(p.path, b"/javascript:alert('msg')/")
        self.assertEqual(p.query, b"")
        self.assertEqual(p.fragment, b"frag")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, b"www.python.org")
        self.assertEqual(p.port, None)
        self.assertEqual(p.geturl(), b"http://www.python.org/javascript:alert('msg')/#frag")

    def test_attributes_bad_port(self):
        """Check handling of invalid ports."""
        for bytes in (False, True):
@ -78,6 +78,9 @@ scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
                '0123456789'
                '+-.')

# Unsafe bytes to be removed per WHATWG spec
_UNSAFE_URL_BYTES_TO_REMOVE = ['\t', '\r', '\n']

# XXX: Consider replacing with functools.lru_cache
MAX_CACHE_SIZE = 20
_parse_cache = {}

@ -469,6 +472,9 @@ def urlsplit(url, scheme='', allow_fragments=True):
        else:
            scheme, url = url[:i].lower(), url[i+1:]

    for b in _UNSAFE_URL_BYTES_TO_REMOVE:
        url = url.replace(b, "")

    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        if (('[' in netloc and ']' not in netloc) or
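A standalone sketch of the same technique (the helper name is ours, not part of the stdlib), useful for pre-sanitizing URLs on interpreters that predate this change; note that urlsplit() itself also accepts bytes because its internal _coerce_args helper decodes them to str before this loop runs:

# Illustrative helper, not stdlib code: mirrors the loop added to urlsplit().
def _strip_unsafe_url_chars(url):
    for ch in '\t\r\n':   # same characters as _UNSAFE_URL_BYTES_TO_REMOVE
        url = url.replace(ch, '')
    return url

print(_strip_unsafe_url_chars("http://example.com/\ta\r\nb"))  # -> http://example.com/ab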
@ -0,0 +1,6 @@
The presence of newline or tab characters in parts of a URL could allow
some forms of attacks.

Following the controlling specification for URLs defined by WHATWG,
:func:`urllib.parse` now removes ASCII newlines and tabs from URLs,
preventing such attacks.
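To make the threat model concrete (our example, not taken from the report): a CR/LF smuggled into a URL could previously survive parsing and reappear wherever the components are reused, e.g. in a hand-built HTTP request; after this change the parsed components are free of those characters:

from urllib.parse import urlsplit

# Hypothetical attacker-controlled input trying to smuggle a CRLF through the query.
tainted = "http://example.com/search?q=abc\r\nHost: evil.example"
parts = urlsplit(tainted)
assert "\r" not in parts.geturl() and "\n" not in parts.geturl()
print(parts.query)  # q=abcHost: evil.example  (control characters removed)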