#27364: fix "incorrect" uses of escape character in the stdlib.
And most of the tools. Patch by Emanuel Barry, reviewed by me, Serhiy Storchaka, and Martin Panter.
This commit is contained in:
parent
513d7478a1
commit
44b548dda8
|
@ -210,7 +210,7 @@ def _remove_universal_flags(_config_vars):
|
||||||
# Do not alter a config var explicitly overridden by env var
|
# Do not alter a config var explicitly overridden by env var
|
||||||
if cv in _config_vars and cv not in os.environ:
|
if cv in _config_vars and cv not in os.environ:
|
||||||
flags = _config_vars[cv]
|
flags = _config_vars[cv]
|
||||||
flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII)
|
flags = re.sub(r'-arch\s+\w+\s', ' ', flags, re.ASCII)
|
||||||
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
|
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
|
||||||
_save_modified_value(_config_vars, cv, flags)
|
_save_modified_value(_config_vars, cv, flags)
|
||||||
|
|
||||||
|
@ -232,7 +232,7 @@ def _remove_unsupported_archs(_config_vars):
|
||||||
if 'CC' in os.environ:
|
if 'CC' in os.environ:
|
||||||
return _config_vars
|
return _config_vars
|
||||||
|
|
||||||
if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None:
|
if re.search(r'-arch\s+ppc', _config_vars['CFLAGS']) is not None:
|
||||||
# NOTE: Cannot use subprocess here because of bootstrap
|
# NOTE: Cannot use subprocess here because of bootstrap
|
||||||
# issues when building Python itself
|
# issues when building Python itself
|
||||||
status = os.system(
|
status = os.system(
|
||||||
|
@ -251,7 +251,7 @@ def _remove_unsupported_archs(_config_vars):
|
||||||
for cv in _UNIVERSAL_CONFIG_VARS:
|
for cv in _UNIVERSAL_CONFIG_VARS:
|
||||||
if cv in _config_vars and cv not in os.environ:
|
if cv in _config_vars and cv not in os.environ:
|
||||||
flags = _config_vars[cv]
|
flags = _config_vars[cv]
|
||||||
flags = re.sub('-arch\s+ppc\w*\s', ' ', flags)
|
flags = re.sub(r'-arch\s+ppc\w*\s', ' ', flags)
|
||||||
_save_modified_value(_config_vars, cv, flags)
|
_save_modified_value(_config_vars, cv, flags)
|
||||||
|
|
||||||
return _config_vars
|
return _config_vars
|
||||||
|
@ -267,7 +267,7 @@ def _override_all_archs(_config_vars):
|
||||||
for cv in _UNIVERSAL_CONFIG_VARS:
|
for cv in _UNIVERSAL_CONFIG_VARS:
|
||||||
if cv in _config_vars and '-arch' in _config_vars[cv]:
|
if cv in _config_vars and '-arch' in _config_vars[cv]:
|
||||||
flags = _config_vars[cv]
|
flags = _config_vars[cv]
|
||||||
flags = re.sub('-arch\s+\w+\s', ' ', flags)
|
flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
|
||||||
flags = flags + ' ' + arch
|
flags = flags + ' ' + arch
|
||||||
_save_modified_value(_config_vars, cv, flags)
|
_save_modified_value(_config_vars, cv, flags)
|
||||||
|
|
||||||
|
@ -465,7 +465,7 @@ def get_platform_osx(_config_vars, osname, release, machine):
|
||||||
|
|
||||||
machine = 'fat'
|
machine = 'fat'
|
||||||
|
|
||||||
archs = re.findall('-arch\s+(\S+)', cflags)
|
archs = re.findall(r'-arch\s+(\S+)', cflags)
|
||||||
archs = tuple(sorted(set(archs)))
|
archs = tuple(sorted(set(archs)))
|
||||||
|
|
||||||
if len(archs) == 1:
|
if len(archs) == 1:
|
||||||
|
|
|
@ -215,10 +215,10 @@ class Sniffer:
|
||||||
"""
|
"""
|
||||||
|
|
||||||
matches = []
|
matches = []
|
||||||
for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
|
for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
|
||||||
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
|
r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
|
||||||
'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
|
r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
|
||||||
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
|
r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
|
||||||
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
|
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
|
||||||
matches = regexp.findall(data)
|
matches = regexp.findall(data)
|
||||||
if matches:
|
if matches:
|
||||||
|
|
|
@ -1415,7 +1415,7 @@ def _mdiff(fromlines, tolines, context=None, linejunk=None,
|
||||||
import re
|
import re
|
||||||
|
|
||||||
# regular expression for finding intraline change indices
|
# regular expression for finding intraline change indices
|
||||||
change_re = re.compile('(\++|\-+|\^+)')
|
change_re = re.compile(r'(\++|\-+|\^+)')
|
||||||
|
|
||||||
# create the difference iterator to generate the differences
|
# create the difference iterator to generate the differences
|
||||||
diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
|
diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
|
||||||
|
|
|
@ -221,7 +221,7 @@ class Command:
|
||||||
self._ensure_stringlike(option, "string", default)
|
self._ensure_stringlike(option, "string", default)
|
||||||
|
|
||||||
def ensure_string_list(self, option):
|
def ensure_string_list(self, option):
|
||||||
"""Ensure that 'option' is a list of strings. If 'option' is
|
r"""Ensure that 'option' is a list of strings. If 'option' is
|
||||||
currently a string, we split it either on /,\s*/ or /\s+/, so
|
currently a string, we split it either on /,\s*/ or /\s+/, so
|
||||||
"foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
|
"foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
|
||||||
["foo", "bar", "baz"].
|
["foo", "bar", "baz"].
|
||||||
|
|
|
@ -623,7 +623,7 @@ class bdist_msi(Command):
|
||||||
cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
|
cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
|
||||||
"OK", "OK", "OK", bitmap=False)
|
"OK", "OK", "OK", bitmap=False)
|
||||||
cost.text("Title", 15, 6, 200, 15, 0x30003,
|
cost.text("Title", 15, 6, 200, 15, 0x30003,
|
||||||
"{\DlgFontBold8}Disk Space Requirements")
|
r"{\DlgFontBold8}Disk Space Requirements")
|
||||||
cost.text("Description", 20, 20, 280, 20, 0x30003,
|
cost.text("Description", 20, 20, 280, 20, 0x30003,
|
||||||
"The disk space required for the installation of the selected features.")
|
"The disk space required for the installation of the selected features.")
|
||||||
cost.text("Text", 20, 53, 330, 60, 3,
|
cost.text("Text", 20, 53, 330, 60, 3,
|
||||||
|
@ -670,7 +670,7 @@ class bdist_msi(Command):
|
||||||
progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
|
progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
|
||||||
"Cancel", "Cancel", "Cancel", bitmap=False)
|
"Cancel", "Cancel", "Cancel", bitmap=False)
|
||||||
progress.text("Title", 20, 15, 200, 15, 0x30003,
|
progress.text("Title", 20, 15, 200, 15, 0x30003,
|
||||||
"{\DlgFontBold8}[Progress1] [ProductName]")
|
r"{\DlgFontBold8}[Progress1] [ProductName]")
|
||||||
progress.text("Text", 35, 65, 300, 30, 3,
|
progress.text("Text", 35, 65, 300, 30, 3,
|
||||||
"Please wait while the Installer [Progress2] [ProductName]. "
|
"Please wait while the Installer [Progress2] [ProductName]. "
|
||||||
"This may take several minutes.")
|
"This may take several minutes.")
|
||||||
|
|
|
@ -51,7 +51,7 @@ class build_scripts(Command):
|
||||||
|
|
||||||
|
|
||||||
def copy_scripts(self):
|
def copy_scripts(self):
|
||||||
"""Copy each script listed in 'self.scripts'; if it's marked as a
|
r"""Copy each script listed in 'self.scripts'; if it's marked as a
|
||||||
Python script in the Unix way (first line matches 'first_line_re',
|
Python script in the Unix way (first line matches 'first_line_re',
|
||||||
ie. starts with "\#!" and contains "python"), then adjust the first
|
ie. starts with "\#!" and contains "python"), then adjust the first
|
||||||
line to refer to the current Python interpreter as we copy.
|
line to refer to the current Python interpreter as we copy.
|
||||||
|
|
|
@ -368,7 +368,7 @@ def check_config_h():
|
||||||
return (CONFIG_H_UNCERTAIN,
|
return (CONFIG_H_UNCERTAIN,
|
||||||
"couldn't read '%s': %s" % (fn, exc.strerror))
|
"couldn't read '%s': %s" % (fn, exc.strerror))
|
||||||
|
|
||||||
RE_VERSION = re.compile(b'(\d+\.\d+(\.\d+)*)')
|
RE_VERSION = re.compile(br'(\d+\.\d+(\.\d+)*)')
|
||||||
|
|
||||||
def _find_exe_version(cmd):
|
def _find_exe_version(cmd):
|
||||||
"""Find the version of an executable by running `cmd` in the shell.
|
"""Find the version of an executable by running `cmd` in the shell.
|
||||||
|
|
|
@ -716,7 +716,7 @@ class MSVCCompiler(CCompiler) :
|
||||||
r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
|
r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
|
||||||
re.DOTALL)
|
re.DOTALL)
|
||||||
manifest_buf = re.sub(pattern, "", manifest_buf)
|
manifest_buf = re.sub(pattern, "", manifest_buf)
|
||||||
pattern = "<dependentAssembly>\s*</dependentAssembly>"
|
pattern = r"<dependentAssembly>\s*</dependentAssembly>"
|
||||||
manifest_buf = re.sub(pattern, "", manifest_buf)
|
manifest_buf = re.sub(pattern, "", manifest_buf)
|
||||||
# Now see if any other assemblies are referenced - if not, we
|
# Now see if any other assemblies are referenced - if not, we
|
||||||
# don't want a manifest embedded.
|
# don't want a manifest embedded.
|
||||||
|
|
|
@ -278,7 +278,7 @@ def parse_config_h(fp, g=None):
|
||||||
|
|
||||||
# Regexes needed for parsing Makefile (and similar syntaxes,
|
# Regexes needed for parsing Makefile (and similar syntaxes,
|
||||||
# like old-style Setup files).
|
# like old-style Setup files).
|
||||||
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
|
_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
|
||||||
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
|
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
|
||||||
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
|
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
|
||||||
|
|
||||||
|
|
|
@ -154,7 +154,7 @@ def split_provision(value):
|
||||||
global _provision_rx
|
global _provision_rx
|
||||||
if _provision_rx is None:
|
if _provision_rx is None:
|
||||||
_provision_rx = re.compile(
|
_provision_rx = re.compile(
|
||||||
"([a-zA-Z_]\w*(?:\.[a-zA-Z_]\w*)*)(?:\s*\(\s*([^)\s]+)\s*\))?$",
|
r"([a-zA-Z_]\w*(?:\.[a-zA-Z_]\w*)*)(?:\s*\(\s*([^)\s]+)\s*\))?$",
|
||||||
re.ASCII)
|
re.ASCII)
|
||||||
value = value.strip()
|
value = value.strip()
|
||||||
m = _provision_rx.match(value)
|
m = _provision_rx.match(value)
|
||||||
|
|
|
@ -765,7 +765,7 @@ class DocTestParser:
|
||||||
|
|
||||||
# This regular expression finds the indentation of every non-blank
|
# This regular expression finds the indentation of every non-blank
|
||||||
# line in a string.
|
# line in a string.
|
||||||
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
|
_INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE)
|
||||||
|
|
||||||
def _min_indent(self, s):
|
def _min_indent(self, s):
|
||||||
"Return the minimum indentation of any non-blank line in `s`"
|
"Return the minimum indentation of any non-blank line in `s`"
|
||||||
|
@ -1106,7 +1106,7 @@ class DocTestFinder:
|
||||||
if lineno is not None:
|
if lineno is not None:
|
||||||
if source_lines is None:
|
if source_lines is None:
|
||||||
return lineno+1
|
return lineno+1
|
||||||
pat = re.compile('(^|.*:)\s*\w*("|\')')
|
pat = re.compile(r'(^|.*:)\s*\w*("|\')')
|
||||||
for lineno in range(lineno, len(source_lines)):
|
for lineno in range(lineno, len(source_lines)):
|
||||||
if pat.match(source_lines[lineno]):
|
if pat.match(source_lines[lineno]):
|
||||||
return lineno
|
return lineno
|
||||||
|
@ -1608,11 +1608,11 @@ class OutputChecker:
|
||||||
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
|
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
|
||||||
if not (optionflags & DONT_ACCEPT_BLANKLINE):
|
if not (optionflags & DONT_ACCEPT_BLANKLINE):
|
||||||
# Replace <BLANKLINE> in want with a blank line.
|
# Replace <BLANKLINE> in want with a blank line.
|
||||||
want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
|
want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
|
||||||
'', want)
|
'', want)
|
||||||
# If a line in got contains only spaces, then remove the
|
# If a line in got contains only spaces, then remove the
|
||||||
# spaces.
|
# spaces.
|
||||||
got = re.sub('(?m)^\s*?$', '', got)
|
got = re.sub(r'(?m)^\s*?$', '', got)
|
||||||
if got == want:
|
if got == want:
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
|
@ -652,8 +652,8 @@ class Comment(WhiteSpaceTokenList):
|
||||||
if value.token_type == 'comment':
|
if value.token_type == 'comment':
|
||||||
return str(value)
|
return str(value)
|
||||||
return str(value).replace('\\', '\\\\').replace(
|
return str(value).replace('\\', '\\\\').replace(
|
||||||
'(', '\(').replace(
|
'(', r'\(').replace(
|
||||||
')', '\)')
|
')', r'\)')
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def content(self):
|
def content(self):
|
||||||
|
@ -1356,15 +1356,15 @@ RouteComponentMarker = ValueTerminal('@', 'route-component-marker')
|
||||||
|
|
||||||
_wsp_splitter = re.compile(r'([{}]+)'.format(''.join(WSP))).split
|
_wsp_splitter = re.compile(r'([{}]+)'.format(''.join(WSP))).split
|
||||||
_non_atom_end_matcher = re.compile(r"[^{}]+".format(
|
_non_atom_end_matcher = re.compile(r"[^{}]+".format(
|
||||||
''.join(ATOM_ENDS).replace('\\','\\\\').replace(']','\]'))).match
|
''.join(ATOM_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match
|
||||||
_non_printable_finder = re.compile(r"[\x00-\x20\x7F]").findall
|
_non_printable_finder = re.compile(r"[\x00-\x20\x7F]").findall
|
||||||
_non_token_end_matcher = re.compile(r"[^{}]+".format(
|
_non_token_end_matcher = re.compile(r"[^{}]+".format(
|
||||||
''.join(TOKEN_ENDS).replace('\\','\\\\').replace(']','\]'))).match
|
''.join(TOKEN_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match
|
||||||
_non_attribute_end_matcher = re.compile(r"[^{}]+".format(
|
_non_attribute_end_matcher = re.compile(r"[^{}]+".format(
|
||||||
''.join(ATTRIBUTE_ENDS).replace('\\','\\\\').replace(']','\]'))).match
|
''.join(ATTRIBUTE_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match
|
||||||
_non_extended_attribute_end_matcher = re.compile(r"[^{}]+".format(
|
_non_extended_attribute_end_matcher = re.compile(r"[^{}]+".format(
|
||||||
''.join(EXTENDED_ATTRIBUTE_ENDS).replace(
|
''.join(EXTENDED_ATTRIBUTE_ENDS).replace(
|
||||||
'\\','\\\\').replace(']','\]'))).match
|
'\\','\\\\').replace(']',r'\]'))).match
|
||||||
|
|
||||||
def _validate_xtext(xtext):
|
def _validate_xtext(xtext):
|
||||||
"""If input token contains ASCII non-printables, register a defect."""
|
"""If input token contains ASCII non-printables, register a defect."""
|
||||||
|
@ -1517,7 +1517,7 @@ def get_unstructured(value):
|
||||||
return unstructured
|
return unstructured
|
||||||
|
|
||||||
def get_qp_ctext(value):
|
def get_qp_ctext(value):
|
||||||
"""ctext = <printable ascii except \ ( )>
|
r"""ctext = <printable ascii except \ ( )>
|
||||||
|
|
||||||
This is not the RFC ctext, since we are handling nested comments in comment
|
This is not the RFC ctext, since we are handling nested comments in comment
|
||||||
and unquoting quoted-pairs here. We allow anything except the '()'
|
and unquoting quoted-pairs here. We allow anything except the '()'
|
||||||
|
@ -1878,7 +1878,7 @@ def get_obs_local_part(value):
|
||||||
return obs_local_part, value
|
return obs_local_part, value
|
||||||
|
|
||||||
def get_dtext(value):
|
def get_dtext(value):
|
||||||
""" dtext = <printable ascii except \ [ ]> / obs-dtext
|
r""" dtext = <printable ascii except \ [ ]> / obs-dtext
|
||||||
obs-dtext = obs-NO-WS-CTL / quoted-pair
|
obs-dtext = obs-NO-WS-CTL / quoted-pair
|
||||||
|
|
||||||
We allow anything except the excluded characters, but if we find any
|
We allow anything except the excluded characters, but if we find any
|
||||||
|
|
|
@ -29,10 +29,10 @@ from email._policybase import compat32
|
||||||
from collections import deque
|
from collections import deque
|
||||||
from io import StringIO
|
from io import StringIO
|
||||||
|
|
||||||
NLCRE = re.compile('\r\n|\r|\n')
|
NLCRE = re.compile(r'\r\n|\r|\n')
|
||||||
NLCRE_bol = re.compile('(\r\n|\r|\n)')
|
NLCRE_bol = re.compile(r'(\r\n|\r|\n)')
|
||||||
NLCRE_eol = re.compile('(\r\n|\r|\n)\Z')
|
NLCRE_eol = re.compile(r'(\r\n|\r|\n)\Z')
|
||||||
NLCRE_crack = re.compile('(\r\n|\r|\n)')
|
NLCRE_crack = re.compile(r'(\r\n|\r|\n)')
|
||||||
# RFC 2822 $3.6.8 Optional fields. ftext is %d33-57 / %d59-126, Any character
|
# RFC 2822 $3.6.8 Optional fields. ftext is %d33-57 / %d59-126, Any character
|
||||||
# except controls, SP, and ":".
|
# except controls, SP, and ":".
|
||||||
headerRE = re.compile(r'^(From |[\041-\071\073-\176]*:|[\t ])')
|
headerRE = re.compile(r'^(From |[\041-\071\073-\176]*:|[\t ])')
|
||||||
|
|
|
@ -106,4 +106,4 @@ def translate(pat):
|
||||||
res = '%s[%s]' % (res, stuff)
|
res = '%s[%s]' % (res, stuff)
|
||||||
else:
|
else:
|
||||||
res = res + re.escape(c)
|
res = res + re.escape(c)
|
||||||
return res + '\Z(?ms)'
|
return res + r'\Z(?ms)'
|
||||||
|
|
|
@ -821,7 +821,7 @@ def parse150(resp):
|
||||||
if _150_re is None:
|
if _150_re is None:
|
||||||
import re
|
import re
|
||||||
_150_re = re.compile(
|
_150_re = re.compile(
|
||||||
"150 .* \((\d+) bytes\)", re.IGNORECASE | re.ASCII)
|
r"150 .* \((\d+) bytes\)", re.IGNORECASE | re.ASCII)
|
||||||
m = _150_re.match(resp)
|
m = _150_re.match(resp)
|
||||||
if not m:
|
if not m:
|
||||||
return None
|
return None
|
||||||
|
|
|
@ -34,7 +34,7 @@ commentclose = re.compile(r'--\s*>')
|
||||||
# explode, so don't do it.
|
# explode, so don't do it.
|
||||||
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
|
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
|
||||||
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
|
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
|
||||||
tagfind_tolerant = re.compile('([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*')
|
tagfind_tolerant = re.compile(r'([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*')
|
||||||
attrfind_tolerant = re.compile(
|
attrfind_tolerant = re.compile(
|
||||||
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
|
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
|
||||||
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
|
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
|
||||||
|
@ -56,7 +56,7 @@ locatestarttagend_tolerant = re.compile(r"""
|
||||||
endendtag = re.compile('>')
|
endendtag = re.compile('>')
|
||||||
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
|
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
|
||||||
# </ and the tag name, so maybe this should be fixed
|
# </ and the tag name, so maybe this should be fixed
|
||||||
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
|
endtagfind = re.compile(r'</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
"""HTTP/1.1 client library
|
r"""HTTP/1.1 client library
|
||||||
|
|
||||||
<intro stuff goes here>
|
<intro stuff goes here>
|
||||||
<other stuff, too>
|
<other stuff, too>
|
||||||
|
|
|
@ -200,7 +200,7 @@ def _str2time(day, mon, yr, hr, min, sec, tz):
|
||||||
|
|
||||||
STRICT_DATE_RE = re.compile(
|
STRICT_DATE_RE = re.compile(
|
||||||
r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
|
r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
|
||||||
"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$", re.ASCII)
|
r"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$", re.ASCII)
|
||||||
WEEKDAY_RE = re.compile(
|
WEEKDAY_RE = re.compile(
|
||||||
r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I | re.ASCII)
|
r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I | re.ASCII)
|
||||||
LOOSE_HTTP_DATE_RE = re.compile(
|
LOOSE_HTTP_DATE_RE = re.compile(
|
||||||
|
@ -277,7 +277,7 @@ def http2time(text):
|
||||||
return _str2time(day, mon, yr, hr, min, sec, tz)
|
return _str2time(day, mon, yr, hr, min, sec, tz)
|
||||||
|
|
||||||
ISO_DATE_RE = re.compile(
|
ISO_DATE_RE = re.compile(
|
||||||
"""^
|
r"""^
|
||||||
(\d{4}) # year
|
(\d{4}) # year
|
||||||
[-\/]?
|
[-\/]?
|
||||||
(\d\d?) # numerical month
|
(\d\d?) # numerical month
|
||||||
|
@ -411,7 +411,7 @@ def split_header_words(header_values):
|
||||||
pairs = []
|
pairs = []
|
||||||
else:
|
else:
|
||||||
# skip junk
|
# skip junk
|
||||||
non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text)
|
non_junk, nr_junk_chars = re.subn(r"^[=\s;]*", "", text)
|
||||||
assert nr_junk_chars > 0, (
|
assert nr_junk_chars > 0, (
|
||||||
"split_header_words bug: '%s', '%s', %s" %
|
"split_header_words bug: '%s', '%s', %s" %
|
||||||
(orig_text, text, pairs))
|
(orig_text, text, pairs))
|
||||||
|
|
|
@ -456,7 +456,7 @@ class Morsel(dict):
|
||||||
#
|
#
|
||||||
|
|
||||||
_LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\="
|
_LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\="
|
||||||
_LegalValueChars = _LegalKeyChars + '\[\]'
|
_LegalValueChars = _LegalKeyChars + r'\[\]'
|
||||||
_CookiePattern = re.compile(r"""
|
_CookiePattern = re.compile(r"""
|
||||||
(?x) # This is a verbose pattern
|
(?x) # This is a verbose pattern
|
||||||
\s* # Optional whitespace at start of cookie
|
\s* # Optional whitespace at start of cookie
|
||||||
|
|
|
@ -120,7 +120,7 @@ def get_entity(expression):
|
||||||
_MAX_COLS = 85
|
_MAX_COLS = 85
|
||||||
_MAX_LINES = 5 # enough for bytes
|
_MAX_LINES = 5 # enough for bytes
|
||||||
_INDENT = ' '*4 # for wrapped signatures
|
_INDENT = ' '*4 # for wrapped signatures
|
||||||
_first_param = re.compile('(?<=\()\w*\,?\s*')
|
_first_param = re.compile(r'(?<=\()\w*\,?\s*')
|
||||||
_default_callable_argspec = "See source or doc"
|
_default_callable_argspec = "See source or doc"
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -91,7 +91,7 @@ class ReplaceDialogTest(unittest.TestCase):
|
||||||
text.mark_set('insert', 'end')
|
text.mark_set('insert', 'end')
|
||||||
text.insert('insert', '\nline42:')
|
text.insert('insert', '\nline42:')
|
||||||
before_text = text.get('1.0', 'end')
|
before_text = text.get('1.0', 'end')
|
||||||
pv.set('[a-z][\d]+')
|
pv.set(r'[a-z][\d]+')
|
||||||
replace()
|
replace()
|
||||||
after_text = text.get('1.0', 'end')
|
after_text = text.get('1.0', 'end')
|
||||||
equal(before_text, after_text)
|
equal(before_text, after_text)
|
||||||
|
@ -192,7 +192,7 @@ class ReplaceDialogTest(unittest.TestCase):
|
||||||
self.engine.revar.set(True)
|
self.engine.revar.set(True)
|
||||||
|
|
||||||
before_text = text.get('1.0', 'end')
|
before_text = text.get('1.0', 'end')
|
||||||
pv.set('[a-z][\d]+')
|
pv.set(r'[a-z][\d]+')
|
||||||
rv.set('hello')
|
rv.set('hello')
|
||||||
replace()
|
replace()
|
||||||
after_text = text.get('1.0', 'end')
|
after_text = text.get('1.0', 'end')
|
||||||
|
@ -207,7 +207,7 @@ class ReplaceDialogTest(unittest.TestCase):
|
||||||
self.assertIn('error', showerror.title)
|
self.assertIn('error', showerror.title)
|
||||||
self.assertIn('Empty', showerror.message)
|
self.assertIn('Empty', showerror.message)
|
||||||
|
|
||||||
pv.set('[\d')
|
pv.set(r'[\d')
|
||||||
replace()
|
replace()
|
||||||
self.assertIn('error', showerror.title)
|
self.assertIn('error', showerror.title)
|
||||||
self.assertIn('Pattern', showerror.message)
|
self.assertIn('Pattern', showerror.message)
|
||||||
|
|
|
@ -139,10 +139,10 @@ class SearchEngineTest(unittest.TestCase):
|
||||||
|
|
||||||
def test_setcookedpat(self):
|
def test_setcookedpat(self):
|
||||||
engine = self.engine
|
engine = self.engine
|
||||||
engine.setcookedpat('\s')
|
engine.setcookedpat(r'\s')
|
||||||
self.assertEqual(engine.getpat(), '\s')
|
self.assertEqual(engine.getpat(), r'\s')
|
||||||
engine.revar.set(1)
|
engine.revar.set(1)
|
||||||
engine.setcookedpat('\s')
|
engine.setcookedpat(r'\s')
|
||||||
self.assertEqual(engine.getpat(), r'\\s')
|
self.assertEqual(engine.getpat(), r'\\s')
|
||||||
|
|
||||||
def test_getcookedpat(self):
|
def test_getcookedpat(self):
|
||||||
|
@ -156,10 +156,10 @@ class SearchEngineTest(unittest.TestCase):
|
||||||
Equal(engine.getcookedpat(), r'\bhello\b')
|
Equal(engine.getcookedpat(), r'\bhello\b')
|
||||||
engine.wordvar.set(False)
|
engine.wordvar.set(False)
|
||||||
|
|
||||||
engine.setpat('\s')
|
engine.setpat(r'\s')
|
||||||
Equal(engine.getcookedpat(), r'\\s')
|
Equal(engine.getcookedpat(), r'\\s')
|
||||||
engine.revar.set(True)
|
engine.revar.set(True)
|
||||||
Equal(engine.getcookedpat(), '\s')
|
Equal(engine.getcookedpat(), r'\s')
|
||||||
|
|
||||||
def test_getprog(self):
|
def test_getprog(self):
|
||||||
engine = self.engine
|
engine = self.engine
|
||||||
|
@ -282,7 +282,7 @@ class ForwardBackwardTest(unittest.TestCase):
|
||||||
cls.pat = re.compile('target')
|
cls.pat = re.compile('target')
|
||||||
cls.res = (2, (10, 16)) # line, slice indexes of 'target'
|
cls.res = (2, (10, 16)) # line, slice indexes of 'target'
|
||||||
cls.failpat = re.compile('xyz') # not in text
|
cls.failpat = re.compile('xyz') # not in text
|
||||||
cls.emptypat = re.compile('\w*') # empty match possible
|
cls.emptypat = re.compile(r'\w*') # empty match possible
|
||||||
|
|
||||||
def make_search(self, func):
|
def make_search(self, func):
|
||||||
def search(pat, line, col, wrap, ok=0):
|
def search(pat, line, col, wrap, ok=0):
|
||||||
|
|
|
@ -130,7 +130,7 @@ def reformat_paragraph(data, limit):
|
||||||
partial = indent1
|
partial = indent1
|
||||||
while i < n and not is_all_white(lines[i]):
|
while i < n and not is_all_white(lines[i]):
|
||||||
# XXX Should take double space after period (etc.) into account
|
# XXX Should take double space after period (etc.) into account
|
||||||
words = re.split("(\s+)", lines[i])
|
words = re.split(r"(\s+)", lines[i])
|
||||||
for j in range(0, len(words), 2):
|
for j in range(0, len(words), 2):
|
||||||
word = words[j]
|
word = words[j]
|
||||||
if not word:
|
if not word:
|
||||||
|
|
|
@ -132,7 +132,7 @@ _Untagged_status = br'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?'
|
||||||
|
|
||||||
class IMAP4:
|
class IMAP4:
|
||||||
|
|
||||||
"""IMAP4 client class.
|
r"""IMAP4 client class.
|
||||||
|
|
||||||
Instantiate with: IMAP4([host[, port]])
|
Instantiate with: IMAP4([host[, port]])
|
||||||
|
|
||||||
|
@ -1535,7 +1535,7 @@ if __name__ == '__main__':
|
||||||
('select', ('/tmp/yyz 2',)),
|
('select', ('/tmp/yyz 2',)),
|
||||||
('search', (None, 'SUBJECT', 'test')),
|
('search', (None, 'SUBJECT', 'test')),
|
||||||
('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')),
|
('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')),
|
||||||
('store', ('1', 'FLAGS', '(\Deleted)')),
|
('store', ('1', 'FLAGS', r'(\Deleted)')),
|
||||||
('namespace', ()),
|
('namespace', ()),
|
||||||
('expunge', ()),
|
('expunge', ()),
|
||||||
('recent', ()),
|
('recent', ()),
|
||||||
|
|
|
@ -289,7 +289,7 @@ class Directory:
|
||||||
def make_short(self, file):
|
def make_short(self, file):
|
||||||
oldfile = file
|
oldfile = file
|
||||||
file = file.replace('+', '_')
|
file = file.replace('+', '_')
|
||||||
file = ''.join(c for c in file if not c in ' "/\[]:;=,')
|
file = ''.join(c for c in file if not c in r' "/\[]:;=,')
|
||||||
parts = file.split(".")
|
parts = file.split(".")
|
||||||
if len(parts) > 1:
|
if len(parts) > 1:
|
||||||
prefix = "".join(parts[:-1]).upper()
|
prefix = "".join(parts[:-1]).upper()
|
||||||
|
|
|
@ -251,13 +251,13 @@ def _dist_try_harder(distname, version, id):
|
||||||
|
|
||||||
_release_filename = re.compile(r'(\w+)[-_](release|version)', re.ASCII)
|
_release_filename = re.compile(r'(\w+)[-_](release|version)', re.ASCII)
|
||||||
_lsb_release_version = re.compile(r'(.+)'
|
_lsb_release_version = re.compile(r'(.+)'
|
||||||
' release '
|
r' release '
|
||||||
'([\d.]+)'
|
r'([\d.]+)'
|
||||||
'[^(]*(?:\((.+)\))?', re.ASCII)
|
r'[^(]*(?:\((.+)\))?', re.ASCII)
|
||||||
_release_version = re.compile(r'([^0-9]+)'
|
_release_version = re.compile(r'([^0-9]+)'
|
||||||
'(?: release )?'
|
r'(?: release )?'
|
||||||
'([\d.]+)'
|
r'([\d.]+)'
|
||||||
'[^(]*(?:\((.+)\))?', re.ASCII)
|
r'[^(]*(?:\((.+)\))?', re.ASCII)
|
||||||
|
|
||||||
# See also http://www.novell.com/coolsolutions/feature/11251.html
|
# See also http://www.novell.com/coolsolutions/feature/11251.html
|
||||||
# and http://linuxmafia.com/faq/Admin/release-files.html
|
# and http://linuxmafia.com/faq/Admin/release-files.html
|
||||||
|
@ -407,8 +407,8 @@ def _norm_version(version, build=''):
|
||||||
return version
|
return version
|
||||||
|
|
||||||
_ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) '
|
_ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) '
|
||||||
'.*'
|
r'.*'
|
||||||
'\[.* ([\d.]+)\])')
|
r'\[.* ([\d.]+)\])')
|
||||||
|
|
||||||
# Examples of VER command output:
|
# Examples of VER command output:
|
||||||
#
|
#
|
||||||
|
@ -1153,22 +1153,22 @@ _sys_version_parser = re.compile(
|
||||||
|
|
||||||
_ironpython_sys_version_parser = re.compile(
|
_ironpython_sys_version_parser = re.compile(
|
||||||
r'IronPython\s*'
|
r'IronPython\s*'
|
||||||
'([\d\.]+)'
|
r'([\d\.]+)'
|
||||||
'(?: \(([\d\.]+)\))?'
|
r'(?: \(([\d\.]+)\))?'
|
||||||
' on (.NET [\d\.]+)', re.ASCII)
|
r' on (.NET [\d\.]+)', re.ASCII)
|
||||||
|
|
||||||
# IronPython covering 2.6 and 2.7
|
# IronPython covering 2.6 and 2.7
|
||||||
_ironpython26_sys_version_parser = re.compile(
|
_ironpython26_sys_version_parser = re.compile(
|
||||||
r'([\d.]+)\s*'
|
r'([\d.]+)\s*'
|
||||||
'\(IronPython\s*'
|
r'\(IronPython\s*'
|
||||||
'[\d.]+\s*'
|
r'[\d.]+\s*'
|
||||||
'\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'
|
r'\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'
|
||||||
)
|
)
|
||||||
|
|
||||||
_pypy_sys_version_parser = re.compile(
|
_pypy_sys_version_parser = re.compile(
|
||||||
r'([\w.+]+)\s*'
|
r'([\w.+]+)\s*'
|
||||||
'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
|
r'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
|
||||||
'\[PyPy [^\]]+\]?')
|
r'\[PyPy [^\]]+\]?')
|
||||||
|
|
||||||
_sys_version_cache = {}
|
_sys_version_cache = {}
|
||||||
|
|
||||||
|
@ -1403,7 +1403,7 @@ def platform(aliased=0, terse=0):
|
||||||
# see issue #1322 for more information
|
# see issue #1322 for more information
|
||||||
warnings.filterwarnings(
|
warnings.filterwarnings(
|
||||||
'ignore',
|
'ignore',
|
||||||
'dist\(\) and linux_distribution\(\) '
|
r'dist\(\) and linux_distribution\(\) '
|
||||||
'functions are deprecated .*',
|
'functions are deprecated .*',
|
||||||
PendingDeprecationWarning,
|
PendingDeprecationWarning,
|
||||||
)
|
)
|
||||||
|
|
|
@ -28,7 +28,7 @@ ascii_letters = ascii_lowercase + ascii_uppercase
|
||||||
digits = '0123456789'
|
digits = '0123456789'
|
||||||
hexdigits = digits + 'abcdef' + 'ABCDEF'
|
hexdigits = digits + 'abcdef' + 'ABCDEF'
|
||||||
octdigits = '01234567'
|
octdigits = '01234567'
|
||||||
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
|
punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
|
||||||
printable = digits + ascii_letters + punctuation + whitespace
|
printable = digits + ascii_letters + punctuation + whitespace
|
||||||
|
|
||||||
# Functions which aren't available as string methods.
|
# Functions which aren't available as string methods.
|
||||||
|
|
|
@ -215,7 +215,7 @@ def _parse_makefile(filename, vars=None):
|
||||||
# Regexes needed for parsing Makefile (and similar syntaxes,
|
# Regexes needed for parsing Makefile (and similar syntaxes,
|
||||||
# like old-style Setup files).
|
# like old-style Setup files).
|
||||||
import re
|
import re
|
||||||
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
|
_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
|
||||||
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
|
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
|
||||||
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
|
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
|
||||||
|
|
||||||
|
|
|
@ -3857,7 +3857,7 @@ class TestSemaphoreTracker(unittest.TestCase):
|
||||||
p.stderr.close()
|
p.stderr.close()
|
||||||
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
|
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
|
||||||
self.assertRegex(err, expected)
|
self.assertRegex(err, expected)
|
||||||
self.assertRegex(err, 'semaphore_tracker: %r: \[Errno' % name1)
|
self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)
|
||||||
|
|
||||||
#
|
#
|
||||||
# Mixins
|
# Mixins
|
||||||
|
|
|
@ -4001,12 +4001,12 @@ class Oddballs(unittest.TestCase):
|
||||||
datetime(xx, xx, xx, xx, xx, xx, xx))
|
datetime(xx, xx, xx, xx, xx, xx, xx))
|
||||||
|
|
||||||
with self.assertRaisesRegex(TypeError, '^an integer is required '
|
with self.assertRaisesRegex(TypeError, '^an integer is required '
|
||||||
'\(got type str\)$'):
|
r'\(got type str\)$'):
|
||||||
datetime(10, 10, '10')
|
datetime(10, 10, '10')
|
||||||
|
|
||||||
f10 = Number(10.9)
|
f10 = Number(10.9)
|
||||||
with self.assertRaisesRegex(TypeError, '^__int__ returned non-int '
|
with self.assertRaisesRegex(TypeError, '^__int__ returned non-int '
|
||||||
'\(type float\)$'):
|
r'\(type float\)$'):
|
||||||
datetime(10, 10, f10)
|
datetime(10, 10, f10)
|
||||||
|
|
||||||
class Float(float):
|
class Float(float):
|
||||||
|
|
|
@ -158,7 +158,7 @@ tests = [
|
||||||
('(abc', '-', SYNTAX_ERROR),
|
('(abc', '-', SYNTAX_ERROR),
|
||||||
('a]', 'a]', SUCCEED, 'found', 'a]'),
|
('a]', 'a]', SUCCEED, 'found', 'a]'),
|
||||||
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
|
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
|
||||||
('a[\]]b', 'a]b', SUCCEED, 'found', 'a]b'),
|
('a[\\]]b', 'a]b', SUCCEED, 'found', 'a]b'),
|
||||||
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
|
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
|
||||||
('a[^bc]d', 'abd', FAIL),
|
('a[^bc]d', 'abd', FAIL),
|
||||||
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
|
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
|
||||||
|
@ -551,7 +551,7 @@ tests = [
|
||||||
# lookbehind: split by : but not if it is escaped by -.
|
# lookbehind: split by : but not if it is escaped by -.
|
||||||
('(?<!-):(.*?)(?<!-):', 'a:bc-:de:f', SUCCEED, 'g1', 'bc-:de' ),
|
('(?<!-):(.*?)(?<!-):', 'a:bc-:de:f', SUCCEED, 'g1', 'bc-:de' ),
|
||||||
# escaping with \ as we know it
|
# escaping with \ as we know it
|
||||||
('(?<!\\\):(.*?)(?<!\\\):', 'a:bc\\:de:f', SUCCEED, 'g1', 'bc\\:de' ),
|
('(?<!\\\\):(.*?)(?<!\\\\):', 'a:bc\\:de:f', SUCCEED, 'g1', 'bc\\:de' ),
|
||||||
# terminating with ' and escaping with ? as in edifact
|
# terminating with ' and escaping with ? as in edifact
|
||||||
("(?<!\\?)'(.*?)(?<!\\?)'", "a'bc?'de'f", SUCCEED, 'g1', "bc?'de" ),
|
("(?<!\\?)'(.*?)(?<!\\?)'", "a'bc?'de'f", SUCCEED, 'g1', "bc?'de" ),
|
||||||
|
|
||||||
|
|
|
@ -64,7 +64,7 @@ def doit(L):
|
||||||
flush()
|
flush()
|
||||||
|
|
||||||
def tabulate(r):
|
def tabulate(r):
|
||||||
"""Tabulate sort speed for lists of various sizes.
|
r"""Tabulate sort speed for lists of various sizes.
|
||||||
|
|
||||||
The sizes are 2**i for i in r (the argument, a list).
|
The sizes are 2**i for i in r (the argument, a list).
|
||||||
|
|
||||||
|
|
|
@ -2202,7 +2202,7 @@ def can_xattr():
|
||||||
os.setxattr(fp.fileno(), b"user.test", b"")
|
os.setxattr(fp.fileno(), b"user.test", b"")
|
||||||
# Kernels < 2.6.39 don't respect setxattr flags.
|
# Kernels < 2.6.39 don't respect setxattr flags.
|
||||||
kernel_version = platform.release()
|
kernel_version = platform.release()
|
||||||
m = re.match("2.6.(\d{1,2})", kernel_version)
|
m = re.match(r"2.6.(\d{1,2})", kernel_version)
|
||||||
can = m is None or int(m.group(1)) >= 39
|
can = m is None or int(m.group(1)) >= 39
|
||||||
except OSError:
|
except OSError:
|
||||||
can = False
|
can = False
|
||||||
|
|
|
@ -831,7 +831,7 @@ os.close(fd)
|
||||||
stream._waiter = asyncio.Future(loop=self.loop)
|
stream._waiter = asyncio.Future(loop=self.loop)
|
||||||
self.assertRegex(
|
self.assertRegex(
|
||||||
repr(stream),
|
repr(stream),
|
||||||
"<StreamReader w=<Future pending[\S ]*>>")
|
r"<StreamReader w=<Future pending[\S ]*>>")
|
||||||
stream._waiter.set_result(None)
|
stream._waiter.set_result(None)
|
||||||
self.loop.run_until_complete(stream._waiter)
|
self.loop.run_until_complete(stream._waiter)
|
||||||
stream._waiter = None
|
stream._waiter = None
|
||||||
|
|
|
@ -83,7 +83,7 @@ test_conv_no_sign = [
|
||||||
('', ValueError),
|
('', ValueError),
|
||||||
(' ', ValueError),
|
(' ', ValueError),
|
||||||
(' \t\t ', ValueError),
|
(' \t\t ', ValueError),
|
||||||
(str(b'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
|
(str(br'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
|
||||||
(chr(0x200), ValueError),
|
(chr(0x200), ValueError),
|
||||||
]
|
]
|
||||||
|
|
||||||
|
@ -105,7 +105,7 @@ test_conv_sign = [
|
||||||
('', ValueError),
|
('', ValueError),
|
||||||
(' ', ValueError),
|
(' ', ValueError),
|
||||||
(' \t\t ', ValueError),
|
(' \t\t ', ValueError),
|
||||||
(str(b'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
|
(str(br'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
|
||||||
(chr(0x200), ValueError),
|
(chr(0x200), ValueError),
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
|
@ -533,21 +533,21 @@ class SkipitemTest(unittest.TestCase):
|
||||||
parse((1, 2, 3), {}, b'OOO', ['', '', 'a'])
|
parse((1, 2, 3), {}, b'OOO', ['', '', 'a'])
|
||||||
parse((1, 2), {'a': 3}, b'OOO', ['', '', 'a'])
|
parse((1, 2), {'a': 3}, b'OOO', ['', '', 'a'])
|
||||||
with self.assertRaisesRegex(TypeError,
|
with self.assertRaisesRegex(TypeError,
|
||||||
'Function takes at least 2 positional arguments \(1 given\)'):
|
r'Function takes at least 2 positional arguments \(1 given\)'):
|
||||||
parse((1,), {'a': 3}, b'OOO', ['', '', 'a'])
|
parse((1,), {'a': 3}, b'OOO', ['', '', 'a'])
|
||||||
parse((1,), {}, b'O|OO', ['', '', 'a'])
|
parse((1,), {}, b'O|OO', ['', '', 'a'])
|
||||||
with self.assertRaisesRegex(TypeError,
|
with self.assertRaisesRegex(TypeError,
|
||||||
'Function takes at least 1 positional arguments \(0 given\)'):
|
r'Function takes at least 1 positional arguments \(0 given\)'):
|
||||||
parse((), {}, b'O|OO', ['', '', 'a'])
|
parse((), {}, b'O|OO', ['', '', 'a'])
|
||||||
parse((1, 2), {'a': 3}, b'OO$O', ['', '', 'a'])
|
parse((1, 2), {'a': 3}, b'OO$O', ['', '', 'a'])
|
||||||
with self.assertRaisesRegex(TypeError,
|
with self.assertRaisesRegex(TypeError,
|
||||||
'Function takes exactly 2 positional arguments \(1 given\)'):
|
r'Function takes exactly 2 positional arguments \(1 given\)'):
|
||||||
parse((1,), {'a': 3}, b'OO$O', ['', '', 'a'])
|
parse((1,), {'a': 3}, b'OO$O', ['', '', 'a'])
|
||||||
parse((1,), {}, b'O|O$O', ['', '', 'a'])
|
parse((1,), {}, b'O|O$O', ['', '', 'a'])
|
||||||
with self.assertRaisesRegex(TypeError,
|
with self.assertRaisesRegex(TypeError,
|
||||||
'Function takes at least 1 positional arguments \(0 given\)'):
|
r'Function takes at least 1 positional arguments \(0 given\)'):
|
||||||
parse((), {}, b'O|O$O', ['', '', 'a'])
|
parse((), {}, b'O|O$O', ['', '', 'a'])
|
||||||
with self.assertRaisesRegex(SystemError, 'Empty parameter name after \$'):
|
with self.assertRaisesRegex(SystemError, r'Empty parameter name after \$'):
|
||||||
parse((1,), {}, b'O|$OO', ['', '', 'a'])
|
parse((1,), {}, b'O|$OO', ['', '', 'a'])
|
||||||
with self.assertRaisesRegex(SystemError, 'Empty keyword'):
|
with self.assertRaisesRegex(SystemError, 'Empty keyword'):
|
||||||
parse((1,), {}, b'O|OO', ['', 'a', ''])
|
parse((1,), {}, b'O|OO', ['', 'a', ''])
|
||||||
|
|
|
@ -148,7 +148,7 @@ class CgiTests(unittest.TestCase):
|
||||||
def test_escape(self):
|
def test_escape(self):
|
||||||
# cgi.escape() is deprecated.
|
# cgi.escape() is deprecated.
|
||||||
with warnings.catch_warnings():
|
with warnings.catch_warnings():
|
||||||
warnings.filterwarnings('ignore', 'cgi\.escape',
|
warnings.filterwarnings('ignore', r'cgi\.escape',
|
||||||
DeprecationWarning)
|
DeprecationWarning)
|
||||||
self.assertEqual("test & string", cgi.escape("test & string"))
|
self.assertEqual("test & string", cgi.escape("test & string"))
|
||||||
self.assertEqual("<test string>", cgi.escape("<test string>"))
|
self.assertEqual("<test string>", cgi.escape("<test string>"))
|
||||||
|
|
|
@ -280,12 +280,12 @@ class CodecCallbackTest(unittest.TestCase):
|
||||||
)
|
)
|
||||||
|
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
b"\\u3042\u3xxx".decode("unicode-escape", "test.handler1"),
|
b"\\u3042\\u3xxx".decode("unicode-escape", "test.handler1"),
|
||||||
"\u3042[<92><117><51>]xxx"
|
"\u3042[<92><117><51>]xxx"
|
||||||
)
|
)
|
||||||
|
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
b"\\u3042\u3xx".decode("unicode-escape", "test.handler1"),
|
b"\\u3042\\u3xx".decode("unicode-escape", "test.handler1"),
|
||||||
"\u3042[<92><117><51>]xx"
|
"\u3042[<92><117><51>]xx"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -2703,8 +2703,8 @@ class TransformCodecTest(unittest.TestCase):
|
||||||
bad_input = "bad input type"
|
bad_input = "bad input type"
|
||||||
for encoding in bytes_transform_encodings:
|
for encoding in bytes_transform_encodings:
|
||||||
with self.subTest(encoding=encoding):
|
with self.subTest(encoding=encoding):
|
||||||
fmt = ( "{!r} is not a text encoding; "
|
fmt = (r"{!r} is not a text encoding; "
|
||||||
"use codecs.encode\(\) to handle arbitrary codecs")
|
r"use codecs.encode\(\) to handle arbitrary codecs")
|
||||||
msg = fmt.format(encoding)
|
msg = fmt.format(encoding)
|
||||||
with self.assertRaisesRegex(LookupError, msg) as failure:
|
with self.assertRaisesRegex(LookupError, msg) as failure:
|
||||||
bad_input.encode(encoding)
|
bad_input.encode(encoding)
|
||||||
|
@ -2713,7 +2713,7 @@ class TransformCodecTest(unittest.TestCase):
|
||||||
def test_text_to_binary_blacklists_text_transforms(self):
|
def test_text_to_binary_blacklists_text_transforms(self):
|
||||||
# Check str.encode gives a good error message for str -> str codecs
|
# Check str.encode gives a good error message for str -> str codecs
|
||||||
msg = (r"^'rot_13' is not a text encoding; "
|
msg = (r"^'rot_13' is not a text encoding; "
|
||||||
"use codecs.encode\(\) to handle arbitrary codecs")
|
r"use codecs.encode\(\) to handle arbitrary codecs")
|
||||||
with self.assertRaisesRegex(LookupError, msg):
|
with self.assertRaisesRegex(LookupError, msg):
|
||||||
"just an example message".encode("rot_13")
|
"just an example message".encode("rot_13")
|
||||||
|
|
||||||
|
@ -2725,7 +2725,7 @@ class TransformCodecTest(unittest.TestCase):
|
||||||
with self.subTest(encoding=encoding):
|
with self.subTest(encoding=encoding):
|
||||||
encoded_data = codecs.encode(data, encoding)
|
encoded_data = codecs.encode(data, encoding)
|
||||||
fmt = (r"{!r} is not a text encoding; "
|
fmt = (r"{!r} is not a text encoding; "
|
||||||
"use codecs.decode\(\) to handle arbitrary codecs")
|
r"use codecs.decode\(\) to handle arbitrary codecs")
|
||||||
msg = fmt.format(encoding)
|
msg = fmt.format(encoding)
|
||||||
with self.assertRaisesRegex(LookupError, msg):
|
with self.assertRaisesRegex(LookupError, msg):
|
||||||
encoded_data.decode(encoding)
|
encoded_data.decode(encoding)
|
||||||
|
@ -2737,7 +2737,7 @@ class TransformCodecTest(unittest.TestCase):
|
||||||
for bad_input in (b"immutable", bytearray(b"mutable")):
|
for bad_input in (b"immutable", bytearray(b"mutable")):
|
||||||
with self.subTest(bad_input=bad_input):
|
with self.subTest(bad_input=bad_input):
|
||||||
msg = (r"^'rot_13' is not a text encoding; "
|
msg = (r"^'rot_13' is not a text encoding; "
|
||||||
"use codecs.decode\(\) to handle arbitrary codecs")
|
r"use codecs.decode\(\) to handle arbitrary codecs")
|
||||||
with self.assertRaisesRegex(LookupError, msg) as failure:
|
with self.assertRaisesRegex(LookupError, msg) as failure:
|
||||||
bad_input.decode("rot_13")
|
bad_input.decode("rot_13")
|
||||||
self.assertIsNone(failure.exception.__cause__)
|
self.assertIsNone(failure.exception.__cause__)
|
||||||
|
@ -2956,12 +2956,12 @@ class ExceptionChainingTest(unittest.TestCase):
|
||||||
self.assertEqual(decoded, b"not str!")
|
self.assertEqual(decoded, b"not str!")
|
||||||
# Text model methods should complain
|
# Text model methods should complain
|
||||||
fmt = (r"^{!r} encoder returned 'str' instead of 'bytes'; "
|
fmt = (r"^{!r} encoder returned 'str' instead of 'bytes'; "
|
||||||
"use codecs.encode\(\) to encode to arbitrary types$")
|
r"use codecs.encode\(\) to encode to arbitrary types$")
|
||||||
msg = fmt.format(self.codec_name)
|
msg = fmt.format(self.codec_name)
|
||||||
with self.assertRaisesRegex(TypeError, msg):
|
with self.assertRaisesRegex(TypeError, msg):
|
||||||
"str_input".encode(self.codec_name)
|
"str_input".encode(self.codec_name)
|
||||||
fmt = (r"^{!r} decoder returned 'bytes' instead of 'str'; "
|
fmt = (r"^{!r} decoder returned 'bytes' instead of 'str'; "
|
||||||
"use codecs.decode\(\) to decode to arbitrary types$")
|
r"use codecs.decode\(\) to decode to arbitrary types$")
|
||||||
msg = fmt.format(self.codec_name)
|
msg = fmt.format(self.codec_name)
|
||||||
with self.assertRaisesRegex(TypeError, msg):
|
with self.assertRaisesRegex(TypeError, msg):
|
||||||
b"bytes input".decode(self.codec_name)
|
b"bytes input".decode(self.codec_name)
|
||||||
|
|
|
@ -891,7 +891,7 @@ class CoroutineTest(unittest.TestCase):
|
||||||
return await Awaitable()
|
return await Awaitable()
|
||||||
|
|
||||||
with self.assertRaisesRegex(
|
with self.assertRaisesRegex(
|
||||||
TypeError, "__await__\(\) returned a coroutine"):
|
TypeError, r"__await__\(\) returned a coroutine"):
|
||||||
|
|
||||||
run_async(foo())
|
run_async(foo())
|
||||||
|
|
||||||
|
@ -1333,7 +1333,7 @@ class CoroutineTest(unittest.TestCase):
|
||||||
|
|
||||||
with self.assertRaisesRegex(
|
with self.assertRaisesRegex(
|
||||||
TypeError,
|
TypeError,
|
||||||
"async for' received an invalid object.*__aiter.*\: I"):
|
r"async for' received an invalid object.*__aiter.*\: I"):
|
||||||
|
|
||||||
run_async(foo())
|
run_async(foo())
|
||||||
|
|
||||||
|
@ -1667,8 +1667,8 @@ class SysSetCoroWrapperTest(unittest.TestCase):
|
||||||
try:
|
try:
|
||||||
with silence_coro_gc(), self.assertRaisesRegex(
|
with silence_coro_gc(), self.assertRaisesRegex(
|
||||||
RuntimeError,
|
RuntimeError,
|
||||||
"coroutine wrapper.*\.wrapper at 0x.*attempted to "
|
r"coroutine wrapper.*\.wrapper at 0x.*attempted to "
|
||||||
"recursively wrap .* wrap .*"):
|
r"recursively wrap .* wrap .*"):
|
||||||
|
|
||||||
foo()
|
foo()
|
||||||
finally:
|
finally:
|
||||||
|
|
|
@ -291,7 +291,7 @@ constructor:
|
||||||
...
|
...
|
||||||
... Non-example text.
|
... Non-example text.
|
||||||
...
|
...
|
||||||
... >>> print('another\example')
|
... >>> print('another\\example')
|
||||||
... another
|
... another
|
||||||
... example
|
... example
|
||||||
... '''
|
... '''
|
||||||
|
|
|
@ -581,7 +581,7 @@ class TestParser(TestParserMixin, TestEmailBase):
|
||||||
|
|
||||||
def test_get_comment_quoted_parens(self):
|
def test_get_comment_quoted_parens(self):
|
||||||
self._test_get_x(parser.get_comment,
|
self._test_get_x(parser.get_comment,
|
||||||
'(foo\) \(\)bar)', '(foo\) \(\)bar)', ' ', [], '', ['foo) ()bar'])
|
r'(foo\) \(\)bar)', r'(foo\) \(\)bar)', ' ', [], '', ['foo) ()bar'])
|
||||||
|
|
||||||
def test_get_comment_non_printable(self):
|
def test_get_comment_non_printable(self):
|
||||||
self._test_get_x(parser.get_comment,
|
self._test_get_x(parser.get_comment,
|
||||||
|
@ -625,7 +625,7 @@ class TestParser(TestParserMixin, TestEmailBase):
|
||||||
|
|
||||||
def test_get_comment_qs_in_nested_comment(self):
|
def test_get_comment_qs_in_nested_comment(self):
|
||||||
comment = self._test_get_x(parser.get_comment,
|
comment = self._test_get_x(parser.get_comment,
|
||||||
'(foo (b\)))', '(foo (b\)))', ' ', [], '', ['foo (b\))'])
|
r'(foo (b\)))', r'(foo (b\)))', ' ', [], '', [r'foo (b\))'])
|
||||||
self.assertEqual(comment[2].content, 'b)')
|
self.assertEqual(comment[2].content, 'b)')
|
||||||
|
|
||||||
# get_cfws
|
# get_cfws
|
||||||
|
|
|
@ -3040,7 +3040,7 @@ class TestMiscellaneous(TestEmailBase):
|
||||||
|
|
||||||
def test_escape_backslashes(self):
|
def test_escape_backslashes(self):
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
utils.formataddr(('Arthur \Backslash\ Foobar', 'person@dom.ain')),
|
utils.formataddr((r'Arthur \Backslash\ Foobar', 'person@dom.ain')),
|
||||||
r'"Arthur \\Backslash\\ Foobar" <person@dom.ain>')
|
r'"Arthur \\Backslash\\ Foobar" <person@dom.ain>')
|
||||||
a = r'Arthur \Backslash\ Foobar'
|
a = r'Arthur \Backslash\ Foobar'
|
||||||
b = 'person@dom.ain'
|
b = 'person@dom.ain'
|
||||||
|
|
|
@ -93,7 +93,7 @@ class FaultHandlerTests(unittest.TestCase):
|
||||||
header = 'Thread 0x[0-9a-f]+'
|
header = 'Thread 0x[0-9a-f]+'
|
||||||
else:
|
else:
|
||||||
header = 'Stack'
|
header = 'Stack'
|
||||||
regex = """
|
regex = r"""
|
||||||
^{fatal_error}
|
^{fatal_error}
|
||||||
|
|
||||||
{header} \(most recent call first\):
|
{header} \(most recent call first\):
|
||||||
|
@ -490,7 +490,7 @@ class FaultHandlerTests(unittest.TestCase):
|
||||||
lineno = 8
|
lineno = 8
|
||||||
else:
|
else:
|
||||||
lineno = 10
|
lineno = 10
|
||||||
regex = """
|
regex = r"""
|
||||||
^Thread 0x[0-9a-f]+ \(most recent call first\):
|
^Thread 0x[0-9a-f]+ \(most recent call first\):
|
||||||
(?: File ".*threading.py", line [0-9]+ in [_a-z]+
|
(?: File ".*threading.py", line [0-9]+ in [_a-z]+
|
||||||
){{1,3}} File "<string>", line 23 in run
|
){{1,3}} File "<string>", line 23 in run
|
||||||
|
@ -669,9 +669,9 @@ class FaultHandlerTests(unittest.TestCase):
|
||||||
trace = '\n'.join(trace)
|
trace = '\n'.join(trace)
|
||||||
if not unregister:
|
if not unregister:
|
||||||
if all_threads:
|
if all_threads:
|
||||||
regex = 'Current thread 0x[0-9a-f]+ \(most recent call first\):\n'
|
regex = r'Current thread 0x[0-9a-f]+ \(most recent call first\):\n'
|
||||||
else:
|
else:
|
||||||
regex = 'Stack \(most recent call first\):\n'
|
regex = r'Stack \(most recent call first\):\n'
|
||||||
regex = expected_traceback(14, 32, regex)
|
regex = expected_traceback(14, 32, regex)
|
||||||
self.assertRegex(trace, regex)
|
self.assertRegex(trace, regex)
|
||||||
else:
|
else:
|
||||||
|
|
|
@ -62,14 +62,14 @@ class FnmatchTestCase(unittest.TestCase):
|
||||||
class TranslateTestCase(unittest.TestCase):
|
class TranslateTestCase(unittest.TestCase):
|
||||||
|
|
||||||
def test_translate(self):
|
def test_translate(self):
|
||||||
self.assertEqual(translate('*'), '.*\Z(?ms)')
|
self.assertEqual(translate('*'), r'.*\Z(?ms)')
|
||||||
self.assertEqual(translate('?'), '.\Z(?ms)')
|
self.assertEqual(translate('?'), r'.\Z(?ms)')
|
||||||
self.assertEqual(translate('a?b*'), 'a.b.*\Z(?ms)')
|
self.assertEqual(translate('a?b*'), r'a.b.*\Z(?ms)')
|
||||||
self.assertEqual(translate('[abc]'), '[abc]\Z(?ms)')
|
self.assertEqual(translate('[abc]'), r'[abc]\Z(?ms)')
|
||||||
self.assertEqual(translate('[]]'), '[]]\Z(?ms)')
|
self.assertEqual(translate('[]]'), r'[]]\Z(?ms)')
|
||||||
self.assertEqual(translate('[!x]'), '[^x]\Z(?ms)')
|
self.assertEqual(translate('[!x]'), r'[^x]\Z(?ms)')
|
||||||
self.assertEqual(translate('[^x]'), '[\\^x]\Z(?ms)')
|
self.assertEqual(translate('[^x]'), r'[\^x]\Z(?ms)')
|
||||||
self.assertEqual(translate('[x'), '\\[x\Z(?ms)')
|
self.assertEqual(translate('[x'), r'\[x\Z(?ms)')
|
||||||
|
|
||||||
|
|
||||||
class FilterTestCase(unittest.TestCase):
|
class FilterTestCase(unittest.TestCase):
|
||||||
|
|
|
@ -4,7 +4,7 @@ import unittest
|
||||||
from test import support
|
from test import support
|
||||||
import re
|
import re
|
||||||
|
|
||||||
rx = re.compile('\((\S+).py, line (\d+)')
|
rx = re.compile(r'\((\S+).py, line (\d+)')
|
||||||
|
|
||||||
def get_error_location(msg):
|
def get_error_location(msg):
|
||||||
mo = rx.search(str(msg))
|
mo = rx.search(str(msg))
|
||||||
|
|
|
@ -244,7 +244,7 @@ class DebuggerTests(unittest.TestCase):
|
||||||
# gdb can insert additional '\n' and space characters in various places
|
# gdb can insert additional '\n' and space characters in various places
|
||||||
# in its output, depending on the width of the terminal it's connected
|
# in its output, depending on the width of the terminal it's connected
|
||||||
# to (using its "wrap_here" function)
|
# to (using its "wrap_here" function)
|
||||||
m = re.match('.*#0\s+builtin_id\s+\(self\=.*,\s+v=\s*(.*?)\)\s+at\s+\S*Python/bltinmodule.c.*',
|
m = re.match(r'.*#0\s+builtin_id\s+\(self\=.*,\s+v=\s*(.*?)\)\s+at\s+\S*Python/bltinmodule.c.*',
|
||||||
gdb_output, re.DOTALL)
|
gdb_output, re.DOTALL)
|
||||||
if not m:
|
if not m:
|
||||||
self.fail('Unexpected gdb output: %r\n%s' % (gdb_output, gdb_output))
|
self.fail('Unexpected gdb output: %r\n%s' % (gdb_output, gdb_output))
|
||||||
|
@ -552,7 +552,7 @@ class Foo:
|
||||||
foo = Foo()
|
foo = Foo()
|
||||||
foo.an_attr = foo
|
foo.an_attr = foo
|
||||||
id(foo)''')
|
id(foo)''')
|
||||||
self.assertTrue(re.match('<Foo\(an_attr=<\.\.\.>\) at remote 0x-?[0-9a-f]+>',
|
self.assertTrue(re.match(r'<Foo\(an_attr=<\.\.\.>\) at remote 0x-?[0-9a-f]+>',
|
||||||
gdb_repr),
|
gdb_repr),
|
||||||
'Unexpected gdb representation: %r\n%s' % \
|
'Unexpected gdb representation: %r\n%s' % \
|
||||||
(gdb_repr, gdb_output))
|
(gdb_repr, gdb_output))
|
||||||
|
@ -565,7 +565,7 @@ class Foo(object):
|
||||||
foo = Foo()
|
foo = Foo()
|
||||||
foo.an_attr = foo
|
foo.an_attr = foo
|
||||||
id(foo)''')
|
id(foo)''')
|
||||||
self.assertTrue(re.match('<Foo\(an_attr=<\.\.\.>\) at remote 0x-?[0-9a-f]+>',
|
self.assertTrue(re.match(r'<Foo\(an_attr=<\.\.\.>\) at remote 0x-?[0-9a-f]+>',
|
||||||
gdb_repr),
|
gdb_repr),
|
||||||
'Unexpected gdb representation: %r\n%s' % \
|
'Unexpected gdb representation: %r\n%s' % \
|
||||||
(gdb_repr, gdb_output))
|
(gdb_repr, gdb_output))
|
||||||
|
@ -579,7 +579,7 @@ b = Foo()
|
||||||
a.an_attr = b
|
a.an_attr = b
|
||||||
b.an_attr = a
|
b.an_attr = a
|
||||||
id(a)''')
|
id(a)''')
|
||||||
self.assertTrue(re.match('<Foo\(an_attr=<Foo\(an_attr=<\.\.\.>\) at remote 0x-?[0-9a-f]+>\) at remote 0x-?[0-9a-f]+>',
|
self.assertTrue(re.match(r'<Foo\(an_attr=<Foo\(an_attr=<\.\.\.>\) at remote 0x-?[0-9a-f]+>\) at remote 0x-?[0-9a-f]+>',
|
||||||
gdb_repr),
|
gdb_repr),
|
||||||
'Unexpected gdb representation: %r\n%s' % \
|
'Unexpected gdb representation: %r\n%s' % \
|
||||||
(gdb_repr, gdb_output))
|
(gdb_repr, gdb_output))
|
||||||
|
@ -614,7 +614,7 @@ id(a)''')
|
||||||
|
|
||||||
def test_builtin_method(self):
|
def test_builtin_method(self):
|
||||||
gdb_repr, gdb_output = self.get_gdb_repr('import sys; id(sys.stdout.readlines)')
|
gdb_repr, gdb_output = self.get_gdb_repr('import sys; id(sys.stdout.readlines)')
|
||||||
self.assertTrue(re.match('<built-in method readlines of _io.TextIOWrapper object at remote 0x-?[0-9a-f]+>',
|
self.assertTrue(re.match(r'<built-in method readlines of _io.TextIOWrapper object at remote 0x-?[0-9a-f]+>',
|
||||||
gdb_repr),
|
gdb_repr),
|
||||||
'Unexpected gdb representation: %r\n%s' % \
|
'Unexpected gdb representation: %r\n%s' % \
|
||||||
(gdb_repr, gdb_output))
|
(gdb_repr, gdb_output))
|
||||||
|
@ -629,7 +629,7 @@ id(foo.__code__)''',
|
||||||
breakpoint='builtin_id',
|
breakpoint='builtin_id',
|
||||||
cmds_after_breakpoint=['print (PyFrameObject*)(((PyCodeObject*)v)->co_zombieframe)']
|
cmds_after_breakpoint=['print (PyFrameObject*)(((PyCodeObject*)v)->co_zombieframe)']
|
||||||
)
|
)
|
||||||
self.assertTrue(re.match('.*\s+\$1 =\s+Frame 0x-?[0-9a-f]+, for file <string>, line 3, in foo \(\)\s+.*',
|
self.assertTrue(re.match(r'.*\s+\$1 =\s+Frame 0x-?[0-9a-f]+, for file <string>, line 3, in foo \(\)\s+.*',
|
||||||
gdb_output,
|
gdb_output,
|
||||||
re.DOTALL),
|
re.DOTALL),
|
||||||
'Unexpected gdb representation: %r\n%s' % (gdb_output, gdb_output))
|
'Unexpected gdb representation: %r\n%s' % (gdb_output, gdb_output))
|
||||||
|
|
|
@ -625,20 +625,20 @@ class KeywordOnly_TestCase(unittest.TestCase):
|
||||||
)
|
)
|
||||||
# required arg missing
|
# required arg missing
|
||||||
with self.assertRaisesRegex(TypeError,
|
with self.assertRaisesRegex(TypeError,
|
||||||
"Required argument 'required' \(pos 1\) not found"):
|
r"Required argument 'required' \(pos 1\) not found"):
|
||||||
getargs_keyword_only(optional=2)
|
getargs_keyword_only(optional=2)
|
||||||
|
|
||||||
with self.assertRaisesRegex(TypeError,
|
with self.assertRaisesRegex(TypeError,
|
||||||
"Required argument 'required' \(pos 1\) not found"):
|
r"Required argument 'required' \(pos 1\) not found"):
|
||||||
getargs_keyword_only(keyword_only=3)
|
getargs_keyword_only(keyword_only=3)
|
||||||
|
|
||||||
def test_too_many_args(self):
|
def test_too_many_args(self):
|
||||||
with self.assertRaisesRegex(TypeError,
|
with self.assertRaisesRegex(TypeError,
|
||||||
"Function takes at most 2 positional arguments \(3 given\)"):
|
r"Function takes at most 2 positional arguments \(3 given\)"):
|
||||||
getargs_keyword_only(1, 2, 3)
|
getargs_keyword_only(1, 2, 3)
|
||||||
|
|
||||||
with self.assertRaisesRegex(TypeError,
|
with self.assertRaisesRegex(TypeError,
|
||||||
"function takes at most 3 arguments \(4 given\)"):
|
r"function takes at most 3 arguments \(4 given\)"):
|
||||||
getargs_keyword_only(1, 2, 3, keyword_only=5)
|
getargs_keyword_only(1, 2, 3, keyword_only=5)
|
||||||
|
|
||||||
def test_invalid_keyword(self):
|
def test_invalid_keyword(self):
|
||||||
|
@ -673,11 +673,11 @@ class PositionalOnlyAndKeywords_TestCase(unittest.TestCase):
|
||||||
self.assertEqual(self.getargs(1), (1, -1, -1))
|
self.assertEqual(self.getargs(1), (1, -1, -1))
|
||||||
# required positional arg missing
|
# required positional arg missing
|
||||||
with self.assertRaisesRegex(TypeError,
|
with self.assertRaisesRegex(TypeError,
|
||||||
"Function takes at least 1 positional arguments \(0 given\)"):
|
r"Function takes at least 1 positional arguments \(0 given\)"):
|
||||||
self.getargs()
|
self.getargs()
|
||||||
|
|
||||||
with self.assertRaisesRegex(TypeError,
|
with self.assertRaisesRegex(TypeError,
|
||||||
"Function takes at least 1 positional arguments \(0 given\)"):
|
r"Function takes at least 1 positional arguments \(0 given\)"):
|
||||||
self.getargs(keyword=3)
|
self.getargs(keyword=3)
|
||||||
|
|
||||||
def test_empty_keyword(self):
|
def test_empty_keyword(self):
|
||||||
|
|
|
@ -701,7 +701,7 @@ class AttributesTestCase(TestCaseBase):
|
||||||
|
|
||||||
def test_attr_funky_names2(self):
|
def test_attr_funky_names2(self):
|
||||||
self._run_check(
|
self._run_check(
|
||||||
"<a $><b $=%><c \=/>",
|
r"<a $><b $=%><c \=/>",
|
||||||
[("starttag", "a", [("$", None)]),
|
[("starttag", "a", [("$", None)]),
|
||||||
("starttag", "b", [("$", "%")]),
|
("starttag", "b", [("$", "%")]),
|
||||||
("starttag", "c", [("\\", "/")])])
|
("starttag", "c", [("\\", "/")])])
|
||||||
|
|
|
@ -1051,7 +1051,7 @@ class CookieTests(unittest.TestCase):
|
||||||
url = "http://foo.bar.com/"
|
url = "http://foo.bar.com/"
|
||||||
interact_2965(c, url, "spam=eggs; Version=1; Port")
|
interact_2965(c, url, "spam=eggs; Version=1; Port")
|
||||||
h = interact_2965(c, url)
|
h = interact_2965(c, url)
|
||||||
self.assertRegex(h, "\$Port([^=]|$)",
|
self.assertRegex(h, r"\$Port([^=]|$)",
|
||||||
"port with no value not returned with no value")
|
"port with no value not returned with no value")
|
||||||
|
|
||||||
c = CookieJar(pol)
|
c = CookieJar(pol)
|
||||||
|
@ -1396,9 +1396,9 @@ class LWPCookieTests(unittest.TestCase):
|
||||||
|
|
||||||
self.assertRegex(cookie, r'^\$Version="?1"?;')
|
self.assertRegex(cookie, r'^\$Version="?1"?;')
|
||||||
self.assertRegex(cookie, r'Part_Number="?Rocket_Launcher_0001"?;'
|
self.assertRegex(cookie, r'Part_Number="?Rocket_Launcher_0001"?;'
|
||||||
'\s*\$Path="\/acme"')
|
r'\s*\$Path="\/acme"')
|
||||||
self.assertRegex(cookie, r'Customer="?WILE_E_COYOTE"?;'
|
self.assertRegex(cookie, r'Customer="?WILE_E_COYOTE"?;'
|
||||||
'\s*\$Path="\/acme"')
|
r'\s*\$Path="\/acme"')
|
||||||
|
|
||||||
#
|
#
|
||||||
# 7. User Agent -> Server
|
# 7. User Agent -> Server
|
||||||
|
|
|
@ -101,7 +101,7 @@ class HelperFunctionTest(unittest.TestCase):
|
||||||
(["echo foo", "audio/*", "foo.txt"], "echo foo"),
|
(["echo foo", "audio/*", "foo.txt"], "echo foo"),
|
||||||
(["echo %s", "audio/*", "foo.txt"], "echo foo.txt"),
|
(["echo %s", "audio/*", "foo.txt"], "echo foo.txt"),
|
||||||
(["echo %t", "audio/*", "foo.txt"], "echo audio/*"),
|
(["echo %t", "audio/*", "foo.txt"], "echo audio/*"),
|
||||||
(["echo \%t", "audio/*", "foo.txt"], "echo %t"),
|
(["echo \\%t", "audio/*", "foo.txt"], "echo %t"),
|
||||||
(["echo foo", "audio/*", "foo.txt", plist], "echo foo"),
|
(["echo foo", "audio/*", "foo.txt", plist], "echo foo"),
|
||||||
(["echo %{total}", "audio/*", "foo.txt", plist], "echo 3")
|
(["echo %{total}", "audio/*", "foo.txt", plist], "echo 3")
|
||||||
]
|
]
|
||||||
|
|
|
@ -2086,7 +2086,7 @@ class Win32JunctionTests(unittest.TestCase):
|
||||||
class NonLocalSymlinkTests(unittest.TestCase):
|
class NonLocalSymlinkTests(unittest.TestCase):
|
||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
"""
|
r"""
|
||||||
Create this structure:
|
Create this structure:
|
||||||
|
|
||||||
base
|
base
|
||||||
|
|
|
@ -255,7 +255,7 @@ class PlatformTest(unittest.TestCase):
|
||||||
with warnings.catch_warnings():
|
with warnings.catch_warnings():
|
||||||
warnings.filterwarnings(
|
warnings.filterwarnings(
|
||||||
'ignore',
|
'ignore',
|
||||||
'dist\(\) and linux_distribution\(\) '
|
r'dist\(\) and linux_distribution\(\) '
|
||||||
'functions are deprecated .*',
|
'functions are deprecated .*',
|
||||||
PendingDeprecationWarning,
|
PendingDeprecationWarning,
|
||||||
)
|
)
|
||||||
|
@ -331,7 +331,7 @@ class PlatformTest(unittest.TestCase):
|
||||||
with warnings.catch_warnings():
|
with warnings.catch_warnings():
|
||||||
warnings.filterwarnings(
|
warnings.filterwarnings(
|
||||||
'ignore',
|
'ignore',
|
||||||
'dist\(\) and linux_distribution\(\) '
|
r'dist\(\) and linux_distribution\(\) '
|
||||||
'functions are deprecated .*',
|
'functions are deprecated .*',
|
||||||
PendingDeprecationWarning,
|
PendingDeprecationWarning,
|
||||||
)
|
)
|
||||||
|
|
|
@ -113,10 +113,10 @@ class ReTests(unittest.TestCase):
|
||||||
self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s)
|
self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s)
|
||||||
self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s)
|
self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s)
|
||||||
|
|
||||||
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx')
|
self.assertEqual(re.sub('(?P<a>x)', r'\g<a>\g<a>', 'xx'), 'xxxx')
|
||||||
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx')
|
self.assertEqual(re.sub('(?P<a>x)', r'\g<a>\g<1>', 'xx'), 'xxxx')
|
||||||
self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx')
|
self.assertEqual(re.sub('(?P<unk>x)', r'\g<unk>\g<unk>', 'xx'), 'xxxx')
|
||||||
self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx')
|
self.assertEqual(re.sub('(?P<unk>x)', r'\g<1>\g<1>', 'xx'), 'xxxx')
|
||||||
|
|
||||||
self.assertEqual(re.sub('a', r'\t\n\v\r\f\a\b', 'a'), '\t\n\v\r\f\a\b')
|
self.assertEqual(re.sub('a', r'\t\n\v\r\f\a\b', 'a'), '\t\n\v\r\f\a\b')
|
||||||
self.assertEqual(re.sub('a', '\t\n\v\r\f\a\b', 'a'), '\t\n\v\r\f\a\b')
|
self.assertEqual(re.sub('a', '\t\n\v\r\f\a\b', 'a'), '\t\n\v\r\f\a\b')
|
||||||
|
@ -127,11 +127,11 @@ class ReTests(unittest.TestCase):
|
||||||
with self.assertRaises(re.error):
|
with self.assertRaises(re.error):
|
||||||
self.assertEqual(re.sub('a', '\\' + c, 'a'), '\\' + c)
|
self.assertEqual(re.sub('a', '\\' + c, 'a'), '\\' + c)
|
||||||
|
|
||||||
self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest')
|
self.assertEqual(re.sub(r'^\s*', 'X', 'test'), 'Xtest')
|
||||||
|
|
||||||
def test_bug_449964(self):
|
def test_bug_449964(self):
|
||||||
# fails for group followed by other escape
|
# fails for group followed by other escape
|
||||||
self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'),
|
self.assertEqual(re.sub(r'(?P<unk>x)', r'\g<1>\g<1>\b', 'xx'),
|
||||||
'xx\bxx\b')
|
'xx\bxx\b')
|
||||||
|
|
||||||
def test_bug_449000(self):
|
def test_bug_449000(self):
|
||||||
|
@ -218,26 +218,26 @@ class ReTests(unittest.TestCase):
|
||||||
self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d')
|
self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d')
|
||||||
|
|
||||||
def test_symbolic_groups(self):
|
def test_symbolic_groups(self):
|
||||||
re.compile('(?P<a>x)(?P=a)(?(a)y)')
|
re.compile(r'(?P<a>x)(?P=a)(?(a)y)')
|
||||||
re.compile('(?P<a1>x)(?P=a1)(?(a1)y)')
|
re.compile(r'(?P<a1>x)(?P=a1)(?(a1)y)')
|
||||||
re.compile('(?P<a1>x)\1(?(1)y)')
|
re.compile(r'(?P<a1>x)\1(?(1)y)')
|
||||||
self.checkPatternError('(?P<a>)(?P<a>)',
|
self.checkPatternError(r'(?P<a>)(?P<a>)',
|
||||||
"redefinition of group name 'a' as group 2; "
|
"redefinition of group name 'a' as group 2; "
|
||||||
"was group 1")
|
"was group 1")
|
||||||
self.checkPatternError('(?P<a>(?P=a))',
|
self.checkPatternError(r'(?P<a>(?P=a))',
|
||||||
"cannot refer to an open group", 10)
|
"cannot refer to an open group", 10)
|
||||||
self.checkPatternError('(?Pxy)', 'unknown extension ?Px')
|
self.checkPatternError(r'(?Pxy)', 'unknown extension ?Px')
|
||||||
self.checkPatternError('(?P<a>)(?P=a', 'missing ), unterminated name', 11)
|
self.checkPatternError(r'(?P<a>)(?P=a', 'missing ), unterminated name', 11)
|
||||||
self.checkPatternError('(?P=', 'missing group name', 4)
|
self.checkPatternError(r'(?P=', 'missing group name', 4)
|
||||||
self.checkPatternError('(?P=)', 'missing group name', 4)
|
self.checkPatternError(r'(?P=)', 'missing group name', 4)
|
||||||
self.checkPatternError('(?P=1)', "bad character in group name '1'", 4)
|
self.checkPatternError(r'(?P=1)', "bad character in group name '1'", 4)
|
||||||
self.checkPatternError('(?P=a)', "unknown group name 'a'")
|
self.checkPatternError(r'(?P=a)', "unknown group name 'a'")
|
||||||
self.checkPatternError('(?P=a1)', "unknown group name 'a1'")
|
self.checkPatternError(r'(?P=a1)', "unknown group name 'a1'")
|
||||||
self.checkPatternError('(?P=a.)', "bad character in group name 'a.'", 4)
|
self.checkPatternError(r'(?P=a.)', "bad character in group name 'a.'", 4)
|
||||||
self.checkPatternError('(?P<)', 'missing >, unterminated name', 4)
|
self.checkPatternError(r'(?P<)', 'missing >, unterminated name', 4)
|
||||||
self.checkPatternError('(?P<a', 'missing >, unterminated name', 4)
|
self.checkPatternError(r'(?P<a', 'missing >, unterminated name', 4)
|
||||||
self.checkPatternError('(?P<', 'missing group name', 4)
|
self.checkPatternError(r'(?P<', 'missing group name', 4)
|
||||||
self.checkPatternError('(?P<>)', 'missing group name', 4)
|
self.checkPatternError(r'(?P<>)', 'missing group name', 4)
|
||||||
self.checkPatternError(r'(?P<1>)', "bad character in group name '1'", 4)
|
self.checkPatternError(r'(?P<1>)', "bad character in group name '1'", 4)
|
||||||
self.checkPatternError(r'(?P<a.>)', "bad character in group name 'a.'", 4)
|
self.checkPatternError(r'(?P<a.>)', "bad character in group name 'a.'", 4)
|
||||||
self.checkPatternError(r'(?(', 'missing group name', 3)
|
self.checkPatternError(r'(?(', 'missing group name', 3)
|
||||||
|
@ -256,35 +256,35 @@ class ReTests(unittest.TestCase):
|
||||||
self.assertEqual(re.match(pat, 'xc8yz').span(), (0, 5))
|
self.assertEqual(re.match(pat, 'xc8yz').span(), (0, 5))
|
||||||
|
|
||||||
def test_symbolic_refs(self):
|
def test_symbolic_refs(self):
|
||||||
self.checkTemplateError('(?P<a>x)', '\g<a', 'xx',
|
self.checkTemplateError('(?P<a>x)', r'\g<a', 'xx',
|
||||||
'missing >, unterminated name', 3)
|
'missing >, unterminated name', 3)
|
||||||
self.checkTemplateError('(?P<a>x)', '\g<', 'xx',
|
self.checkTemplateError('(?P<a>x)', r'\g<', 'xx',
|
||||||
'missing group name', 3)
|
'missing group name', 3)
|
||||||
self.checkTemplateError('(?P<a>x)', '\g', 'xx', 'missing <', 2)
|
self.checkTemplateError('(?P<a>x)', r'\g', 'xx', 'missing <', 2)
|
||||||
self.checkTemplateError('(?P<a>x)', '\g<a a>', 'xx',
|
self.checkTemplateError('(?P<a>x)', r'\g<a a>', 'xx',
|
||||||
"bad character in group name 'a a'", 3)
|
"bad character in group name 'a a'", 3)
|
||||||
self.checkTemplateError('(?P<a>x)', '\g<>', 'xx',
|
self.checkTemplateError('(?P<a>x)', r'\g<>', 'xx',
|
||||||
'missing group name', 3)
|
'missing group name', 3)
|
||||||
self.checkTemplateError('(?P<a>x)', '\g<1a1>', 'xx',
|
self.checkTemplateError('(?P<a>x)', r'\g<1a1>', 'xx',
|
||||||
"bad character in group name '1a1'", 3)
|
"bad character in group name '1a1'", 3)
|
||||||
self.checkTemplateError('(?P<a>x)', r'\g<2>', 'xx',
|
self.checkTemplateError('(?P<a>x)', r'\g<2>', 'xx',
|
||||||
'invalid group reference')
|
'invalid group reference')
|
||||||
self.checkTemplateError('(?P<a>x)', r'\2', 'xx',
|
self.checkTemplateError('(?P<a>x)', r'\2', 'xx',
|
||||||
'invalid group reference')
|
'invalid group reference')
|
||||||
with self.assertRaisesRegex(IndexError, "unknown group name 'ab'"):
|
with self.assertRaisesRegex(IndexError, "unknown group name 'ab'"):
|
||||||
re.sub('(?P<a>x)', '\g<ab>', 'xx')
|
re.sub('(?P<a>x)', r'\g<ab>', 'xx')
|
||||||
self.assertEqual(re.sub('(?P<a>x)|(?P<b>y)', r'\g<b>', 'xx'), '')
|
self.assertEqual(re.sub('(?P<a>x)|(?P<b>y)', r'\g<b>', 'xx'), '')
|
||||||
self.assertEqual(re.sub('(?P<a>x)|(?P<b>y)', r'\2', 'xx'), '')
|
self.assertEqual(re.sub('(?P<a>x)|(?P<b>y)', r'\2', 'xx'), '')
|
||||||
self.checkTemplateError('(?P<a>x)', '\g<-1>', 'xx',
|
self.checkTemplateError('(?P<a>x)', r'\g<-1>', 'xx',
|
||||||
"bad character in group name '-1'", 3)
|
"bad character in group name '-1'", 3)
|
||||||
# New valid/invalid identifiers in Python 3
|
# New valid/invalid identifiers in Python 3
|
||||||
self.assertEqual(re.sub('(?P<µ>x)', r'\g<µ>', 'xx'), 'xx')
|
self.assertEqual(re.sub('(?P<µ>x)', r'\g<µ>', 'xx'), 'xx')
|
||||||
self.assertEqual(re.sub('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)', r'\g<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>', 'xx'), 'xx')
|
self.assertEqual(re.sub('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)', r'\g<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>', 'xx'), 'xx')
|
||||||
self.checkTemplateError('(?P<a>x)', '\g<©>', 'xx',
|
self.checkTemplateError('(?P<a>x)', r'\g<©>', 'xx',
|
||||||
"bad character in group name '©'", 3)
|
"bad character in group name '©'", 3)
|
||||||
# Support > 100 groups.
|
# Support > 100 groups.
|
||||||
pat = '|'.join('x(?P<a%d>%x)y' % (i, i) for i in range(1, 200 + 1))
|
pat = '|'.join('x(?P<a%d>%x)y' % (i, i) for i in range(1, 200 + 1))
|
||||||
self.assertEqual(re.sub(pat, '\g<200>', 'xc8yzxc8y'), 'c8zc8')
|
self.assertEqual(re.sub(pat, r'\g<200>', 'xc8yzxc8y'), 'c8zc8')
|
||||||
|
|
||||||
def test_re_subn(self):
|
def test_re_subn(self):
|
||||||
self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
|
self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
|
||||||
|
@ -472,19 +472,19 @@ class ReTests(unittest.TestCase):
|
||||||
re.compile(r".*?").fullmatch("abcd", pos=1, endpos=3).span(), (1, 3))
|
re.compile(r".*?").fullmatch("abcd", pos=1, endpos=3).span(), (1, 3))
|
||||||
|
|
||||||
def test_re_groupref_exists(self):
|
def test_re_groupref_exists(self):
|
||||||
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(),
|
self.assertEqual(re.match(r'^(\()?([^()]+)(?(1)\))$', '(a)').groups(),
|
||||||
('(', 'a'))
|
('(', 'a'))
|
||||||
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(),
|
self.assertEqual(re.match(r'^(\()?([^()]+)(?(1)\))$', 'a').groups(),
|
||||||
(None, 'a'))
|
(None, 'a'))
|
||||||
self.assertIsNone(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'))
|
self.assertIsNone(re.match(r'^(\()?([^()]+)(?(1)\))$', 'a)'))
|
||||||
self.assertIsNone(re.match('^(\()?([^()]+)(?(1)\))$', '(a'))
|
self.assertIsNone(re.match(r'^(\()?([^()]+)(?(1)\))$', '(a'))
|
||||||
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(),
|
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(),
|
||||||
('a', 'b'))
|
('a', 'b'))
|
||||||
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(),
|
self.assertEqual(re.match(r'^(?:(a)|c)((?(1)b|d))$', 'cd').groups(),
|
||||||
(None, 'd'))
|
(None, 'd'))
|
||||||
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(),
|
self.assertEqual(re.match(r'^(?:(a)|c)((?(1)|d))$', 'cd').groups(),
|
||||||
(None, 'd'))
|
(None, 'd'))
|
||||||
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(),
|
self.assertEqual(re.match(r'^(?:(a)|c)((?(1)|d))$', 'a').groups(),
|
||||||
('a', ''))
|
('a', ''))
|
||||||
|
|
||||||
# Tests for bug #1177831: exercise groups other than the first group
|
# Tests for bug #1177831: exercise groups other than the first group
|
||||||
|
@ -509,7 +509,7 @@ class ReTests(unittest.TestCase):
|
||||||
'two branches', 10)
|
'two branches', 10)
|
||||||
|
|
||||||
def test_re_groupref_overflow(self):
|
def test_re_groupref_overflow(self):
|
||||||
self.checkTemplateError('()', '\g<%s>' % sre_constants.MAXGROUPS, 'xx',
|
self.checkTemplateError('()', r'\g<%s>' % sre_constants.MAXGROUPS, 'xx',
|
||||||
'invalid group reference', 3)
|
'invalid group reference', 3)
|
||||||
self.checkPatternError(r'(?P<a>)(?(%d))' % sre_constants.MAXGROUPS,
|
self.checkPatternError(r'(?P<a>)(?(%d))' % sre_constants.MAXGROUPS,
|
||||||
'invalid group reference', 10)
|
'invalid group reference', 10)
|
||||||
|
@ -544,37 +544,37 @@ class ReTests(unittest.TestCase):
|
||||||
" ")
|
" ")
|
||||||
|
|
||||||
def test_repeat_minmax(self):
|
def test_repeat_minmax(self):
|
||||||
self.assertIsNone(re.match("^(\w){1}$", "abc"))
|
self.assertIsNone(re.match(r"^(\w){1}$", "abc"))
|
||||||
self.assertIsNone(re.match("^(\w){1}?$", "abc"))
|
self.assertIsNone(re.match(r"^(\w){1}?$", "abc"))
|
||||||
self.assertIsNone(re.match("^(\w){1,2}$", "abc"))
|
self.assertIsNone(re.match(r"^(\w){1,2}$", "abc"))
|
||||||
self.assertIsNone(re.match("^(\w){1,2}?$", "abc"))
|
self.assertIsNone(re.match(r"^(\w){1,2}?$", "abc"))
|
||||||
|
|
||||||
self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c")
|
self.assertEqual(re.match(r"^(\w){3}$", "abc").group(1), "c")
|
||||||
self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c")
|
self.assertEqual(re.match(r"^(\w){1,3}$", "abc").group(1), "c")
|
||||||
self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c")
|
self.assertEqual(re.match(r"^(\w){1,4}$", "abc").group(1), "c")
|
||||||
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
|
self.assertEqual(re.match(r"^(\w){3,4}?$", "abc").group(1), "c")
|
||||||
self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c")
|
self.assertEqual(re.match(r"^(\w){3}?$", "abc").group(1), "c")
|
||||||
self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c")
|
self.assertEqual(re.match(r"^(\w){1,3}?$", "abc").group(1), "c")
|
||||||
self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c")
|
self.assertEqual(re.match(r"^(\w){1,4}?$", "abc").group(1), "c")
|
||||||
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
|
self.assertEqual(re.match(r"^(\w){3,4}?$", "abc").group(1), "c")
|
||||||
|
|
||||||
self.assertIsNone(re.match("^x{1}$", "xxx"))
|
self.assertIsNone(re.match(r"^x{1}$", "xxx"))
|
||||||
self.assertIsNone(re.match("^x{1}?$", "xxx"))
|
self.assertIsNone(re.match(r"^x{1}?$", "xxx"))
|
||||||
self.assertIsNone(re.match("^x{1,2}$", "xxx"))
|
self.assertIsNone(re.match(r"^x{1,2}$", "xxx"))
|
||||||
self.assertIsNone(re.match("^x{1,2}?$", "xxx"))
|
self.assertIsNone(re.match(r"^x{1,2}?$", "xxx"))
|
||||||
|
|
||||||
self.assertTrue(re.match("^x{3}$", "xxx"))
|
self.assertTrue(re.match(r"^x{3}$", "xxx"))
|
||||||
self.assertTrue(re.match("^x{1,3}$", "xxx"))
|
self.assertTrue(re.match(r"^x{1,3}$", "xxx"))
|
||||||
self.assertTrue(re.match("^x{3,3}$", "xxx"))
|
self.assertTrue(re.match(r"^x{3,3}$", "xxx"))
|
||||||
self.assertTrue(re.match("^x{1,4}$", "xxx"))
|
self.assertTrue(re.match(r"^x{1,4}$", "xxx"))
|
||||||
self.assertTrue(re.match("^x{3,4}?$", "xxx"))
|
self.assertTrue(re.match(r"^x{3,4}?$", "xxx"))
|
||||||
self.assertTrue(re.match("^x{3}?$", "xxx"))
|
self.assertTrue(re.match(r"^x{3}?$", "xxx"))
|
||||||
self.assertTrue(re.match("^x{1,3}?$", "xxx"))
|
self.assertTrue(re.match(r"^x{1,3}?$", "xxx"))
|
||||||
self.assertTrue(re.match("^x{1,4}?$", "xxx"))
|
self.assertTrue(re.match(r"^x{1,4}?$", "xxx"))
|
||||||
self.assertTrue(re.match("^x{3,4}?$", "xxx"))
|
self.assertTrue(re.match(r"^x{3,4}?$", "xxx"))
|
||||||
|
|
||||||
self.assertIsNone(re.match("^x{}$", "xxx"))
|
self.assertIsNone(re.match(r"^x{}$", "xxx"))
|
||||||
self.assertTrue(re.match("^x{}$", "x{}"))
|
self.assertTrue(re.match(r"^x{}$", "x{}"))
|
||||||
|
|
||||||
self.checkPatternError(r'x{2,1}',
|
self.checkPatternError(r'x{2,1}',
|
||||||
'min repeat greater than max repeat', 2)
|
'min repeat greater than max repeat', 2)
|
||||||
|
@ -697,10 +697,10 @@ class ReTests(unittest.TestCase):
|
||||||
"a\n\nb")
|
"a\n\nb")
|
||||||
|
|
||||||
def test_lookahead(self):
|
def test_lookahead(self):
|
||||||
self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a")
|
self.assertEqual(re.match(r"(a(?=\s[^a]))", "a b").group(1), "a")
|
||||||
self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a")
|
self.assertEqual(re.match(r"(a(?=\s[^a]*))", "a b").group(1), "a")
|
||||||
self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a")
|
self.assertEqual(re.match(r"(a(?=\s[abc]))", "a b").group(1), "a")
|
||||||
self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a")
|
self.assertEqual(re.match(r"(a(?=\s[abc]*))", "a bc").group(1), "a")
|
||||||
self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a")
|
self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a")
|
||||||
self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a")
|
self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a")
|
||||||
self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a")
|
self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a")
|
||||||
|
@ -848,12 +848,12 @@ class ReTests(unittest.TestCase):
|
||||||
self.assertEqual(re.match(b"abc", b"ABC", re.I|re.L).group(0), b"ABC")
|
self.assertEqual(re.match(b"abc", b"ABC", re.I|re.L).group(0), b"ABC")
|
||||||
|
|
||||||
def test_not_literal(self):
|
def test_not_literal(self):
|
||||||
self.assertEqual(re.search("\s([^a])", " b").group(1), "b")
|
self.assertEqual(re.search(r"\s([^a])", " b").group(1), "b")
|
||||||
self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb")
|
self.assertEqual(re.search(r"\s([^a]*)", " bb").group(1), "bb")
|
||||||
|
|
||||||
def test_search_coverage(self):
|
def test_search_coverage(self):
|
||||||
self.assertEqual(re.search("\s(b)", " b").group(1), "b")
|
self.assertEqual(re.search(r"\s(b)", " b").group(1), "b")
|
||||||
self.assertEqual(re.search("a\s", "a ").group(0), "a ")
|
self.assertEqual(re.search(r"a\s", "a ").group(0), "a ")
|
||||||
|
|
||||||
def assertMatch(self, pattern, text, match=None, span=None,
|
def assertMatch(self, pattern, text, match=None, span=None,
|
||||||
matcher=re.match):
|
matcher=re.match):
|
||||||
|
@ -1055,8 +1055,8 @@ class ReTests(unittest.TestCase):
|
||||||
self.assertIsNone(re.match(r'(a)?a','a').lastindex)
|
self.assertIsNone(re.match(r'(a)?a','a').lastindex)
|
||||||
self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1)
|
self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1)
|
||||||
self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a')
|
self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a')
|
||||||
self.assertEqual(re.match("(?P<a>a(b))", "ab").lastgroup, 'a')
|
self.assertEqual(re.match(r"(?P<a>a(b))", "ab").lastgroup, 'a')
|
||||||
self.assertEqual(re.match("((a))", "a").lastindex, 1)
|
self.assertEqual(re.match(r"((a))", "a").lastindex, 1)
|
||||||
|
|
||||||
def test_bug_418626(self):
|
def test_bug_418626(self):
|
||||||
# bugs 418626 at al. -- Testing Greg Chapman's addition of op code
|
# bugs 418626 at al. -- Testing Greg Chapman's addition of op code
|
||||||
|
@ -1228,7 +1228,7 @@ class ReTests(unittest.TestCase):
|
||||||
'\uff10', # '\N{FULLWIDTH DIGIT ZERO}', category 'Nd'
|
'\uff10', # '\N{FULLWIDTH DIGIT ZERO}', category 'Nd'
|
||||||
]
|
]
|
||||||
for x in decimal_digits:
|
for x in decimal_digits:
|
||||||
self.assertEqual(re.match('^\d$', x).group(0), x)
|
self.assertEqual(re.match(r'^\d$', x).group(0), x)
|
||||||
|
|
||||||
not_decimal_digits = [
|
not_decimal_digits = [
|
||||||
'\u2165', # '\N{ROMAN NUMERAL SIX}', category 'Nl'
|
'\u2165', # '\N{ROMAN NUMERAL SIX}', category 'Nl'
|
||||||
|
@ -1237,7 +1237,7 @@ class ReTests(unittest.TestCase):
|
||||||
'\u32b4', # '\N{CIRCLED NUMBER THIRTY NINE}', category 'No'
|
'\u32b4', # '\N{CIRCLED NUMBER THIRTY NINE}', category 'No'
|
||||||
]
|
]
|
||||||
for x in not_decimal_digits:
|
for x in not_decimal_digits:
|
||||||
self.assertIsNone(re.match('^\d$', x))
|
self.assertIsNone(re.match(r'^\d$', x))
|
||||||
|
|
||||||
def test_empty_array(self):
|
def test_empty_array(self):
|
||||||
# SF buf 1647541
|
# SF buf 1647541
|
||||||
|
@ -1306,29 +1306,29 @@ class ReTests(unittest.TestCase):
|
||||||
for flags in (0, re.UNICODE):
|
for flags in (0, re.UNICODE):
|
||||||
pat = re.compile('\xc0', flags | re.IGNORECASE)
|
pat = re.compile('\xc0', flags | re.IGNORECASE)
|
||||||
self.assertTrue(pat.match('\xe0'))
|
self.assertTrue(pat.match('\xe0'))
|
||||||
pat = re.compile('\w', flags)
|
pat = re.compile(r'\w', flags)
|
||||||
self.assertTrue(pat.match('\xe0'))
|
self.assertTrue(pat.match('\xe0'))
|
||||||
pat = re.compile('\xc0', re.ASCII | re.IGNORECASE)
|
pat = re.compile('\xc0', re.ASCII | re.IGNORECASE)
|
||||||
self.assertIsNone(pat.match('\xe0'))
|
self.assertIsNone(pat.match('\xe0'))
|
||||||
pat = re.compile('(?a)\xc0', re.IGNORECASE)
|
pat = re.compile('(?a)\xc0', re.IGNORECASE)
|
||||||
self.assertIsNone(pat.match('\xe0'))
|
self.assertIsNone(pat.match('\xe0'))
|
||||||
pat = re.compile('\w', re.ASCII)
|
pat = re.compile(r'\w', re.ASCII)
|
||||||
self.assertIsNone(pat.match('\xe0'))
|
self.assertIsNone(pat.match('\xe0'))
|
||||||
pat = re.compile('(?a)\w')
|
pat = re.compile(r'(?a)\w')
|
||||||
self.assertIsNone(pat.match('\xe0'))
|
self.assertIsNone(pat.match('\xe0'))
|
||||||
# Bytes patterns
|
# Bytes patterns
|
||||||
for flags in (0, re.ASCII):
|
for flags in (0, re.ASCII):
|
||||||
pat = re.compile(b'\xc0', flags | re.IGNORECASE)
|
pat = re.compile(b'\xc0', flags | re.IGNORECASE)
|
||||||
self.assertIsNone(pat.match(b'\xe0'))
|
self.assertIsNone(pat.match(b'\xe0'))
|
||||||
pat = re.compile(b'\w', flags)
|
pat = re.compile(br'\w', flags)
|
||||||
self.assertIsNone(pat.match(b'\xe0'))
|
self.assertIsNone(pat.match(b'\xe0'))
|
||||||
# Incompatibilities
|
# Incompatibilities
|
||||||
self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE)
|
self.assertRaises(ValueError, re.compile, br'\w', re.UNICODE)
|
||||||
self.assertRaises(ValueError, re.compile, b'(?u)\w')
|
self.assertRaises(ValueError, re.compile, br'(?u)\w')
|
||||||
self.assertRaises(ValueError, re.compile, '\w', re.UNICODE | re.ASCII)
|
self.assertRaises(ValueError, re.compile, r'\w', re.UNICODE | re.ASCII)
|
||||||
self.assertRaises(ValueError, re.compile, '(?u)\w', re.ASCII)
|
self.assertRaises(ValueError, re.compile, r'(?u)\w', re.ASCII)
|
||||||
self.assertRaises(ValueError, re.compile, '(?a)\w', re.UNICODE)
|
self.assertRaises(ValueError, re.compile, r'(?a)\w', re.UNICODE)
|
||||||
self.assertRaises(ValueError, re.compile, '(?au)\w')
|
self.assertRaises(ValueError, re.compile, r'(?au)\w')
|
||||||
|
|
||||||
def test_locale_flag(self):
|
def test_locale_flag(self):
|
||||||
import locale
|
import locale
|
||||||
|
@ -1359,13 +1359,13 @@ class ReTests(unittest.TestCase):
|
||||||
pat = re.compile(bpat, re.IGNORECASE)
|
pat = re.compile(bpat, re.IGNORECASE)
|
||||||
if bletter:
|
if bletter:
|
||||||
self.assertIsNone(pat.match(bletter))
|
self.assertIsNone(pat.match(bletter))
|
||||||
pat = re.compile(b'\w', re.LOCALE)
|
pat = re.compile(br'\w', re.LOCALE)
|
||||||
if bletter:
|
if bletter:
|
||||||
self.assertTrue(pat.match(bletter))
|
self.assertTrue(pat.match(bletter))
|
||||||
pat = re.compile(b'(?L)\w')
|
pat = re.compile(br'(?L)\w')
|
||||||
if bletter:
|
if bletter:
|
||||||
self.assertTrue(pat.match(bletter))
|
self.assertTrue(pat.match(bletter))
|
||||||
pat = re.compile(b'\w')
|
pat = re.compile(br'\w')
|
||||||
if bletter:
|
if bletter:
|
||||||
self.assertIsNone(pat.match(bletter))
|
self.assertIsNone(pat.match(bletter))
|
||||||
# Incompatibilities
|
# Incompatibilities
|
||||||
|
@ -1379,7 +1379,7 @@ class ReTests(unittest.TestCase):
|
||||||
def test_bug_6509(self):
|
def test_bug_6509(self):
|
||||||
# Replacement strings of both types must parse properly.
|
# Replacement strings of both types must parse properly.
|
||||||
# all strings
|
# all strings
|
||||||
pat = re.compile('a(\w)')
|
pat = re.compile(r'a(\w)')
|
||||||
self.assertEqual(pat.sub('b\\1', 'ac'), 'bc')
|
self.assertEqual(pat.sub('b\\1', 'ac'), 'bc')
|
||||||
pat = re.compile('a(.)')
|
pat = re.compile('a(.)')
|
||||||
self.assertEqual(pat.sub('b\\1', 'a\u1234'), 'b\u1234')
|
self.assertEqual(pat.sub('b\\1', 'a\u1234'), 'b\u1234')
|
||||||
|
@ -1387,7 +1387,7 @@ class ReTests(unittest.TestCase):
|
||||||
self.assertEqual(pat.sub(lambda m: 'str', 'a5'), 'str')
|
self.assertEqual(pat.sub(lambda m: 'str', 'a5'), 'str')
|
||||||
|
|
||||||
# all bytes
|
# all bytes
|
||||||
pat = re.compile(b'a(\w)')
|
pat = re.compile(br'a(\w)')
|
||||||
self.assertEqual(pat.sub(b'b\\1', b'ac'), b'bc')
|
self.assertEqual(pat.sub(b'b\\1', b'ac'), b'bc')
|
||||||
pat = re.compile(b'a(.)')
|
pat = re.compile(b'a(.)')
|
||||||
self.assertEqual(pat.sub(b'b\\1', b'a\xCD'), b'b\xCD')
|
self.assertEqual(pat.sub(b'b\\1', b'a\xCD'), b'b\xCD')
|
||||||
|
@ -1509,7 +1509,7 @@ class ReTests(unittest.TestCase):
|
||||||
for string in (b'[abracadabra]', B(b'[abracadabra]'),
|
for string in (b'[abracadabra]', B(b'[abracadabra]'),
|
||||||
bytearray(b'[abracadabra]'),
|
bytearray(b'[abracadabra]'),
|
||||||
memoryview(b'[abracadabra]')):
|
memoryview(b'[abracadabra]')):
|
||||||
m = re.search(rb'(.+)(.*?)\1', string)
|
m = re.search(br'(.+)(.*?)\1', string)
|
||||||
self.assertEqual(repr(m), "<%s.%s object; "
|
self.assertEqual(repr(m), "<%s.%s object; "
|
||||||
"span=(1, 12), match=b'abracadabra'>" %
|
"span=(1, 12), match=b'abracadabra'>" %
|
||||||
(type(m).__module__, type(m).__qualname__))
|
(type(m).__module__, type(m).__qualname__))
|
||||||
|
|
|
@ -704,8 +704,8 @@ class ArgsTestCase(BaseTestCase):
|
||||||
test = self.create_test('coverage')
|
test = self.create_test('coverage')
|
||||||
output = self.run_tests("--coverage", test)
|
output = self.run_tests("--coverage", test)
|
||||||
self.check_executed_tests(output, [test])
|
self.check_executed_tests(output, [test])
|
||||||
regex = ('lines +cov% +module +\(path\)\n'
|
regex = (r'lines +cov% +module +\(path\)\n'
|
||||||
'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
|
r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
|
||||||
self.check_line(output, regex)
|
self.check_line(output, regex)
|
||||||
|
|
||||||
def test_wait(self):
|
def test_wait(self):
|
||||||
|
|
|
@ -3405,7 +3405,7 @@ def test_main(verbose=False):
|
||||||
with warnings.catch_warnings():
|
with warnings.catch_warnings():
|
||||||
warnings.filterwarnings(
|
warnings.filterwarnings(
|
||||||
'ignore',
|
'ignore',
|
||||||
'dist\(\) and linux_distribution\(\) '
|
r'dist\(\) and linux_distribution\(\) '
|
||||||
'functions are deprecated .*',
|
'functions are deprecated .*',
|
||||||
PendingDeprecationWarning,
|
PendingDeprecationWarning,
|
||||||
)
|
)
|
||||||
|
|
|
@ -23,9 +23,9 @@ def escapestr(text, ampm):
|
||||||
"""
|
"""
|
||||||
new_text = re.escape(text)
|
new_text = re.escape(text)
|
||||||
new_text = new_text.replace(re.escape(ampm), ampm)
|
new_text = new_text.replace(re.escape(ampm), ampm)
|
||||||
new_text = new_text.replace('\%', '%')
|
new_text = new_text.replace(r'\%', '%')
|
||||||
new_text = new_text.replace('\:', ':')
|
new_text = new_text.replace(r'\:', ':')
|
||||||
new_text = new_text.replace('\?', '?')
|
new_text = new_text.replace(r'\?', '?')
|
||||||
return new_text
|
return new_text
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -121,9 +121,9 @@ class TestLiterals(unittest.TestCase):
|
||||||
self.assertEqual(eval(""" b'\x01' """), byte(1))
|
self.assertEqual(eval(""" b'\x01' """), byte(1))
|
||||||
self.assertEqual(eval(r""" b'\x81' """), byte(0x81))
|
self.assertEqual(eval(r""" b'\x81' """), byte(0x81))
|
||||||
self.assertRaises(SyntaxError, eval, """ b'\x81' """)
|
self.assertRaises(SyntaxError, eval, """ b'\x81' """)
|
||||||
self.assertEqual(eval(r""" b'\u1881' """), b'\\' + b'u1881')
|
self.assertEqual(eval(r""" br'\u1881' """), b'\\' + b'u1881')
|
||||||
self.assertRaises(SyntaxError, eval, """ b'\u1881' """)
|
self.assertRaises(SyntaxError, eval, """ b'\u1881' """)
|
||||||
self.assertEqual(eval(r""" b'\U0001d120' """), b'\\' + b'U0001d120')
|
self.assertEqual(eval(r""" br'\U0001d120' """), b'\\' + b'U0001d120')
|
||||||
self.assertRaises(SyntaxError, eval, """ b'\U0001d120' """)
|
self.assertRaises(SyntaxError, eval, """ b'\U0001d120' """)
|
||||||
|
|
||||||
def test_eval_bytes_incomplete(self):
|
def test_eval_bytes_incomplete(self):
|
||||||
|
|
|
@ -129,7 +129,7 @@ class TimeRETests(unittest.TestCase):
|
||||||
def test_pattern_escaping(self):
|
def test_pattern_escaping(self):
|
||||||
# Make sure any characters in the format string that might be taken as
|
# Make sure any characters in the format string that might be taken as
|
||||||
# regex syntax is escaped.
|
# regex syntax is escaped.
|
||||||
pattern_string = self.time_re.pattern("\d+")
|
pattern_string = self.time_re.pattern(r"\d+")
|
||||||
self.assertIn(r"\\d\+", pattern_string,
|
self.assertIn(r"\\d\+", pattern_string,
|
||||||
"%s does not have re characters escaped properly" %
|
"%s does not have re characters escaped properly" %
|
||||||
pattern_string)
|
pattern_string)
|
||||||
|
@ -170,9 +170,9 @@ class TimeRETests(unittest.TestCase):
|
||||||
|
|
||||||
def test_matching_with_escapes(self):
|
def test_matching_with_escapes(self):
|
||||||
# Make sure a format that requires escaping of characters works
|
# Make sure a format that requires escaping of characters works
|
||||||
compiled_re = self.time_re.compile("\w+ %m")
|
compiled_re = self.time_re.compile(r"\w+ %m")
|
||||||
found = compiled_re.match("\w+ 10")
|
found = compiled_re.match(r"\w+ 10")
|
||||||
self.assertTrue(found, "Escaping failed of format '\w+ 10'")
|
self.assertTrue(found, r"Escaping failed of format '\w+ 10'")
|
||||||
|
|
||||||
def test_locale_data_w_regex_metacharacters(self):
|
def test_locale_data_w_regex_metacharacters(self):
|
||||||
# Check that if locale data contains regex metacharacters they are
|
# Check that if locale data contains regex metacharacters they are
|
||||||
|
@ -403,7 +403,7 @@ class StrptimeTests(unittest.TestCase):
|
||||||
# unbalanced parentheses when the regex is compiled if they are not
|
# unbalanced parentheses when the regex is compiled if they are not
|
||||||
# escaped.
|
# escaped.
|
||||||
# Test instigated by bug #796149 .
|
# Test instigated by bug #796149 .
|
||||||
need_escaping = ".^$*+?{}\[]|)("
|
need_escaping = r".^$*+?{}\[]|)("
|
||||||
self.assertTrue(_strptime._strptime_time(need_escaping, need_escaping))
|
self.assertTrue(_strptime._strptime_time(need_escaping, need_escaping))
|
||||||
|
|
||||||
def test_feb29_on_leap_year_without_year(self):
|
def test_feb29_on_leap_year_without_year(self):
|
||||||
|
|
|
@ -1564,7 +1564,7 @@ class UnicodeTest(string_tests.CommonTest,
|
||||||
('+', b'+-'),
|
('+', b'+-'),
|
||||||
('+-', b'+--'),
|
('+-', b'+--'),
|
||||||
('+?', b'+-?'),
|
('+?', b'+-?'),
|
||||||
('\?', b'+AFw?'),
|
(r'\?', b'+AFw?'),
|
||||||
('+?', b'+-?'),
|
('+?', b'+-?'),
|
||||||
(r'\\?', b'+AFwAXA?'),
|
(r'\\?', b'+AFwAXA?'),
|
||||||
(r'\\\?', b'+AFwAXABc?'),
|
(r'\\\?', b'+AFwAXABc?'),
|
||||||
|
@ -2326,7 +2326,7 @@ class UnicodeTest(string_tests.CommonTest,
|
||||||
# non-ascii format, ascii argument: ensure that PyUnicode_FromFormatV()
|
# non-ascii format, ascii argument: ensure that PyUnicode_FromFormatV()
|
||||||
# raises an error
|
# raises an error
|
||||||
self.assertRaisesRegex(ValueError,
|
self.assertRaisesRegex(ValueError,
|
||||||
'^PyUnicode_FromFormatV\(\) expects an ASCII-encoded format '
|
r'^PyUnicode_FromFormatV\(\) expects an ASCII-encoded format '
|
||||||
'string, got a non-ASCII byte: 0xe9$',
|
'string, got a non-ASCII byte: 0xe9$',
|
||||||
PyUnicode_FromFormat, b'unicode\xe9=%s', 'ascii')
|
PyUnicode_FromFormat, b'unicode\xe9=%s', 'ascii')
|
||||||
|
|
||||||
|
|
|
@ -729,7 +729,7 @@ FF
|
||||||
|
|
||||||
|
|
||||||
class QuotingTests(unittest.TestCase):
|
class QuotingTests(unittest.TestCase):
|
||||||
"""Tests for urllib.quote() and urllib.quote_plus()
|
r"""Tests for urllib.quote() and urllib.quote_plus()
|
||||||
|
|
||||||
According to RFC 2396 (Uniform Resource Identifiers), to escape a
|
According to RFC 2396 (Uniform Resource Identifiers), to escape a
|
||||||
character you write it as '%' + <2 character US-ASCII hex value>.
|
character you write it as '%' + <2 character US-ASCII hex value>.
|
||||||
|
@ -804,7 +804,7 @@ class QuotingTests(unittest.TestCase):
|
||||||
# Make sure all characters that should be quoted are by default sans
|
# Make sure all characters that should be quoted are by default sans
|
||||||
# space (separate test for that).
|
# space (separate test for that).
|
||||||
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
|
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
|
||||||
should_quote.append('<>#%"{}|\^[]`')
|
should_quote.append(r'<>#%"{}|\^[]`')
|
||||||
should_quote.append(chr(127)) # For 0x7F
|
should_quote.append(chr(127)) # For 0x7F
|
||||||
should_quote = ''.join(should_quote)
|
should_quote = ''.join(should_quote)
|
||||||
for char in should_quote:
|
for char in should_quote:
|
||||||
|
|
|
@ -1218,7 +1218,7 @@ class CGIHandlerTestCase(unittest.TestCase):
|
||||||
content = handle[handle.find("<?xml"):]
|
content = handle[handle.find("<?xml"):]
|
||||||
|
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
int(re.search('Content-Length: (\d+)', handle).group(1)),
|
int(re.search(r'Content-Length: (\d+)', handle).group(1)),
|
||||||
len(content))
|
len(content))
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -167,7 +167,7 @@ class TextTestRunner(object):
|
||||||
if self.warnings in ['default', 'always']:
|
if self.warnings in ['default', 'always']:
|
||||||
warnings.filterwarnings('module',
|
warnings.filterwarnings('module',
|
||||||
category=DeprecationWarning,
|
category=DeprecationWarning,
|
||||||
message='Please use assert\w+ instead.')
|
message=r'Please use assert\w+ instead.')
|
||||||
startTime = time.time()
|
startTime = time.time()
|
||||||
startTestRun = getattr(result, 'startTestRun', None)
|
startTestRun = getattr(result, 'startTestRun', None)
|
||||||
if startTestRun is not None:
|
if startTestRun is not None:
|
||||||
|
|
|
@ -240,7 +240,7 @@ class TestLongMessage(unittest.TestCase):
|
||||||
# Error messages are multiline so not testing on full message
|
# Error messages are multiline so not testing on full message
|
||||||
# assertTupleEqual and assertListEqual delegate to this method
|
# assertTupleEqual and assertListEqual delegate to this method
|
||||||
self.assertMessages('assertSequenceEqual', ([], [None]),
|
self.assertMessages('assertSequenceEqual', ([], [None]),
|
||||||
["\+ \[None\]$", "^oops$", r"\+ \[None\]$",
|
[r"\+ \[None\]$", "^oops$", r"\+ \[None\]$",
|
||||||
r"\+ \[None\] : oops$"])
|
r"\+ \[None\] : oops$"])
|
||||||
|
|
||||||
def testAssertSetEqual(self):
|
def testAssertSetEqual(self):
|
||||||
|
@ -250,21 +250,21 @@ class TestLongMessage(unittest.TestCase):
|
||||||
|
|
||||||
def testAssertIn(self):
|
def testAssertIn(self):
|
||||||
self.assertMessages('assertIn', (None, []),
|
self.assertMessages('assertIn', (None, []),
|
||||||
['^None not found in \[\]$', "^oops$",
|
[r'^None not found in \[\]$', "^oops$",
|
||||||
'^None not found in \[\]$',
|
r'^None not found in \[\]$',
|
||||||
'^None not found in \[\] : oops$'])
|
r'^None not found in \[\] : oops$'])
|
||||||
|
|
||||||
def testAssertNotIn(self):
|
def testAssertNotIn(self):
|
||||||
self.assertMessages('assertNotIn', (None, [None]),
|
self.assertMessages('assertNotIn', (None, [None]),
|
||||||
['^None unexpectedly found in \[None\]$', "^oops$",
|
[r'^None unexpectedly found in \[None\]$', "^oops$",
|
||||||
'^None unexpectedly found in \[None\]$',
|
r'^None unexpectedly found in \[None\]$',
|
||||||
'^None unexpectedly found in \[None\] : oops$'])
|
r'^None unexpectedly found in \[None\] : oops$'])
|
||||||
|
|
||||||
def testAssertDictEqual(self):
|
def testAssertDictEqual(self):
|
||||||
self.assertMessages('assertDictEqual', ({}, {'key': 'value'}),
|
self.assertMessages('assertDictEqual', ({}, {'key': 'value'}),
|
||||||
[r"\+ \{'key': 'value'\}$", "^oops$",
|
[r"\+ \{'key': 'value'\}$", "^oops$",
|
||||||
"\+ \{'key': 'value'\}$",
|
r"\+ \{'key': 'value'\}$",
|
||||||
"\+ \{'key': 'value'\} : oops$"])
|
r"\+ \{'key': 'value'\} : oops$"])
|
||||||
|
|
||||||
def testAssertDictContainsSubset(self):
|
def testAssertDictContainsSubset(self):
|
||||||
with warnings.catch_warnings():
|
with warnings.catch_warnings():
|
||||||
|
|
|
@ -390,7 +390,7 @@ class Test_TestLoader(unittest.TestCase):
|
||||||
suite = loader.loadTestsFromName('abc () //')
|
suite = loader.loadTestsFromName('abc () //')
|
||||||
error, test = self.check_deferred_error(loader, suite)
|
error, test = self.check_deferred_error(loader, suite)
|
||||||
expected = "Failed to import test module: abc () //"
|
expected = "Failed to import test module: abc () //"
|
||||||
expected_regex = "Failed to import test module: abc \(\) //"
|
expected_regex = r"Failed to import test module: abc \(\) //"
|
||||||
self.assertIn(
|
self.assertIn(
|
||||||
expected, error,
|
expected, error,
|
||||||
'missing error string in %r' % error)
|
'missing error string in %r' % error)
|
||||||
|
@ -502,7 +502,7 @@ class Test_TestLoader(unittest.TestCase):
|
||||||
suite = loader.loadTestsFromName('abc () //', unittest)
|
suite = loader.loadTestsFromName('abc () //', unittest)
|
||||||
error, test = self.check_deferred_error(loader, suite)
|
error, test = self.check_deferred_error(loader, suite)
|
||||||
expected = "module 'unittest' has no attribute 'abc () //'"
|
expected = "module 'unittest' has no attribute 'abc () //'"
|
||||||
expected_regex = "module 'unittest' has no attribute 'abc \(\) //'"
|
expected_regex = r"module 'unittest' has no attribute 'abc \(\) //'"
|
||||||
self.assertIn(
|
self.assertIn(
|
||||||
expected, error,
|
expected, error,
|
||||||
'missing error string in %r' % error)
|
'missing error string in %r' % error)
|
||||||
|
@ -809,7 +809,7 @@ class Test_TestLoader(unittest.TestCase):
|
||||||
suite = loader.loadTestsFromNames(['abc () //'])
|
suite = loader.loadTestsFromNames(['abc () //'])
|
||||||
error, test = self.check_deferred_error(loader, list(suite)[0])
|
error, test = self.check_deferred_error(loader, list(suite)[0])
|
||||||
expected = "Failed to import test module: abc () //"
|
expected = "Failed to import test module: abc () //"
|
||||||
expected_regex = "Failed to import test module: abc \(\) //"
|
expected_regex = r"Failed to import test module: abc \(\) //"
|
||||||
self.assertIn(
|
self.assertIn(
|
||||||
expected, error,
|
expected, error,
|
||||||
'missing error string in %r' % error)
|
'missing error string in %r' % error)
|
||||||
|
@ -928,7 +928,7 @@ class Test_TestLoader(unittest.TestCase):
|
||||||
suite = loader.loadTestsFromNames(['abc () //'], unittest)
|
suite = loader.loadTestsFromNames(['abc () //'], unittest)
|
||||||
error, test = self.check_deferred_error(loader, list(suite)[0])
|
error, test = self.check_deferred_error(loader, list(suite)[0])
|
||||||
expected = "module 'unittest' has no attribute 'abc () //'"
|
expected = "module 'unittest' has no attribute 'abc () //'"
|
||||||
expected_regex = "module 'unittest' has no attribute 'abc \(\) //'"
|
expected_regex = r"module 'unittest' has no attribute 'abc \(\) //'"
|
||||||
self.assertIn(
|
self.assertIn(
|
||||||
expected, error,
|
expected, error,
|
||||||
'missing error string in %r' % error)
|
'missing error string in %r' % error)
|
||||||
|
|
|
@ -59,15 +59,15 @@
|
||||||
import re
|
import re
|
||||||
|
|
||||||
xpath_tokenizer_re = re.compile(
|
xpath_tokenizer_re = re.compile(
|
||||||
"("
|
r"("
|
||||||
"'[^']*'|\"[^\"]*\"|"
|
r"'[^']*'|\"[^\"]*\"|"
|
||||||
"::|"
|
r"::|"
|
||||||
"//?|"
|
r"//?|"
|
||||||
"\.\.|"
|
r"\.\.|"
|
||||||
"\(\)|"
|
r"\(\)|"
|
||||||
"[/.*:\[\]\(\)@=])|"
|
r"[/.*:\[\]\(\)@=])|"
|
||||||
"((?:\{[^}]+\})?[^/\[\]\(\)@=\s]+)|"
|
r"((?:\{[^}]+\})?[^/\[\]\(\)@=\s]+)|"
|
||||||
"\s+"
|
r"\s+"
|
||||||
)
|
)
|
||||||
|
|
||||||
def xpath_tokenizer(pattern, namespaces=None):
|
def xpath_tokenizer(pattern, namespaces=None):
|
||||||
|
@ -180,7 +180,7 @@ def prepare_predicate(next, token):
|
||||||
if elem.get(key) == value:
|
if elem.get(key) == value:
|
||||||
yield elem
|
yield elem
|
||||||
return select
|
return select
|
||||||
if signature == "-" and not re.match("\-?\d+$", predicate[0]):
|
if signature == "-" and not re.match(r"\-?\d+$", predicate[0]):
|
||||||
# [tag]
|
# [tag]
|
||||||
tag = predicate[0]
|
tag = predicate[0]
|
||||||
def select(context, result):
|
def select(context, result):
|
||||||
|
@ -188,7 +188,7 @@ def prepare_predicate(next, token):
|
||||||
if elem.find(tag) is not None:
|
if elem.find(tag) is not None:
|
||||||
yield elem
|
yield elem
|
||||||
return select
|
return select
|
||||||
if signature == "-='" and not re.match("\-?\d+$", predicate[0]):
|
if signature == "-='" and not re.match(r"\-?\d+$", predicate[0]):
|
||||||
# [tag='value']
|
# [tag='value']
|
||||||
tag = predicate[0]
|
tag = predicate[0]
|
||||||
value = predicate[-1]
|
value = predicate[-1]
|
||||||
|
|
|
@ -1030,7 +1030,7 @@ def register_namespace(prefix, uri):
|
||||||
ValueError is raised if prefix is reserved or is invalid.
|
ValueError is raised if prefix is reserved or is invalid.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if re.match("ns\d+$", prefix):
|
if re.match(r"ns\d+$", prefix):
|
||||||
raise ValueError("Prefix format reserved for internal use")
|
raise ValueError("Prefix format reserved for internal use")
|
||||||
for k, v in list(_namespace_map.items()):
|
for k, v in list(_namespace_map.items()):
|
||||||
if k == uri or v == prefix:
|
if k == uri or v == prefix:
|
||||||
|
|
|
@ -1222,7 +1222,7 @@ def OverrideStdioWith(stdout):
|
||||||
|
|
||||||
def create_regex(before, after, word=True, whole_line=True):
|
def create_regex(before, after, word=True, whole_line=True):
|
||||||
"""Create an re object for matching marker lines."""
|
"""Create an re object for matching marker lines."""
|
||||||
group_re = "\w+" if word else ".+"
|
group_re = r"\w+" if word else ".+"
|
||||||
pattern = r'{}({}){}'
|
pattern = r'{}({}){}'
|
||||||
if whole_line:
|
if whole_line:
|
||||||
pattern = '^' + pattern + '$'
|
pattern = '^' + pattern + '$'
|
||||||
|
|
|
@ -413,7 +413,7 @@ class FormulaCell(BaseCell):
|
||||||
|
|
||||||
def renumber(self, x1, y1, x2, y2, dx, dy):
|
def renumber(self, x1, y1, x2, y2, dx, dy):
|
||||||
out = []
|
out = []
|
||||||
for part in re.split('(\w+)', self.formula):
|
for part in re.split(r'(\w+)', self.formula):
|
||||||
m = re.match('^([A-Z]+)([1-9][0-9]*)$', part)
|
m = re.match('^([A-Z]+)([1-9][0-9]*)$', part)
|
||||||
if m is not None:
|
if m is not None:
|
||||||
sx, sy = m.groups()
|
sx, sy = m.groups()
|
||||||
|
|
|
@ -95,7 +95,7 @@ def realwork(vars, moddefns, target):
|
||||||
print()
|
print()
|
||||||
|
|
||||||
print('$(temp_dir):')
|
print('$(temp_dir):')
|
||||||
print(' if not exist $(temp_dir)\. mkdir $(temp_dir)')
|
print(r' if not exist $(temp_dir)\. mkdir $(temp_dir)')
|
||||||
print()
|
print()
|
||||||
|
|
||||||
objects = []
|
objects = []
|
||||||
|
@ -106,7 +106,7 @@ def realwork(vars, moddefns, target):
|
||||||
base = os.path.basename(file)
|
base = os.path.basename(file)
|
||||||
base, ext = os.path.splitext(base)
|
base, ext = os.path.splitext(base)
|
||||||
objects.append(base + ".obj")
|
objects.append(base + ".obj")
|
||||||
print('$(temp_dir)\%s.obj: "%s"' % (base, file))
|
print(r'$(temp_dir)\%s.obj: "%s"' % (base, file))
|
||||||
print("\t@$(CC) -c -nologo /Fo$* $(cdl) $(c_debug) /D BUILD_FREEZE", end=' ')
|
print("\t@$(CC) -c -nologo /Fo$* $(cdl) $(c_debug) /D BUILD_FREEZE", end=' ')
|
||||||
print('"-I$(pythonhome)/Include" "-I$(pythonhome)/PC" \\')
|
print('"-I$(pythonhome)/Include" "-I$(pythonhome)/PC" \\')
|
||||||
print("\t\t$(cflags) $(cdebug) $(cinclude) \\")
|
print("\t\t$(cflags) $(cdebug) $(cinclude) \\")
|
||||||
|
@ -126,7 +126,7 @@ def realwork(vars, moddefns, target):
|
||||||
print() ; print()
|
print() ; print()
|
||||||
|
|
||||||
print("OBJS=", end=' ')
|
print("OBJS=", end=' ')
|
||||||
for obj in objects: print('"$(temp_dir)\%s"' % (obj), end=' ')
|
for obj in objects: print(r'"$(temp_dir)\%s"' % (obj), end=' ')
|
||||||
print() ; print()
|
print() ; print()
|
||||||
|
|
||||||
print("LIBS=", end=' ')
|
print("LIBS=", end=' ')
|
||||||
|
|
|
@ -99,8 +99,8 @@ def option_dict(options):
|
||||||
# Alias
|
# Alias
|
||||||
getpasswd = invisible_input
|
getpasswd = invisible_input
|
||||||
|
|
||||||
_integerRE = re.compile('\s*(-?\d+)\s*$')
|
_integerRE = re.compile(r'\s*(-?\d+)\s*$')
|
||||||
_integerRangeRE = re.compile('\s*(-?\d+)\s*-\s*(-?\d+)\s*$')
|
_integerRangeRE = re.compile(r'\s*(-?\d+)\s*-\s*(-?\d+)\s*$')
|
||||||
|
|
||||||
def srange(s,
|
def srange(s,
|
||||||
|
|
||||||
|
|
|
@ -134,17 +134,17 @@ class ColorDB:
|
||||||
|
|
||||||
class RGBColorDB(ColorDB):
|
class RGBColorDB(ColorDB):
|
||||||
_re = re.compile(
|
_re = re.compile(
|
||||||
'\s*(?P<red>\d+)\s+(?P<green>\d+)\s+(?P<blue>\d+)\s+(?P<name>.*)')
|
r'\s*(?P<red>\d+)\s+(?P<green>\d+)\s+(?P<blue>\d+)\s+(?P<name>.*)')
|
||||||
|
|
||||||
|
|
||||||
class HTML40DB(ColorDB):
|
class HTML40DB(ColorDB):
|
||||||
_re = re.compile('(?P<name>\S+)\s+(?P<hexrgb>#[0-9a-fA-F]{6})')
|
_re = re.compile(r'(?P<name>\S+)\s+(?P<hexrgb>#[0-9a-fA-F]{6})')
|
||||||
|
|
||||||
def _extractrgb(self, mo):
|
def _extractrgb(self, mo):
|
||||||
return rrggbb_to_triplet(mo.group('hexrgb'))
|
return rrggbb_to_triplet(mo.group('hexrgb'))
|
||||||
|
|
||||||
class LightlinkDB(HTML40DB):
|
class LightlinkDB(HTML40DB):
|
||||||
_re = re.compile('(?P<name>(.+))\s+(?P<hexrgb>#[0-9a-fA-F]{6})')
|
_re = re.compile(r'(?P<name>(.+))\s+(?P<hexrgb>#[0-9a-fA-F]{6})')
|
||||||
|
|
||||||
def _extractname(self, mo):
|
def _extractname(self, mo):
|
||||||
return mo.group('name').strip()
|
return mo.group('name').strip()
|
||||||
|
|
|
@ -174,8 +174,8 @@ def usage(msg):
|
||||||
sys.stderr.write("Usage: %s [-m] warnings\n" % sys.argv[0])
|
sys.stderr.write("Usage: %s [-m] warnings\n" % sys.argv[0])
|
||||||
sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0])
|
sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0])
|
||||||
|
|
||||||
PATTERN = ("^(.+?):(\d+): DeprecationWarning: "
|
PATTERN = (r"^(.+?):(\d+): DeprecationWarning: "
|
||||||
"classic (int|long|float|complex) division$")
|
r"classic (int|long|float|complex) division$")
|
||||||
|
|
||||||
def readwarnings(warningsfile):
|
def readwarnings(warningsfile):
|
||||||
prog = re.compile(PATTERN)
|
prog = re.compile(PATTERN)
|
||||||
|
|
|
@ -23,13 +23,13 @@
|
||||||
|
|
||||||
import sys, re, getopt, os
|
import sys, re, getopt, os
|
||||||
|
|
||||||
p_define = re.compile('^[\t ]*#[\t ]*define[\t ]+([a-zA-Z0-9_]+)[\t ]+')
|
p_define = re.compile(r'^[\t ]*#[\t ]*define[\t ]+([a-zA-Z0-9_]+)[\t ]+')
|
||||||
|
|
||||||
p_macro = re.compile(
|
p_macro = re.compile(
|
||||||
'^[\t ]*#[\t ]*define[\t ]+'
|
r'^[\t ]*#[\t ]*define[\t ]+'
|
||||||
'([a-zA-Z0-9_]+)\(([_a-zA-Z][_a-zA-Z0-9]*)\)[\t ]+')
|
r'([a-zA-Z0-9_]+)\(([_a-zA-Z][_a-zA-Z0-9]*)\)[\t ]+')
|
||||||
|
|
||||||
p_include = re.compile('^[\t ]*#[\t ]*include[\t ]+<([^>\n]+)>')
|
p_include = re.compile(r'^[\t ]*#[\t ]*include[\t ]+<([^>\n]+)>')
|
||||||
|
|
||||||
p_comment = re.compile(r'/\*([^*]+|\*+[^/])*(\*+/)?')
|
p_comment = re.compile(r'/\*([^*]+|\*+[^/])*(\*+/)?')
|
||||||
p_cpp_comment = re.compile('//.*')
|
p_cpp_comment = re.compile('//.*')
|
||||||
|
|
|
@ -147,14 +147,14 @@ def build_html_page(classified_text, title='python',
|
||||||
#### LaTeX Output ##########################################
|
#### LaTeX Output ##########################################
|
||||||
|
|
||||||
default_latex_commands = {
|
default_latex_commands = {
|
||||||
'comment': '{\color{red}#1}',
|
'comment': r'{\color{red}#1}',
|
||||||
'string': '{\color{ForestGreen}#1}',
|
'string': r'{\color{ForestGreen}#1}',
|
||||||
'docstring': '{\emph{\color{ForestGreen}#1}}',
|
'docstring': r'{\emph{\color{ForestGreen}#1}}',
|
||||||
'keyword': '{\color{orange}#1}',
|
'keyword': r'{\color{orange}#1}',
|
||||||
'builtin': '{\color{purple}#1}',
|
'builtin': r'{\color{purple}#1}',
|
||||||
'definition': '{\color{orange}#1}',
|
'definition': r'{\color{orange}#1}',
|
||||||
'defname': '{\color{blue}#1}',
|
'defname': r'{\color{blue}#1}',
|
||||||
'operator': '{\color{brown}#1}',
|
'operator': r'{\color{brown}#1}',
|
||||||
}
|
}
|
||||||
|
|
||||||
default_latex_document = r'''
|
default_latex_document = r'''
|
||||||
|
|
|
@ -88,7 +88,7 @@ del i
|
||||||
# no more expressions are searched for. So, order is important.
|
# no more expressions are searched for. So, order is important.
|
||||||
emparse_list_reason = [
|
emparse_list_reason = [
|
||||||
r'^5\d{2} <>\.\.\. (?P<reason>.*)',
|
r'^5\d{2} <>\.\.\. (?P<reason>.*)',
|
||||||
'<>\.\.\. (?P<reason>.*)',
|
r'<>\.\.\. (?P<reason>.*)',
|
||||||
re.compile(r'^<<< 5\d{2} (?P<reason>.*)', re.MULTILINE),
|
re.compile(r'^<<< 5\d{2} (?P<reason>.*)', re.MULTILINE),
|
||||||
re.compile('===== stderr was =====\nrmail: (?P<reason>.*)'),
|
re.compile('===== stderr was =====\nrmail: (?P<reason>.*)'),
|
||||||
re.compile('^Diagnostic-Code: (?P<reason>.*)', re.MULTILINE),
|
re.compile('^Diagnostic-Code: (?P<reason>.*)', re.MULTILINE),
|
||||||
|
|
|
@ -14,7 +14,7 @@
|
||||||
"""
|
"""
|
||||||
import re,sys
|
import re,sys
|
||||||
|
|
||||||
entityRE = re.compile('<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->')
|
entityRE = re.compile(r'<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->')
|
||||||
|
|
||||||
def parse(text,pos=0,endpos=None):
|
def parse(text,pos=0,endpos=None):
|
||||||
|
|
||||||
|
@ -39,7 +39,7 @@ def writefile(f,defs):
|
||||||
if charcode[:2] == '&#':
|
if charcode[:2] == '&#':
|
||||||
code = int(charcode[2:-1])
|
code = int(charcode[2:-1])
|
||||||
if code < 256:
|
if code < 256:
|
||||||
charcode = "'\%o'" % code
|
charcode = r"'\%o'" % code
|
||||||
else:
|
else:
|
||||||
charcode = repr(charcode)
|
charcode = repr(charcode)
|
||||||
else:
|
else:
|
||||||
|
|
|
@ -64,7 +64,7 @@ def main():
|
||||||
if fix(arg): bad = 1
|
if fix(arg): bad = 1
|
||||||
sys.exit(bad)
|
sys.exit(bad)
|
||||||
|
|
||||||
ispythonprog = re.compile('^[a-zA-Z0-9_]+\.py$')
|
ispythonprog = re.compile(r'^[a-zA-Z0-9_]+\.py$')
|
||||||
def ispython(name):
|
def ispython(name):
|
||||||
return bool(ispythonprog.match(name))
|
return bool(ispythonprog.match(name))
|
||||||
|
|
||||||
|
|
|
@ -24,7 +24,7 @@ def main():
|
||||||
for s in tags: fp.write(s)
|
for s in tags: fp.write(s)
|
||||||
|
|
||||||
|
|
||||||
expr = '^[ \t]*(def|class)[ \t]+([a-zA-Z0-9_]+)[ \t]*[:\(]'
|
expr = r'^[ \t]*(def|class)[ \t]+([a-zA-Z0-9_]+)[ \t]*[:\(]'
|
||||||
matcher = re.compile(expr)
|
matcher = re.compile(expr)
|
||||||
|
|
||||||
def treat_file(filename):
|
def treat_file(filename):
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
#! /usr/bin/env python3
|
#! /usr/bin/env python3
|
||||||
|
|
||||||
"""
|
r"""
|
||||||
SVN helper script.
|
SVN helper script.
|
||||||
|
|
||||||
Try to set the svn:eol-style property to "native" on every .py, .txt, .c and
|
Try to set the svn:eol-style property to "native" on every .py, .txt, .c and
|
||||||
|
|
|
@ -78,11 +78,11 @@ spprog = re.compile('[\n@{}&<>]') # Special characters in
|
||||||
# running text
|
# running text
|
||||||
#
|
#
|
||||||
# menu item (Yuck!)
|
# menu item (Yuck!)
|
||||||
miprog = re.compile('^\* ([^:]*):(:|[ \t]*([^\t,\n.]+)([^ \t\n]*))[ \t\n]*')
|
miprog = re.compile(r'^\* ([^:]*):(:|[ \t]*([^\t,\n.]+)([^ \t\n]*))[ \t\n]*')
|
||||||
# 0 1 1 2 3 34 42 0
|
# 0 1 1 2 3 34 42 0
|
||||||
# ----- ---------- ---------
|
# ----- ---------- ---------
|
||||||
# -|-----------------------------
|
# -|-----------------------------
|
||||||
# -----------------------------------------------------
|
# -----------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -37,11 +37,11 @@ UNI_UNDEFINED = chr(0xFFFE)
|
||||||
# Placeholder for a missing code point
|
# Placeholder for a missing code point
|
||||||
MISSING_CODE = -1
|
MISSING_CODE = -1
|
||||||
|
|
||||||
mapRE = re.compile('((?:0x[0-9a-fA-F]+\+?)+)'
|
mapRE = re.compile(r'((?:0x[0-9a-fA-F]+\+?)+)'
|
||||||
'\s+'
|
r'\s+'
|
||||||
'((?:(?:0x[0-9a-fA-Z]+|<[A-Za-z]+>)\+?)*)'
|
r'((?:(?:0x[0-9a-fA-Z]+|<[A-Za-z]+>)\+?)*)'
|
||||||
'\s*'
|
r'\s*'
|
||||||
'(#.+)?')
|
r'(#.+)?')
|
||||||
|
|
||||||
def parsecodes(codes, len=len, range=range):
|
def parsecodes(codes, len=len, range=range):
|
||||||
|
|
||||||
|
|
4
setup.py
4
setup.py
|
@ -838,7 +838,7 @@ class PyBuildExt(build_ext):
|
||||||
# find out which version of OpenSSL we have
|
# find out which version of OpenSSL we have
|
||||||
openssl_ver = 0
|
openssl_ver = 0
|
||||||
openssl_ver_re = re.compile(
|
openssl_ver_re = re.compile(
|
||||||
'^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
|
r'^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
|
||||||
|
|
||||||
# look for the openssl version header on the compiler search path.
|
# look for the openssl version header on the compiler search path.
|
||||||
opensslv_h = find_file('openssl/opensslv.h', [],
|
opensslv_h = find_file('openssl/opensslv.h', [],
|
||||||
|
@ -1724,7 +1724,7 @@ class PyBuildExt(build_ext):
|
||||||
# All existing framework builds of Tcl/Tk don't support 64-bit
|
# All existing framework builds of Tcl/Tk don't support 64-bit
|
||||||
# architectures.
|
# architectures.
|
||||||
cflags = sysconfig.get_config_vars('CFLAGS')[0]
|
cflags = sysconfig.get_config_vars('CFLAGS')[0]
|
||||||
archs = re.findall('-arch\s+(\w+)', cflags)
|
archs = re.findall(r'-arch\s+(\w+)', cflags)
|
||||||
|
|
||||||
tmpfile = os.path.join(self.build_temp, 'tk.arch')
|
tmpfile = os.path.join(self.build_temp, 'tk.arch')
|
||||||
if not os.path.exists(self.build_temp):
|
if not os.path.exists(self.build_temp):
|
||||||
|
|
Loading…
Reference in New Issue