mirror of https://github.com/python/cpython
Issue 21977: Minor improvements to the regexes in the tokenizer example.
commit 8323f68f3e
parent 5283c4e108
@@ -1340,9 +1340,9 @@ successive matches::
     ('ASSIGN',  r':='),           # Assignment operator
     ('END',     r';'),            # Statement terminator
     ('ID',      r'[A-Za-z]+'),    # Identifiers
-    ('OP',      r'[+*\/\-]'),     # Arithmetic operators
+    ('OP',      r'[+\-*/]'),      # Arithmetic operators
     ('NEWLINE', r'\n'),           # Line endings
-    ('SKIP',    r'[ \t]'),        # Skip over spaces and tabs
+    ('SKIP',    r'[ \t]+'),       # Skip over spaces and tabs
 ]
 tok_regex = '|'.join('(?P<%s>%s)' % pair for pair in token_specification)
 get_token = re.compile(tok_regex).match
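For context, here is a minimal, runnable sketch of the tokenizer these lines belong to, assembled from the hunk above. Only the rules visible in the hunk are included (the full documentation example has more, such as a NUMBER rule, not shown here), and the sample statement and scanning loop are illustrative additions. It shows what the two changes buy: [+\-*/] drops the old pattern's unneeded \/ escape while keeping the hyphen escaped so it stays a literal, and the trailing + in [ \t]+ consumes a whole run of spaces and tabs in one match.

import re

# Token specification as it reads after this commit. Only the rules from the
# hunk appear; the rest of the documentation example is omitted.
token_specification = [
    ('ASSIGN',  r':='),           # Assignment operator
    ('END',     r';'),            # Statement terminator
    ('ID',      r'[A-Za-z]+'),    # Identifiers
    ('OP',      r'[+\-*/]'),      # Arithmetic operators
    ('NEWLINE', r'\n'),           # Line endings
    ('SKIP',    r'[ \t]+'),       # Skip over a whole run of spaces and tabs
]
tok_regex = '|'.join('(?P<%s>%s)' % pair for pair in token_specification)
get_token = re.compile(tok_regex).match

# Illustrative scanning loop (not part of the hunk): match repeatedly at the
# current position and report every token except skipped whitespace.
statement = 'net := gross - tax;'   # hypothetical input
pos = 0
mo = get_token(statement)
while mo is not None:
    if mo.lastgroup != 'SKIP':
        print(mo.lastgroup, repr(mo.group()))
    pos = mo.end()
    mo = get_token(statement, pos)

With the old [ \t] pattern this loop would produce one SKIP match per whitespace character; the + makes each gap a single match, which is both cheaper and consistent with how the other rules match whole tokens.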