Issue 21977: Minor improvements to the regexes in the tokenizer example.

This commit is contained in:
Raymond Hettinger 2014-07-14 01:52:00 -07:00
parent 5283c4e108
commit 8323f68f3e
1 changed file with 2 additions and 2 deletions

View File

@@ -1340,9 +1340,9 @@ successive matches::
     ('ASSIGN',  r':='),           # Assignment operator
     ('END',     r';'),            # Statement terminator
     ('ID',      r'[A-Za-z]+'),    # Identifiers
-    ('OP',      r'[+*\/\-]'),     # Arithmetic operators
+    ('OP',      r'[+\-*/]'),      # Arithmetic operators
     ('NEWLINE', r'\n'),           # Line endings
-    ('SKIP',    r'[ \t]'),        # Skip over spaces and tabs
+    ('SKIP',    r'[ \t]+'),       # Skip over spaces and tabs
 ]
 tok_regex = '|'.join('(?P<%s>%s)' % pair for pair in token_specification)
 get_token = re.compile(tok_regex).match