#
# Secret Labs' Regular Expression Engine
#
# re-compatible interface for the sre matching engine
#
# Copyright (c) 1998-2001 by Secret Labs AB.  All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license.  For any other use, please contact Secret Labs
# AB (info@pythonware.com).
#
# Portions of this engine have been developed in cooperation with
# CNRI.  Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#

r"""Support for regular expressions (RE).

This module provides regular expression matching operations similar to
those found in Perl.  It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.

Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves.  You can
concatenate ordinary characters, so last matches the string 'last'.

The special characters are:
    "."      Matches any character except a newline.
    "^"      Matches the start of the string.
    "$"      Matches the end of the string or just before the newline at
             the end of the string.
    "*"      Matches 0 or more (greedy) repetitions of the preceding RE.
             Greedy means that it will match as many repetitions as possible.
    "+"      Matches 1 or more (greedy) repetitions of the preceding RE.
    "?"      Matches 0 or 1 (greedy) of the preceding RE.
    *?,+?,?? Non-greedy versions of the previous three special characters.
    {m,n}    Matches from m to n repetitions of the preceding RE.
    {m,n}?   Non-greedy version of the above.
    "\\"     Either escapes special characters or signals a special sequence.
    []       Indicates a set of characters.
             A "^" as the first character indicates a complementing set.
    "|"      A|B, creates an RE that will match either A or B.
    (...)    Matches the RE inside the parentheses.
             The contents can be retrieved or matched later in the string.
    (?aiLmsux) Set the A, I, L, M, S, U, or X flag for the RE (see below).
    (?:...)  Non-grouping version of regular parentheses.
    (?P<name>...) The substring matched by the group is accessible by name.
    (?P=name)     Matches the text matched earlier by the group named name.
    (?#...)  A comment; ignored.
    (?=...)  Matches if ... matches next, but doesn't consume the string.
    (?!...)  Matches if ... doesn't match next.
    (?<=...) Matches if preceded by ... (must be fixed length).
    (?<!...) Matches if not preceded by ... (must be fixed length).

This module exports the following functions:
    match    Match a regular expression pattern to the beginning of a string.
    search   Search a string for the presence of a pattern.
    sub      Substitute occurrences of a pattern found in a string.
    subn     Same as sub, but also return the number of substitutions made.
    split    Split a string by the occurrences of a pattern.
    findall  Find all occurrences of a pattern in a string.
    finditer Return an iterator yielding a match object for each match.
    compile  Compile a pattern into a pattern object.
    purge    Clear the regular expression cache.
    template Compile a template pattern, returning a pattern object.
    escape   Backslash all non-alphanumerics in a string.

This module also defines an exception 'error'.

"""

import sys
import sre_compile
import sre_parse
import functools

# public symbols
__all__ = ["match", "search", "sub", "subn", "split", "findall",
           "compile", "purge", "template", "escape",
           "A", "I", "L", "M", "S", "X", "U",
           "ASCII", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL",
           "VERBOSE", "UNICODE", "error"]

__version__ = "2.2.1"

# flags
A = ASCII = sre_compile.SRE_FLAG_ASCII            # assume ascii "locale"
I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE  # ignore case
L = LOCALE = sre_compile.SRE_FLAG_LOCALE          # assume current 8-bit locale
U = UNICODE = sre_compile.SRE_FLAG_UNICODE        # assume unicode "locale"
M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE    # make anchors look for newline
S = DOTALL = sre_compile.SRE_FLAG_DOTALL          # make dot match newline
X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE        # ignore whitespace and comments

# sre extensions (experimental, don't rely on these)
T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE      # disable backtracking
DEBUG = sre_compile.SRE_FLAG_DEBUG                # dump pattern after compilation

# sre exception
error = sre_compile.error

# --------------------------------------------------------------------
# public interface

def match(pattern, string, flags=0):
    """Try to apply the pattern at the start of the string, returning
    a match object, or None if no match was found."""
    return _compile(pattern, flags).match(string)

def search(pattern, string, flags=0):
    """Scan through string looking for a match to the pattern, returning
    a match object, or None if no match was found."""
    return _compile(pattern, flags).search(string)

def sub(pattern, repl, string, count=0, flags=0):
    """Return the string obtained by replacing the leftmost
    non-overlapping occurrences of the pattern in string by the
    replacement repl.  repl can be either a string or a callable;
    if a string, backslash escapes in it are processed.  If it is
    a callable, it's passed the match object and must return
    a replacement string to be used."""
    return _compile(pattern, flags).sub(repl, string, count)

def subn(pattern, repl, string, count=0, flags=0):
    """Return a 2-tuple containing (new_string, number).
    new_string is the string obtained by replacing the leftmost
    non-overlapping occurrences of the pattern in the source
    string by the replacement repl.  number is the number of
    substitutions that were made."""
    return _compile(pattern, flags).subn(repl, string, count)

def split(pattern, string, maxsplit=0, flags=0):
    """Split the source string by the occurrences of the pattern,
    returning a list containing the resulting substrings."""
    return _compile(pattern, flags).split(string, maxsplit)

def findall(pattern, string, flags=0):
    """Return a list of all non-overlapping matches in the string.

    If one or more groups are present in the pattern, return a
    list of groups; this will be a list of tuples if the pattern
    has more than one group.

    Empty matches are included in the result."""
    return _compile(pattern, flags).findall(string)

if sys.hexversion >= 0x02020000:
    __all__.append("finditer")
    def finditer(pattern, string, flags=0):
        """Return an iterator over all non-overlapping matches in the
        string.  For each match, the iterator returns a match object.

        Empty matches are included in the result."""
        return _compile(pattern, flags).finditer(string)

def compile(pattern, flags=0):
    "Compile a regular expression pattern, returning a pattern object."
    return _compile(pattern, flags)

def purge():
    "Clear the regular expression caches"
    _compile_typed.cache_clear()
    _compile_repl.cache_clear()

def template(pattern, flags=0):
    "Compile a template pattern, returning a pattern object"
    return _compile(pattern, flags|T)
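# A minimal usage sketch for the public helpers above (the pattern and
# sample text are illustrative only, not part of the library):
#
#   >>> [m.group() for m in finditer(r"\d+", "10 apples, 20 pears")]
#   ['10', '20']
#   >>> bool(compile(r"pear", I).search("PEAR"))
#   True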
_alphanum_str = frozenset(
    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890")
_alphanum_bytes = frozenset(
    b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890")

def escape(pattern):
    "Escape all non-alphanumeric characters in pattern."
    if isinstance(pattern, str):
        alphanum = _alphanum_str
        s = list(pattern)
        for i in range(len(pattern)):
            c = pattern[i]
            if c not in alphanum:
                if c == "\000":
                    s[i] = "\\000"
                else:
                    s[i] = "\\" + c
        return "".join(s)
    else:
        alphanum = _alphanum_bytes
        s = []
        esc = ord(b"\\")
        for c in pattern:
            if c in alphanum:
                s.append(c)
            else:
                if c == 0:
                    s.extend(b"\\000")
                else:
                    s.append(esc)
                    s.append(c)
        return bytes(s)

# --------------------------------------------------------------------
# internals

# Leftovers from the manual caching scheme; eviction for the compile
# caches is now handled by the functools.lru_cache decorators below.
_cache = {}
_cache_repl = {}

_pattern_type = type(sre_compile.compile("", 0))

_MAXCACHE = 500

def _shrink_cache(cache_dict, max_length, divisor=5):
    """Make room in the given cache.

    Args:
        cache_dict: The cache dictionary to modify.
        max_length: Maximum # of entries in cache_dict before it is shrunk.
        divisor: Cache will shrink to max_length - 1/divisor*max_length items.
    """
    # Toss out a fraction of the entries at random to make room for new ones.
    # A random algorithm was chosen as opposed to simply cache_dict.popitem()
    # as popitem could penalize the same regular expression repeatedly based
    # on its internal hash value.  Being random should spread the cache miss
    # love around.
    cache_keys = tuple(cache_dict.keys())
    overage = len(cache_keys) - max_length
    if overage < 0:
        # Cache is already within limits.  Normally this should not happen
        # but it could due to multithreading.
        return
    number_to_toss = max_length // divisor + overage
    # The import is done here to avoid a circular dependency.
    import random
    if not hasattr(random, 'sample'):
        # Do nothing while resolving the circular dependency:
        #  re->random->warnings->tokenize->string->re
        return
    for doomed_key in random.sample(cache_keys, number_to_toss):
        try:
            del cache_dict[doomed_key]
        except KeyError:
            # Ignore problems if the cache changed from another thread.
            pass
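# A minimal sketch of how _shrink_cache behaves (the cache contents and
# limits here are hypothetical):
#
#   >>> cache = {('a', 0): 1, ('b', 0): 2, ('c', 0): 3}
#   >>> _shrink_cache(cache, max_length=2, divisor=2)
#   >>> len(cache)   # tossed max_length//divisor + overage = 2 entries
#   1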
def _compile(*args):
    return _compile_typed(type(args[0]), *args)

@functools.lru_cache(maxsize=_MAXCACHE)
def _compile_typed(type, *key):
    # internal: compile pattern
    pattern, flags = key
    if isinstance(pattern, _pattern_type):
        if flags:
            raise ValueError(
                "Cannot process flags argument with a compiled pattern")
        return pattern
    if not sre_compile.isstring(pattern):
        raise TypeError("first argument must be string or compiled pattern")
    return sre_compile.compile(pattern, flags)

@functools.lru_cache(maxsize=_MAXCACHE)
def _compile_repl(*key):
    # internal: compile replacement pattern
    repl, pattern = key
    return sre_parse.parse_template(repl, pattern)

def _expand(pattern, match, template):
    # internal: match.expand implementation hook
    template = sre_parse.parse_template(template, pattern)
    return sre_parse.expand_template(template, match)

def _subx(pattern, template):
    # internal: pattern.sub/subn implementation helper
    template = _compile_repl(template, pattern)
    if not template[0] and len(template[1]) == 1:
        # literal replacement
        return template[1][0]
    def filter(match, template=template):
        return sre_parse.expand_template(template, match)
    return filter

# register myself for pickling

import copyreg

def _pickle(p):
    return _compile, (p.pattern, p.flags)

copyreg.pickle(_pattern_type, _pickle, _compile)

# --------------------------------------------------------------------
# experimental stuff (see python-dev discussions for details)

class Scanner:
    def __init__(self, lexicon, flags=0):
        from sre_constants import BRANCH, SUBPATTERN
        self.lexicon = lexicon
        # combine phrases into a compound pattern
        p = []
        s = sre_parse.Pattern()
        s.flags = flags
        for phrase, action in lexicon:
            p.append(sre_parse.SubPattern(s, [
                (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
                ]))
        s.groups = len(p)+1
        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
        self.scanner = sre_compile.compile(p)

    def scan(self, string):
        result = []
        append = result.append
        match = self.scanner.scanner(string).match
        i = 0
        while 1:
            m = match()
            if not m:
                break
            j = m.end()
            if i == j:
                break
            action = self.lexicon[m.lastindex-1][1]
            if hasattr(action, "__call__"):
                self.match = m
                action = action(self, m.group())
            if action is not None:
                append(action)
            i = j
        return result, string[i:]
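# A minimal usage sketch for the experimental Scanner class (the token
# names, lexicon, and sample input are hypothetical):
#
#   >>> scanner = Scanner([
#   ...     (r"[0-9]+",  lambda s, tok: ("INT", tok)),
#   ...     (r"[a-z_]+", lambda s, tok: ("IDENT", tok)),
#   ...     (r"[=+]",    lambda s, tok: ("OP", tok)),
#   ...     (r"\s+",     None),          # skip whitespace
#   ... ])
#   >>> scanner.scan("sum = 3 + 42")
#   ([('IDENT', 'sum'), ('OP', '='), ('INT', '3'), ('OP', '+'), ('INT', '42')], '')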