"""Module to analyze Python source code; for syntax coloring tools.
|
|
|
|
|
|
|
|
Interface:
|
2004-07-18 03:16:08 -03:00
|
|
|
tags = fontify(pytext, searchfrom, searchto)
|
1999-01-30 18:39:17 -04:00
|
|
|
|
|
|
|
The 'pytext' argument is a string containing Python source code.
|
2004-07-18 03:16:08 -03:00
|
|
|
The (optional) arguments 'searchfrom' and 'searchto' may contain a slice in pytext.
|
1999-01-30 18:39:17 -04:00
|
|
|
The returned value is a list of tuples, formatted like this:
|
2004-07-18 03:16:08 -03:00
|
|
|
[('keyword', 0, 6, None), ('keyword', 11, 17, None), ('comment', 23, 53, None), etc. ]
|
1999-01-30 18:39:17 -04:00
|
|
|
The tuple contents are always like this:
|
2004-07-18 03:16:08 -03:00
|
|
|
(tag, startindex, endindex, sublist)
|
1999-01-30 18:39:17 -04:00
|
|
|
tag is one of 'keyword', 'string', 'comment' or 'identifier'
|
2004-07-18 03:16:08 -03:00
|
|
|
sublist is not used, hence always None.
|
1999-01-30 18:39:17 -04:00
|
|
|
"""
# Based on FontText.py by Mitchell S. Chapman,
# which was modified by Zachary Roadhouse,
# then un-Tk'd by Just van Rossum.
# Many thanks for regular expression debugging & authoring are due to:
#     Tim (the-incredib-ly y'rs) Peters and Cristian Tismer
# So, who owns the copyright? ;-) How about this:
# Copyright 1996-2001:
#     Mitchell S. Chapman,
#     Zachary Roadhouse,
#     Tim Peters,
#     Just van Rossum

__version__ = "0.4"

import string
import re


# First a little helper, since I don't like to repeat things. (Tismer speaking)
def replace(where, what, with):
    return string.join(string.split(where, what), with)
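# For example (purely illustrative): replace("a.b.c", ".", "_") == "a_b_c".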
# This list of keywords is taken from ref/node13.html of the
# Python 1.3 HTML documentation. ("access" is intentionally omitted.)
keywordsList = [
    "assert", "exec",
    "del", "from", "lambda", "return",
    "and", "elif", "global", "not", "try",
    "break", "else", "if", "or", "while",
    "class", "except", "import", "pass",
    "continue", "finally", "in", "print",
    "def", "for", "is", "raise", "yield"]

# Build up a regular expression which will match anything
# interesting, including multi-line triple-quoted strings.
commentPat = r"#[^\n]*"

pat = r"q[^\\q\n]*(\\[\000-\377][^\\q\n]*)*q"
quotePat = replace(pat, "q", "'") + "|" + replace(pat, 'q', '"')
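# quotePat matches ordinary single-line string literals, e.g. 'it\'s' or
# "a \"quoted\" word"; the "\n" in the character classes keeps a match from
# running past the end of the line.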
# Way to go, Tim!
pat = r"""
    qqq
    [^\\q]*
    (
        (   \\[\000-\377]
        |   q
            (   \\[\000-\377]
            |   [^\\q]
            |   q
                (   \\[\000-\377]
                |   [^\\q]
                )
            )
        )
        [^\\q]*
    )*
    qqq
"""
pat = string.join(string.split(pat), '')  # get rid of whitespace
tripleQuotePat = replace(pat, "q", "'") + "|" + replace(pat, 'q', '"')
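# Unlike quotePat above, these character classes do not exclude "\n", so the
# triple-quote pattern can span multiple source lines.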
# Build up a regular expression which matches all and only
# Python keywords. This will let us skip the uninteresting
# identifier references.
# nonKeyPat identifies characters which may legally precede
# a keyword pattern.
nonKeyPat = r"(^|[^a-zA-Z0-9_.\"'])"

keyPat = nonKeyPat + "(" + "|".join(keywordsList) + ")" + nonKeyPat
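# So keyPat matches, e.g., the " in " of "x in y" (with one extra character on
# each side, which fontify() strips again), but not the "in" inside "index".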
matchPat = commentPat + "|" + keyPat + "|" + tripleQuotePat + "|" + quotePat
matchRE = re.compile(matchPat)

idKeyPat = "[ \t]*[A-Za-z_][A-Za-z_0-9.]*"  # identifier, with optional leading whitespace
idRE = re.compile(idKeyPat)

def fontify(pytext, searchfrom=0, searchto=None):
    if searchto is None:
        searchto = len(pytext)
    # Cache a few attributes for quicker reference.
    search = matchRE.search
    idSearch = idRE.search

    tags = []
    tags_append = tags.append
    commentTag = 'comment'
    stringTag = 'string'
    keywordTag = 'keyword'
    identifierTag = 'identifier'

    start = 0
    end = searchfrom
    while 1:
        m = search(pytext, end)
        if m is None:
            break  # EXIT LOOP
        start = m.start()
        if start >= searchto:
            break  # EXIT LOOP
        match = m.group(0)
        end = start + len(match)
        c = match[0]
        if c not in "#'\"":
            # Must have matched a keyword.
            if start != searchfrom:
                # There's still a redundant char before and after it -- strip both.
                match = match[1:-1]
                start = start + 1
            else:
                # This is the first keyword in the text.
                # Only a space at the end.
                match = match[:-1]
            # In both cases one trailing char was stripped; adjust the end.
            end = end - 1
            tags_append((keywordTag, start, end, None))
            # If this was a defining keyword, look ahead to the
            # following identifier.
            if match in ["def", "class"]:
                m = idSearch(pytext, end)
                if m is not None:
                    start = m.start()
                    if start == end:
                        match = m.group(0)
                        end = start + len(match)
                        tags_append((identifierTag, start, end, None))
        elif c == "#":
            tags_append((commentTag, start, end, None))
        else:
            tags_append((stringTag, start, end, None))
    return tags

def test(path):
    f = open(path)
    text = f.read()
    f.close()
    tags = fontify(text)
    for tag, start, end, sublist in tags:
        print tag, repr(text[start:end])
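# A minimal command-line hook (illustrative sketch): run test() on the file
# named as the first argument and dump its tags.
if __name__ == '__main__':
    import sys
    test(sys.argv[1])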
|