Teach the peephole optimizer to fold simple constant expressions.

Raymond Hettinger 2005-01-02 06:17:33 +00:00
parent 64585988af
commit c34f8673a1
3 changed files with 149 additions and 1 deletion
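In short: when the compiler sees two constant loads feeding a binary operator, it now evaluates the operation at compile time and loads the precomputed result instead. A quick way to observe the effect (a hypothetical interactive check, not part of the commit; exact disassembly formatting varies by version):

```python
import dis

# After this change, the chained constant expression should compile to a
# single LOAD_CONST 9, with no BINARY_ADD opcodes left in the bytecode.
dis.dis(compile('a = 2+3+4', '<example>', 'exec'))
```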


@@ -102,6 +102,34 @@ class TestTranforms(unittest.TestCase):
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             ],)

    def test_folding_of_binops_on_constants(self):
        for line, elem in (
            ('a = 2+3+4', '(9)'),               # chained fold
            ('"@"*4', "('@@@@')"),              # check string ops
            ('a="abc" + "def"', "('abcdef')"),  # check string ops
            ('a = 3**4', '(81)'),               # binary power
            ('a = 3*4', '(12)'),                # binary multiply
            ('a = 13/4.0', '(3.25)'),           # binary divide
            ('a = 13//4', '(3)'),               # binary floor divide
            ('a = 14%4', '(2)'),                # binary modulo
            ('a = 2+3', '(5)'),                 # binary add
            ('a = 13-4', '(9)'),                # binary subtract
            ('a = (12,13)[1]', '(13)'),         # binary subscr
            ('a = 13 << 2', '(52)'),            # binary lshift
            ('a = 13 >> 2', '(3)'),             # binary rshift
            ('a = 13 & 7', '(5)'),              # binary and
            ('a = 13 ^ 7', '(10)'),             # binary xor
            ('a = 13 | 7', '(15)'),             # binary or
            ):
            asm = dis_single(line)
            self.assert_(elem in asm, asm)
            self.assert_('BINARY_' not in asm)

        # Verify that unfoldables are skipped
        asm = dis_single('a=2+"b"')
        self.assert_('(2)' in asm)
        self.assert_("('b')" in asm)

    def test_elim_extra_return(self):
        # RETURN LOAD_CONST None RETURN  -->  RETURN
        def f(x):

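The assertions above grep the textual disassembly returned by dis_single(), a helper defined earlier in test_peepholer.py and not part of this hunk. A rough sketch of what such a helper amounts to (the name dis_single is real, but this body is a reconstruction and may differ from the test file's):

```python
import dis
import sys
from StringIO import StringIO  # Python 2-era module, matching this commit

def dis_single(line):
    # Compile one statement and return its disassembly as a string,
    # so tests can check for the presence/absence of opcodes and constants.
    code = compile(line, '<single>', 'single')
    capture = StringIO()
    saved = sys.stdout
    sys.stdout = capture
    try:
        dis.dis(code)
    finally:
        sys.stdout = saved
    return capture.getvalue()
```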

@@ -13,6 +13,9 @@ Core and builtins

- min() and max() now support key= arguments with the same meaning as in
  list.sort().

- The peephole optimizer now performs simple constant folding in expressions:
  (2+3) --> (5).

Extension Modules
-----------------


@@ -439,6 +439,99 @@ tuple_of_constants(unsigned char *codestr, int n, PyObject *consts)
        return 1;
}

/* Replace LOAD_CONST c1, LOAD_CONST c2, BINOP
   with         LOAD_CONST binop(c1,c2)
   The consts table must still be in list form so that the
   new constant can be appended.
   Called with codestr pointing to the first LOAD_CONST.
   Abandons the transformation if the folding fails (e.g. 1+'a'). */
static int
fold_binops_on_constants(unsigned char *codestr, PyObject *consts)
{
        PyObject *newconst, *v, *w;
        int len_consts, opcode;

        /* Pre-conditions */
        assert(PyList_CheckExact(consts));
        assert(codestr[0] == LOAD_CONST);
        assert(codestr[3] == LOAD_CONST);

        /* Create new constant */
        v = PyList_GET_ITEM(consts, GETARG(codestr, 0));
        w = PyList_GET_ITEM(consts, GETARG(codestr, 3));
        opcode = codestr[6];
        switch (opcode) {
        case BINARY_POWER:
                newconst = PyNumber_Power(v, w, Py_None);
                break;
        case BINARY_MULTIPLY:
                newconst = PyNumber_Multiply(v, w);
                break;
        case BINARY_DIVIDE:
                if (!_Py_QnewFlag) {
                        newconst = PyNumber_Divide(v, w);
                        break;
                }
                /* -Qnew is in effect:  fall through to
                   BINARY_TRUE_DIVIDE */
        case BINARY_TRUE_DIVIDE:
                newconst = PyNumber_TrueDivide(v, w);
                break;
        case BINARY_FLOOR_DIVIDE:
                newconst = PyNumber_FloorDivide(v, w);
                break;
        case BINARY_MODULO:
                newconst = PyNumber_Remainder(v, w);
                break;
        case BINARY_ADD:
                newconst = PyNumber_Add(v, w);
                break;
        case BINARY_SUBTRACT:
                newconst = PyNumber_Subtract(v, w);
                break;
        case BINARY_SUBSCR:
                newconst = PyObject_GetItem(v, w);
                break;
        case BINARY_LSHIFT:
                newconst = PyNumber_Lshift(v, w);
                break;
        case BINARY_RSHIFT:
                newconst = PyNumber_Rshift(v, w);
                break;
        case BINARY_AND:
                newconst = PyNumber_And(v, w);
                break;
        case BINARY_XOR:
                newconst = PyNumber_Xor(v, w);
                break;
        case BINARY_OR:
                newconst = PyNumber_Or(v, w);
                break;
        default:
                /* Called with an unknown opcode */
                assert(0);
                return 0;
        }
        if (newconst == NULL) {
                PyErr_Clear();
                return 0;
        }

        /* Append folded constant into consts table */
        len_consts = PyList_GET_SIZE(consts);
        if (PyList_Append(consts, newconst)) {
                Py_DECREF(newconst);
                return 0;
        }
        Py_DECREF(newconst);

        /* Write NOP NOP NOP NOP LOAD_CONST newconst */
        memset(codestr, NOP, 4);
        codestr[4] = LOAD_CONST;
        SETARG(codestr, 4, len_consts);
        return 1;
}
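For orientation: under the 2.x bytecode format, LOAD_CONST is one opcode byte followed by a little-endian two-byte argument, so the matched pattern LOAD_CONST, LOAD_CONST, BINOP occupies exactly seven bytes, and the rewrite above fills the same seven bytes (four NOPs plus a three-byte LOAD_CONST), so no branch target elsewhere in the code shifts. A small Python model of that rewrite, with hypothetical names (the real GETARG/SETARG are C macros in compile.c):

```python
NOP, LOAD_CONST = 9, 100  # opcode numbers from Python 2.x's opcode module

def fold_window(codestr, new_const_index):
    # codestr is a 7-byte bytearray: LOAD_CONST c1, LOAD_CONST c2, BINOP.
    # Rewrite it in place as NOP NOP NOP NOP, LOAD_CONST new_const_index.
    codestr[0:4] = bytearray([NOP] * 4)
    codestr[4] = LOAD_CONST
    codestr[5] = new_const_index & 0xff         # SETARG: low byte
    codestr[6] = (new_const_index >> 8) & 0xff  # SETARG: high byte
    return codestr
```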
static unsigned int *
markblocks(unsigned char *code, int len)
{
@@ -614,7 +707,6 @@ optimize_code(PyObject *code, PyObject* consts, PyObject *names, PyObject *lineno_obj)
                        h = i - 3 * j;
                        if (h >= 0 &&
                            j <= lastlc &&
                            codestr[h] == LOAD_CONST &&
                            ISBASICBLOCK(blocks, h, 3*(j+1)) &&
                            tuple_of_constants(&codestr[h], j, consts)) {
                                assert(codestr[i] == LOAD_CONST);
@@ -640,6 +732,31 @@ optimize_code(PyObject *code, PyObject* consts, PyObject *names, PyObject *lineno_obj)
                        }
                        break;

                /* Fold binary ops on constants.
                   LOAD_CONST c1 LOAD_CONST c2 BINOP --> LOAD_CONST binop(c1,c2) */
                case BINARY_POWER:
                case BINARY_MULTIPLY:
                case BINARY_DIVIDE:
                case BINARY_TRUE_DIVIDE:
                case BINARY_FLOOR_DIVIDE:
                case BINARY_MODULO:
                case BINARY_ADD:
                case BINARY_SUBTRACT:
                case BINARY_SUBSCR:
                case BINARY_LSHIFT:
                case BINARY_RSHIFT:
                case BINARY_AND:
                case BINARY_XOR:
                case BINARY_OR:
                        if (lastlc >= 2 &&
                            ISBASICBLOCK(blocks, i-6, 7) &&
                            fold_binops_on_constants(&codestr[i-6], consts)) {
                                i -= 2;
                                assert(codestr[i] == LOAD_CONST);
                                cumlc = 1;
                        }
                        break;
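Why the scan backs up by two: the matched pattern occupies the seven bytes codestr[i-6] through codestr[i] (two three-byte LOAD_CONSTs plus the one-byte binary opcode). After fold_binops_on_constants() rewrites that window as four NOPs plus a three-byte LOAD_CONST, the new LOAD_CONST begins at codestr[i-2]; i -= 2 re-points the scan there, the assert confirms it, and cumlc = 1 records that a single constant load now sits at the current position.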
                /* Simplify conditional jump to conditional jump where the
                   result of the first test implies the success of a similar
                   test or the failure of the opposite test.