From 1babdfc48afc60afe5ae708f77dad8a641bf36ec Mon Sep 17 00:00:00 2001
From: Tim Peters
Date: Sun, 24 Nov 2002 19:19:09 +0000
Subject: [PATCH] Reduced memory burden by iterating over the normalization
 test input file directly (instead of sucking it all into a list of lines
 first).

---
 Lib/test/regrtest.py           | 2 +-
 Lib/test/test_normalization.py | 4 +---
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py
index d0650e6aace..f870527088e 100755
--- a/Lib/test/regrtest.py
+++ b/Lib/test/regrtest.py
@@ -506,7 +506,7 @@ def printlist(x, width=70, indent=4):
 # test_normalization
 #     Whether a skip is expected here depends on whether a large test
 #     input file has been downloaded.  test_normalization.skip_expected
-#     controls that
+#     controls that.
 
 _expectations = {
     'win32':
diff --git a/Lib/test/test_normalization.py b/Lib/test/test_normalization.py
index b6737391cb5..7e18c973ffa 100644
--- a/Lib/test/test_normalization.py
+++ b/Lib/test/test_normalization.py
@@ -33,10 +33,8 @@ def test_main():
         raise TestSkipped(TESTDATAFILE + " not found, download from " +
                           "http://www.unicode.org/Public/UNIDATA/" +
                           TESTDATAFILE)
-    data = open(TESTDATAFILE).readlines()
-
     part1_data = {}
-    for line in data:
+    for line in open(TESTDATAFILE):
         if '#' in line:
             line = line.split('#')[0]
         line = line.strip()
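
A minimal sketch of the before/after pattern the second hunk applies. This
is not code from the patch: the function names and the hard-coded filename
are illustrative only, and the comment stripping merely mirrors what the
test does with each line.

def iter_data_eager(path):
    # Before the patch: readlines() materializes every line of the file
    # in one list, so peak memory grows with the size of the file.
    data = open(path).readlines()
    for line in data:
        if '#' in line:
            line = line.split('#')[0]
        yield line.strip()

def iter_data_lazy(path):
    # After the patch: a file object is iterable over its own lines, so
    # only the current line is held in memory at a time.
    for line in open(path):
        if '#' in line:
            line = line.split('#')[0]
        yield line.strip()

The one-line change works because file objects are directly iterable over
their lines (available since Python 2.2), which matters here since the
Unicode normalization data file is large enough that holding every line in
a list at once is a noticeable cost.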