diff --git a/Doc/library/tokenize.rst b/Doc/library/tokenize.rst
index 111289c767f..c89d3d4b082 100644
--- a/Doc/library/tokenize.rst
+++ b/Doc/library/tokenize.rst
@@ -39,7 +39,7 @@ The primary entry point is a :term:`generator`:
    column where the token begins in the source; a 2-tuple ``(erow, ecol)`` of
    ints specifying the row and column where the token ends in the source; and
    the line on which the token was found. The line passed (the last tuple item)
-   is the *logical* line; continuation lines are included. The 5 tuple is
+   is the *physical* line; continuation lines are included. The 5 tuple is
    returned as a :term:`named tuple` with the field names:
    ``type string start end line``.
 
diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py
index 279d322971d..0f9fde3fb0d 100644
--- a/Lib/lib2to3/pgen2/tokenize.py
+++ b/Lib/lib2to3/pgen2/tokenize.py
@@ -346,7 +346,7 @@ def generate_tokens(readline):
     column where the token begins in the source; a 2-tuple (erow, ecol) of
     ints specifying the row and column where the token ends in the source; and
     the line on which the token was found. The line passed is the
-    logical line; continuation lines are included.
+    physical line; continuation lines are included.
     """
     lnum = parenlev = continued = 0
     contstr, needcont = '', 0
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 0f9d5dd554d..738fb71d188 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -415,7 +415,7 @@ def tokenize(readline):
     column where the token begins in the source; a 2-tuple (erow, ecol) of
     ints specifying the row and column where the token ends in the source; and
     the line on which the token was found. The line passed is the
-    logical line; continuation lines are included.
+    physical line; continuation lines are included.
 
     The first token sequence will always be an ENCODING token which tells
     you which encoding was used to decode the bytes stream.
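
For reference (not part of the patch): a minimal sketch, using only the stdlib io and tokenize modules, of why "physical" is the accurate term here. For a statement continued across physical lines inside parentheses, each token's line field holds only the physical line the token appears on, not the assembled logical line. The "continuation lines are included" wording covers tokens such as multi-line strings, whose line field spans every physical line of the token.

    import io
    import tokenize

    # One logical line spanning two physical lines via an open parenthesis.
    source = "x = (1 +\n     2)\n"

    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.string == "2":
            # tok.line is the physical line containing the token,
            # not the whole logical line "x = (1 +\n     2)\n".
            print(repr(tok.line))  # prints '     2)\n'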