mirror of https://github.com/python/cpython

commit 3f54d1cfe7
Merge branch 'main' of https://github.com/python/cpython
@@ -2,7 +2,7 @@ name: Mark stale pull requests

on:
  schedule:
    - cron: "0 0 * * *"
    - cron: "0 */12 * * *"

permissions:
  pull-requests: write
@@ -841,10 +841,11 @@ Glossary
      Some named tuples are built-in types (such as the above examples).
      Alternatively, a named tuple can be created from a regular class
      definition that inherits from :class:`tuple` and that defines named
      fields. Such a class can be written by hand or it can be created with
      the factory function :func:`collections.namedtuple`. The latter
      technique also adds some extra methods that may not be found in
      hand-written or built-in named tuples.
      fields. Such a class can be written by hand, or it can be created by
      inheriting :class:`typing.NamedTuple`, or with the factory function
      :func:`collections.namedtuple`. The latter techniques also add some
      extra methods that may not be found in hand-written or built-in named
      tuples.

   namespace
      The place where a variable is stored. Namespaces are implemented as
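The three flavours described in this entry can be compared side by side; the class and field names below are illustrative only, not taken from the glossary:

from collections import namedtuple
from typing import NamedTuple

# Hand-written named tuple: a regular class that inherits from tuple.
class Point2D(tuple):
    __slots__ = ()
    def __new__(cls, x, y):
        return super().__new__(cls, (x, y))
    @property
    def x(self):
        return self[0]
    @property
    def y(self):
        return self[1]

# Created by inheriting typing.NamedTuple.
class Point3D(NamedTuple):
    x: float
    y: float
    z: float

# Created with the factory function collections.namedtuple.
Color = namedtuple("Color", ["red", "green", "blue"])

# The latter two also gain extra helpers such as _asdict() and _replace().
print(Point2D(1, 2).x, Point3D(1.0, 2.0, 3.0).z, Color(255, 0, 0).red)  # 1 3.0 255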
@@ -998,7 +998,7 @@ The following recipes have a more mathematical flavor:

def sum_of_squares(it):
    "Add up the squares of the input values."
    # sum_of_squares([10, 20, 30]) -> 1400
    # sum_of_squares([10, 20, 30]) --> 1400
    return math.sumprod(*tee(it))

def reshape(matrix, cols):
@@ -1019,17 +1019,16 @@ The following recipes have a more mathematical flavor:

def convolve(signal, kernel):
    """Discrete linear convolution of two iterables.
    Equivalent to polynomial multiplication.

    The kernel is fully consumed before the calculations begin.
    The signal is consumed lazily and can be infinite.

    Convolutions are mathematically commutative.
    If the signal and kernel are swapped,
    the output will be the same.
    Convolutions are mathematically commutative; however, the inputs are
    evaluated differently. The signal is consumed lazily and can be
    infinite. The kernel is fully consumed before the calculations begin.

    Article:  https://betterexplained.com/articles/intuitive-convolution/
    Video:  https://www.youtube.com/watch?v=KuXjwB4LzSA
    """
    # convolve([1, -1, -20], [1, -3]) --> 1 -4 -17 60
    # convolve(data, [0.25, 0.25, 0.25, 0.25]) --> Moving average (blur)
    # convolve(data, [1/2, 0, -1/2]) --> 1st derivative estimate
    # convolve(data, [1, -2, 1]) --> 2nd derivative estimate
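A self-contained sketch with the consumption pattern this docstring describes (kernel consumed eagerly, signal lazily) might look like the following; it is an approximation, not necessarily the exact recipe from the docs, and needs Python 3.12+ for math.sumprod:

import math
from collections import deque
from itertools import chain, repeat

def convolve(signal, kernel):
    # The kernel is materialized (and reversed) up front ...
    kernel = tuple(kernel)[::-1]
    n = len(kernel)
    window = deque([0] * n, maxlen=n)
    # ... while the signal is pulled one element at a time, so it may be infinite.
    for x in chain(signal, repeat(0, n - 1)):
        window.append(x)
        yield math.sumprod(kernel, window)

assert list(convolve([1, -1, -20], [1, -3])) == [1, -4, -17, 60]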
@@ -1067,7 +1066,7 @@ The following recipes have a more mathematical flavor:
    f(x) = x³ -4x² -17x + 60
    f'(x) = 3x² -8x -17
    """
    # polynomial_derivative([1, -4, -17, 60]) -> [3, -8, -17]
    # polynomial_derivative([1, -4, -17, 60]) --> [3, -8, -17]
    n = len(coefficients)
    powers = reversed(range(1, n))
    return list(map(operator.mul, coefficients, powers))
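Put together with the import it needs, the derivative recipe shown in this hunk amounts to roughly:

import operator

def polynomial_derivative(coefficients):
    # Coefficients run from the highest power down to the constant term:
    # [1, -4, -17, 60] represents x**3 - 4*x**2 - 17*x + 60.
    n = len(coefficients)
    powers = reversed(range(1, n))
    return list(map(operator.mul, coefficients, powers))

assert polynomial_derivative([1, -4, -17, 60]) == [3, -8, -17]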
@@ -1169,6 +1168,12 @@ The following recipes have a more mathematical flavor:

>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> # Verify that the input is consumed lazily
>>> it = iter('abcdef')
>>> take(3, it)
['a', 'b', 'c']
>>> list(it)
['d', 'e', 'f']

>>> list(prepend(1, [2, 3, 4]))
[1, 2, 3, 4]
@@ -1181,25 +1186,45 @@ The following recipes have a more mathematical flavor:

>>> list(tail(3, 'ABCDEFG'))
['E', 'F', 'G']
>>> # Verify the input is consumed greedily
>>> input_iterator = iter('ABCDEFG')
>>> output_iterator = tail(3, input_iterator)
>>> list(input_iterator)
[]

>>> it = iter(range(10))
>>> consume(it, 3)
>>> # Verify the input is consumed lazily
>>> next(it)
3
>>> # Verify the input is consumed completely
>>> consume(it)
>>> next(it, 'Done')
'Done'

>>> nth('abcde', 3)
'd'

>>> nth('abcde', 9) is None
True
>>> # Verify that the input is consumed lazily
>>> it = iter('abcde')
>>> nth(it, 2)
'c'
>>> list(it)
['d', 'e']

>>> [all_equal(s) for s in ('', 'A', 'AAAA', 'AAAB', 'AAABA')]
[True, True, True, False, False]
>>> [all_equal(s, key=str.casefold) for s in ('', 'A', 'AaAa', 'AAAB', 'AAABA')]
[True, True, True, False, False]
>>> # Verify that the input is consumed lazily and that only
>>> # one element of a second equivalence class is used to disprove
>>> # the assertion that all elements are equal.
>>> it = iter('aaabbbccc')
>>> all_equal(it)
False
>>> ''.join(it)
'bbccc'

>>> quantify(range(99), lambda x: x%2==0)
50
@@ -1222,6 +1247,11 @@ The following recipes have a more mathematical flavor:

>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> # Verify greedy consumption of input iterator
>>> input_iterator = iter('abc')
>>> output_iterator = ncycles(input_iterator, 3)
>>> list(input_iterator)
[]

>>> sum_of_squares([10, 20, 30])
1400
@@ -1248,12 +1278,22 @@ The following recipes have a more mathematical flavor:

>>> list(transpose([(1, 2, 3), (11, 22, 33)]))
[(1, 11), (2, 22), (3, 33)]
>>> # Verify that the inputs are consumed lazily
>>> input1 = iter([1, 2, 3])
>>> input2 = iter([11, 22, 33])
>>> output_iterator = transpose([input1, input2])
>>> next(output_iterator)
(1, 11)
>>> list(zip(input1, input2))
[(2, 22), (3, 33)]

>>> list(matmul([(7, 5), (3, 5)], [[2, 5], [7, 9]]))
[(49, 80), (41, 60)]
>>> list(matmul([[2, 5], [7, 9], [3, 4]], [[7, 11, 5, 4, 9], [3, 5, 2, 6, 3]]))
[(29, 47, 20, 38, 33), (76, 122, 53, 82, 90), (33, 53, 23, 36, 39)]

>>> list(convolve([1, -1, -20], [1, -3])) == [1, -4, -17, 60]
True
>>> data = [20, 40, 24, 32, 20, 28, 16]
>>> list(convolve(data, [0.25, 0.25, 0.25, 0.25]))
[5.0, 15.0, 21.0, 29.0, 29.0, 26.0, 24.0, 16.0, 11.0, 4.0]
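The transpose and matmul behaviour exercised above is consistent with sketches along these lines (approximations of the recipes, not verbatim copies; itertools.batched and math.sumprod require Python 3.12+):

import math
from itertools import batched, product, starmap

def transpose(matrix):
    # Swap rows and columns; strict zip rejects ragged input.
    return zip(*matrix, strict=True)

def matmul(m1, m2):
    # Each output row pairs a row of m1 with every column of m2.
    n = len(m2[0])
    return batched(starmap(math.sumprod, product(m1, transpose(m2))), n)

assert list(matmul([(7, 5), (3, 5)], [[2, 5], [7, 9]])) == [(49, 80), (41, 60)]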
@@ -1261,6 +1301,18 @@ The following recipes have a more mathematical flavor:
[20, 20, -16, 8, -12, 8, -12, -16]
>>> list(convolve(data, [1, -2, 1]))
[20, 0, -36, 24, -20, 20, -20, -4, 16]
>>> # Verify signal is consumed lazily and the kernel greedily
>>> signal_iterator = iter([10, 20, 30, 40, 50])
>>> kernel_iterator = iter([1, 2, 3])
>>> output_iterator = convolve(signal_iterator, kernel_iterator)
>>> list(kernel_iterator)
[]
>>> next(output_iterator)
10
>>> next(output_iterator)
40
>>> list(signal_iterator)
[30, 40, 50]

>>> from fractions import Fraction
>>> from decimal import Decimal
@@ -1348,6 +1400,17 @@ The following recipes have a more mathematical flavor:
>>> # Test list input. Lists do not support None for the stop argument
>>> list(iter_index(list('AABCADEAF'), 'A'))
[0, 1, 4, 7]
>>> # Verify that input is consumed lazily
>>> input_iterator = iter('AABCADEAF')
>>> output_iterator = iter_index(input_iterator, 'A')
>>> next(output_iterator)
0
>>> next(output_iterator)
1
>>> next(output_iterator)
4
>>> ''.join(input_iterator)
'DEAF'

>>> list(sieve(30))
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
@@ -1499,6 +1562,17 @@ The following recipes have a more mathematical flavor:
[0, 2, 4, 6, 8]
>>> list(odds)
[1, 3, 5, 7, 9]
>>> # Verify that the input is consumed lazily
>>> input_iterator = iter(range(10))
>>> evens, odds = partition(is_odd, input_iterator)
>>> next(odds)
1
>>> next(odds)
3
>>> next(evens)
0
>>> list(input_iterator)
[4, 5, 6, 7, 8, 9]

>>> list(subslices('ABCD'))
['A', 'AB', 'ABC', 'ABCD', 'B', 'BC', 'BCD', 'C', 'CD', 'D']
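The laziness being verified in the partition doctests follows from a tee-based split; a sketch consistent with them (not necessarily the exact recipe):

from itertools import filterfalse, tee

def partition(predicate, iterable):
    # Two coupled views over one input: falsey items first, truthy items second.
    false_it, true_it = tee(iterable)
    return filterfalse(predicate, false_it), filter(predicate, true_it)

evens, odds = partition(lambda x: x % 2, range(10))
assert list(evens) == [0, 2, 4, 6, 8] and list(odds) == [1, 3, 5, 7, 9]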
@@ -1518,6 +1592,13 @@ The following recipes have a more mathematical flavor:
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBcCAD', str.casefold))
['A', 'B', 'c', 'D']
>>> # Verify that the input is consumed lazily
>>> input_iterator = iter('AAAABBBCCDAABBB')
>>> output_iterator = unique_everseen(input_iterator)
>>> next(output_iterator)
'A'
>>> ''.join(input_iterator)
'AAABBBCCDAABBB'

>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
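Sketches consistent with the two unique_* behaviours exercised in this hunk and the next one (lazy consumption, with and without memory of earlier runs); approximations, not verbatim recipes:

from itertools import groupby
from operator import itemgetter

def unique_everseen(iterable, key=None):
    # Remembers every key seen so far; each element is examined lazily.
    seen = set()
    for element in iterable:
        k = element if key is None else key(element)
        if k not in seen:
            seen.add(k)
            yield element

def unique_justseen(iterable, key=None):
    # Only collapses adjacent duplicates: keep the first element of each run.
    return map(next, map(itemgetter(1), groupby(iterable, key)))

assert list(unique_everseen('AAAABBBCCDAABBB')) == ['A', 'B', 'C', 'D']
assert list(unique_justseen('AAAABBBCCDAABBB')) == ['A', 'B', 'C', 'D', 'A', 'B']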
@@ -1525,6 +1606,13 @@ The following recipes have a more mathematical flavor:
['A', 'B', 'C', 'A', 'D']
>>> list(unique_justseen('ABBcCAD', str.casefold))
['A', 'B', 'c', 'A', 'D']
>>> # Verify that the input is consumed lazily
>>> input_iterator = iter('AAAABBBCCDAABBB')
>>> output_iterator = unique_justseen(input_iterator)
>>> next(output_iterator)
'A'
>>> ''.join(input_iterator)
'AAABBBCCDAABBB'

>>> d = dict(a=1, b=2, c=3)
>>> it = iter_except(d.popitem, KeyError)
@@ -1545,6 +1633,12 @@ The following recipes have a more mathematical flavor:

>>> first_true('ABC0DEF1', '9', str.isdigit)
'0'
>>> # Verify that inputs are consumed lazily
>>> it = iter('ABC0DEF1')
>>> first_true(it, predicate=str.isdigit)
'0'
>>> ''.join(it)
'DEF1'


.. testcode::
@@ -1233,6 +1233,10 @@ These can be used as types in annotations. They all support subscription using

   .. versionadded:: 3.5.3

   .. versionchanged:: 3.13

      :data:`ClassVar` can now be nested in :data:`Final` and vice versa.

.. data:: Final

   Special typing construct to indicate final names to type checkers.
@@ -1256,6 +1260,10 @@ These can be used as types in annotations. They all support subscription using

   .. versionadded:: 3.8

   .. versionchanged:: 3.13

      :data:`Final` can now be nested in :data:`ClassVar` and vice versa.

.. data:: Required

   Special typing construct to mark a :class:`TypedDict` key as required.
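A small example of the nesting the two notes above describe; per the changelog it needs Python 3.13 or newer at runtime (earlier versions reject the nested forms), and how a given type checker treats it is up to that checker:

from typing import ClassVar, Final

class Settings:
    # A class variable that must not be reassigned ...
    DEBUG: ClassVar[Final[bool]] = False
    # ... and the same intent spelled the other way around.
    VERSION: Final[ClassVar[str]] = "1.0"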
@@ -268,6 +268,18 @@ def modules_cleanup(oldmodules):
    sys.modules.update(oldmodules)


@contextlib.contextmanager
def isolated_modules():
    """
    Save modules on entry and cleanup on exit.
    """
    (saved,) = modules_setup()
    try:
        yield
    finally:
        modules_cleanup(saved)


def mock_register_at_fork(func):
    # bpo-30599: Mock os.register_at_fork() when importing the random module,
    # since this function doesn't allow to unregister callbacks and would leak
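A hypothetical use of the new helper, mirroring how CleanImport is used elsewhere in the test suite (the test class, method, and imported module here are made up for illustration):

import unittest
from test.support import import_helper

class ScratchImportTests(unittest.TestCase):
    def test_scratch_import(self):
        with import_helper.isolated_modules():
            import xml.dom.minidom  # imported only for its side effect on sys.modules
        # Modules first imported inside the block are dropped again on exit,
        # so the import does not leak into later tests.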
@@ -293,71 +293,6 @@ class TestIncompleteFrameAreInvisible(unittest.TestCase):
        """)
        assert_python_ok("-c", code)

    @support.cpython_only
    @unittest.skipIf(Py_GIL_DISABLED, "test requires precise GC scheduling")
    def test_sneaky_frame_object(self):

        def trace(frame, event, arg):
            """
            Don't actually do anything, just force a frame object to be created.
            """

        def callback(phase, info):
            """
            Yo dawg, I heard you like frames, so I'm allocating a frame while
            you're allocating a frame, so you can have a frame while you have a
            frame!
            """
            nonlocal sneaky_frame_object
            sneaky_frame_object = sys._getframe().f_back.f_back
            # We're done here:
            gc.callbacks.remove(callback)

        def f():
            while True:
                yield

        old_threshold = gc.get_threshold()
        old_callbacks = gc.callbacks[:]
        old_enabled = gc.isenabled()
        old_trace = sys.gettrace()
        try:
            # Stop the GC for a second while we set things up:
            gc.disable()
            # Create a paused generator:
            g = f()
            next(g)
            # Move all objects to the oldest generation, and tell the GC to run
            # on the *very next* allocation:
            gc.collect()
            gc.set_threshold(1, 0, 0)
            sys._clear_internal_caches()
            # Okay, so here's the nightmare scenario:
            # - We're tracing the resumption of a generator, which creates a new
            #   frame object.
            # - The allocation of this frame object triggers a collection
            #   *before* the frame object is actually created.
            # - During the collection, we request the exact same frame object.
            #   This test does it with a GC callback, but in real code it would
            #   likely be a trace function, weakref callback, or finalizer.
            # - The collection finishes, and the original frame object is
            #   created. We now have two frame objects fighting over ownership
            #   of the same interpreter frame!
            sys.settrace(trace)
            gc.callbacks.append(callback)
            sneaky_frame_object = None
            gc.enable()
            next(g)
            # g.gi_frame should be the frame object from the callback (the
            # one that was *requested* second, but *created* first):
            self.assertIs(g.gi_frame, sneaky_frame_object)
        finally:
            gc.set_threshold(*old_threshold)
            gc.callbacks[:] = old_callbacks
            sys.settrace(old_trace)
            if old_enabled:
                gc.enable()

    @support.cpython_only
    @threading_helper.requires_working_threading()
    def test_sneaky_frame_object_teardown(self):
@@ -70,7 +70,7 @@ class SiteDir:
        self.addCleanup(self.fixtures.close)
        self.site_dir = self.fixtures.enter_context(os_helper.temp_dir())
        self.fixtures.enter_context(import_helper.DirsOnSysPath(self.site_dir))
        self.fixtures.enter_context(import_helper.CleanImport())
        self.fixtures.enter_context(import_helper.isolated_modules())


class ModulesFilesTests(SiteDir, unittest.TestCase):
@@ -0,0 +1,3 @@
Added import helper ``isolated_modules`` as ``CleanImport`` does not remove
modules imported during the context. Use it in importlib.resources tests to
avoid leaving ``mod`` around to impede importlib.metadata tests.
@@ -125,29 +125,14 @@ list_append(PyListObject *self, PyObject *object)
    return return_value;
}

PyDoc_STRVAR(py_list_extend__doc__,
PyDoc_STRVAR(list_extend__doc__,
"extend($self, iterable, /)\n"
"--\n"
"\n"
"Extend list by appending elements from the iterable.");

#define PY_LIST_EXTEND_METHODDEF    \
    {"extend", (PyCFunction)py_list_extend, METH_O, py_list_extend__doc__},

static PyObject *
py_list_extend_impl(PyListObject *self, PyObject *iterable);

static PyObject *
py_list_extend(PyListObject *self, PyObject *iterable)
{
    PyObject *return_value = NULL;

    Py_BEGIN_CRITICAL_SECTION2(self, iterable);
    return_value = py_list_extend_impl(self, iterable);
    Py_END_CRITICAL_SECTION2();

    return return_value;
}
#define LIST_EXTEND_METHODDEF    \
    {"extend", (PyCFunction)list_extend, METH_O, list_extend__doc__},

PyDoc_STRVAR(list_pop__doc__,
"pop($self, index=-1, /)\n"
@@ -454,4 +439,4 @@ list___reversed__(PyListObject *self, PyObject *Py_UNUSED(ignored))
{
    return list___reversed___impl(self);
}
/*[clinic end generated code: output=a77eda9931ec0c20 input=a9049054013a1b77]*/
/*[clinic end generated code: output=854957a1d4a89bbd input=a9049054013a1b77]*/
@@ -10,6 +10,7 @@
#include "pycore_modsupport.h"    // _PyArg_NoKwnames()
#include "pycore_object.h"        // _PyObject_GC_TRACK(), _PyDebugAllocatorStats()
#include "pycore_tuple.h"         // _PyTuple_FromArray()
#include "pycore_setobject.h"     // _PySet_NextEntry()
#include <stddef.h>

/*[clinic input]
@@ -994,26 +995,28 @@ PyList_SetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
    return list_ass_slice((PyListObject *)a, ilow, ihigh, v);
}

static PyObject *
static int
list_inplace_repeat_lock_held(PyListObject *self, Py_ssize_t n)
{
    Py_ssize_t input_size = PyList_GET_SIZE(self);
    if (input_size == 0 || n == 1) {
        return Py_NewRef(self);
        return 0;
    }

    if (n < 1) {
        list_clear(self);
        return Py_NewRef(self);
        return 0;
    }

    if (input_size > PY_SSIZE_T_MAX / n) {
        return PyErr_NoMemory();
        PyErr_NoMemory();
        return -1;
    }
    Py_ssize_t output_size = input_size * n;

    if (list_resize(self, output_size) < 0)
        return NULL;
    if (list_resize(self, output_size) < 0) {
        return -1;
    }

    PyObject **items = self->ob_item;
    for (Py_ssize_t j = 0; j < input_size; j++) {
@@ -1021,8 +1024,7 @@ list_inplace_repeat_lock_held(PyListObject *self, Py_ssize_t n)
    }
    _Py_memory_repeat((char *)items, sizeof(PyObject *)*output_size,
                      sizeof(PyObject *)*input_size);

    return Py_NewRef(self);
    return 0;
}

static PyObject *
@@ -1031,7 +1033,12 @@ list_inplace_repeat(PyObject *_self, Py_ssize_t n)
    PyObject *ret;
    PyListObject *self = (PyListObject *) _self;
    Py_BEGIN_CRITICAL_SECTION(self);
    ret = list_inplace_repeat_lock_held(self, n);
    if (list_inplace_repeat_lock_held(self, n) < 0) {
        ret = NULL;
    }
    else {
        ret = Py_NewRef(self);
    }
    Py_END_CRITICAL_SECTION();
    return ret;
}
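The Python-level behaviour this path implements is unchanged by the refactoring: repeating by 1 (or repeating an empty list) is a no-op, and n < 1 clears the list in place, as a quick check shows:

lst = [1, 2]
lst *= 3                     # in-place repeat goes through list_inplace_repeat
assert lst == [1, 2, 1, 2, 1, 2]

alias = lst
lst *= 0                     # n < 1 clears the existing list object in place
assert lst == [] and alias is lst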
@@ -1179,7 +1186,7 @@ list_extend_fast(PyListObject *self, PyObject *iterable)
}

static int
list_extend_iter(PyListObject *self, PyObject *iterable)
list_extend_iter_lock_held(PyListObject *self, PyObject *iterable)
{
    PyObject *it = PyObject_GetIter(iterable);
    if (it == NULL) {
@@ -1253,45 +1260,78 @@ list_extend_iter(PyListObject *self, PyObject *iterable)
    return -1;
}

static int
list_extend_lock_held(PyListObject *self, PyObject *iterable)
{
    PyObject *seq = PySequence_Fast(iterable, "argument must be iterable");
    if (!seq) {
        return -1;
    }

    int res = list_extend_fast(self, seq);
    Py_DECREF(seq);
    return res;
}

static int
list_extend(PyListObject *self, PyObject *iterable)
list_extend_set(PyListObject *self, PySetObject *other)
{
    // Special cases:
    // 1) lists and tuples which can use PySequence_Fast ops
    // 2) extending self to self requires making a copy first
    if (PyList_CheckExact(iterable)
        || PyTuple_CheckExact(iterable)
        || (PyObject *)self == iterable)
    {
        iterable = PySequence_Fast(iterable, "argument must be iterable");
        if (!iterable) {
            return -1;
        }
    Py_ssize_t m = Py_SIZE(self);
    Py_ssize_t n = PySet_GET_SIZE(other);
    if (list_resize(self, m + n) < 0) {
        return -1;
    }
    /* populate the end of self with iterable's items */
    Py_ssize_t setpos = 0;
    Py_hash_t hash;
    PyObject *key;
    PyObject **dest = self->ob_item + m;
    while (_PySet_NextEntry((PyObject *)other, &setpos, &key, &hash)) {
        Py_INCREF(key);
        *dest = key;
        dest++;
    }
    Py_SET_SIZE(self, m + n);
    return 0;
}

    int res = list_extend_fast(self, iterable);
    Py_DECREF(iterable);
    return res;
static int
_list_extend(PyListObject *self, PyObject *iterable)
{
    // Special case:
    // lists and tuples which can use PySequence_Fast ops
    // TODO(@corona10): Add more special cases for other types.
    int res = -1;
    if ((PyObject *)self == iterable) {
        Py_BEGIN_CRITICAL_SECTION(self);
        res = list_inplace_repeat_lock_held(self, 2);
        Py_END_CRITICAL_SECTION();
    }
    else if (PyList_CheckExact(iterable)) {
        Py_BEGIN_CRITICAL_SECTION2(self, iterable);
        res = list_extend_lock_held(self, iterable);
        Py_END_CRITICAL_SECTION2();
    }
    else if (PyTuple_CheckExact(iterable)) {
        Py_BEGIN_CRITICAL_SECTION(self);
        res = list_extend_lock_held(self, iterable);
        Py_END_CRITICAL_SECTION();
    }
    else if (PyAnySet_CheckExact(iterable)) {
        Py_BEGIN_CRITICAL_SECTION2(self, iterable);
        res = list_extend_set(self, (PySetObject *)iterable);
        Py_END_CRITICAL_SECTION2();
    }
    else {
        return list_extend_iter(self, iterable);
        Py_BEGIN_CRITICAL_SECTION(self);
        res = list_extend_iter_lock_held(self, iterable);
        Py_END_CRITICAL_SECTION();
    }
    return res;
}


PyObject *
_PyList_Extend(PyListObject *self, PyObject *iterable)
{
    if (list_extend(self, iterable) < 0) {
        return NULL;
    }
    Py_RETURN_NONE;
}


/*[clinic input]
@critical_section self iterable
list.extend as py_list_extend
list.extend as list_extend

    iterable: object
    /
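The observable semantics the new special cases must preserve can be spot-checked from Python: extending a list with itself doubles it (hence the list_inplace_repeat_lock_held(self, 2) call), and extending from a set appends the set's items in its iteration order:

lst = [1, 2, 3]
lst.extend(lst)                       # self-extend behaves like lst *= 2
assert lst == [1, 2, 3, 1, 2, 3]

out = ["header"]
out.extend({10, 20, 30})              # set items arrive in set iteration order
assert out[0] == "header" and sorted(out[1:]) == [10, 20, 30]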
@@ -1300,12 +1340,20 @@ Extend list by appending elements from the iterable.
[clinic start generated code]*/

static PyObject *
py_list_extend_impl(PyListObject *self, PyObject *iterable)
/*[clinic end generated code: output=a2f115ceace2c845 input=1d42175414e1a5f3]*/
list_extend(PyListObject *self, PyObject *iterable)
/*[clinic end generated code: output=630fb3bca0c8e789 input=979da7597a515791]*/
{
    return _PyList_Extend(self, iterable);
    if (_list_extend(self, iterable) < 0) {
        return NULL;
    }
    Py_RETURN_NONE;
}

PyObject *
_PyList_Extend(PyListObject *self, PyObject *iterable)
{
    return list_extend(self, iterable);
}

int
PyList_Extend(PyObject *self, PyObject *iterable)
@@ -1314,7 +1362,7 @@ PyList_Extend(PyObject *self, PyObject *iterable)
        PyErr_BadInternalCall();
        return -1;
    }
    return list_extend((PyListObject*)self, iterable);
    return _list_extend((PyListObject*)self, iterable);
}


@@ -1334,7 +1382,7 @@ static PyObject *
list_inplace_concat(PyObject *_self, PyObject *other)
{
    PyListObject *self = (PyListObject *)_self;
    if (list_extend(self, other) < 0) {
    if (_list_extend(self, other) < 0) {
        return NULL;
    }
    return Py_NewRef(self);
@@ -3168,7 +3216,7 @@ list___init___impl(PyListObject *self, PyObject *iterable)
        list_clear(self);
    }
    if (iterable != NULL) {
        if (list_extend(self, iterable) < 0) {
        if (_list_extend(self, iterable) < 0) {
            return -1;
        }
    }
@@ -3229,7 +3277,7 @@ static PyMethodDef list_methods[] = {
    LIST_COPY_METHODDEF
    LIST_APPEND_METHODDEF
    LIST_INSERT_METHODDEF
    PY_LIST_EXTEND_METHODDEF
    LIST_EXTEND_METHODDEF
    LIST_POP_METHODDEF
    LIST_REMOVE_METHODDEF
    LIST_INDEX_METHODDEF
@@ -37,24 +37,15 @@ _PyFrame_MakeAndSetFrameObject(_PyInterpreterFrame *frame)
        return NULL;
    }
    PyErr_SetRaisedException(exc);
    if (frame->frame_obj) {
        // GH-97002: How did we get into this horrible situation? Most likely,
        // allocating f triggered a GC collection, which ran some code that
        // *also* created the same frame... while we were in the middle of
        // creating it! See test_sneaky_frame_object in test_frame.py for a
        // concrete example.
        //
        // Regardless, just throw f away and use that frame instead, since it's
        // already been exposed to user code. It's actually a bit tricky to do
        // this, since we aren't backed by a real _PyInterpreterFrame anymore.
        // Just pretend that we have an owned, cleared frame so frame_dealloc
        // doesn't make the situation worse:
        f->f_frame = (_PyInterpreterFrame *)f->_f_frame_data;
        f->f_frame->owner = FRAME_CLEARED;
        f->f_frame->frame_obj = f;
        Py_DECREF(f);
        return frame->frame_obj;
    }

    // GH-97002: There was a time when a frame object could be created when we
    // are allocating the new frame object f above, so frame->frame_obj would
    // be assigned already. That path does not exist anymore. We won't call any
    // Python code in this function and garbage collection will not run.
    // Notice that _PyFrame_New_NoTrack() can potentially raise a MemoryError,
    // but it won't allocate a traceback until the frame unwinds, so we are safe
    // here.
    assert(frame->frame_obj == NULL);
    assert(frame->owner != FRAME_OWNED_BY_FRAME_OBJECT);
    assert(frame->owner != FRAME_CLEARED);
    f->f_frame = frame;