Issue #23632: Memoryviews now allow tuple indexing (including for multi-dimensional memoryviews).
commit 31084ba528
parent 9eb57c5fa5
@@ -3282,10 +3282,8 @@ copying.
 the view. The :class:`~memoryview.itemsize` attribute will give you the
 number of bytes in a single element.
 
-A :class:`memoryview` supports slicing to expose its data. If
-:class:`~memoryview.format` is one of the native format specifiers
-from the :mod:`struct` module, indexing will return a single element
-with the correct type. Full slicing will result in a subview::
+A :class:`memoryview` supports slicing and indexing to expose its data.
+One-dimensional slicing will result in a subview::
 
    >>> v = memoryview(b'abcefg')
    >>> v[1]
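
A one-dimensional slice is a *subview*: it shares memory with the exporting
object instead of copying it. A quick illustration against an arbitrary
writable exporter (this snippet is mine, not part of the patch)::

   >>> data = bytearray(b'abcdef')
   >>> v = memoryview(data)
   >>> sub = v[1:4]        # no copy is made
   >>> sub[0] = ord('X')   # write through the subview
   >>> data
   bytearray(b'aXcdef')
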
@@ -3297,25 +3295,29 @@ copying.
    >>> bytes(v[1:4])
    b'bce'
 
-Other native formats::
+If :class:`~memoryview.format` is one of the native format specifiers
+from the :mod:`struct` module, indexing with an integer or a tuple of
+integers is also supported and returns a single *element* with
+the correct type. One-dimensional memoryviews can be indexed
+with an integer or a one-integer tuple. Multi-dimensional memoryviews
+can be indexed with tuples of exactly *ndim* integers where *ndim* is
+the number of dimensions. Zero-dimensional memoryviews can be indexed
+with the empty tuple.
+
+Here is an example with a non-byte format::
 
    >>> import array
    >>> a = array.array('l', [-11111111, 22222222, -33333333, 44444444])
-   >>> a[0]
+   >>> m = memoryview(a)
+   >>> m[0]
    -11111111
-   >>> a[-1]
+   >>> m[-1]
    44444444
-   >>> a[2:3].tolist()
-   [-33333333]
-   >>> a[::2].tolist()
+   >>> m[::2].tolist()
    [-11111111, -33333333]
-   >>> a[::-1].tolist()
-   [44444444, -33333333, 22222222, -11111111]
-
-.. versionadded:: 3.3
 
-If the underlying object is writable, the memoryview supports slice
-assignment. Resizing is not allowed::
+If the underlying object is writable, the memoryview supports
+one-dimensional slice assignment. Resizing is not allowed::
 
    >>> data = bytearray(b'abcefg')
    >>> v = memoryview(data)
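
To try the multi-dimensional case from pure Python, a flat buffer can be
reshaped with :meth:`~memoryview.cast`; the shape and values below are
arbitrary and the snippet is illustrative rather than part of the patch::

   >>> buf = bytearray(range(12))
   >>> m = memoryview(buf).cast('B', shape=[3, 4])
   >>> m.ndim, m.shape
   (2, (3, 4))
   >>> m[1, 2]             # exactly ndim integers select one element
   6
   >>> m[-1, -1]           # negative indices work per dimension
   11
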
@@ -3348,12 +3350,16 @@ copying.
       True
 
 .. versionchanged:: 3.3
    One-dimensional memoryviews can now be sliced.
    One-dimensional memoryviews with formats 'B', 'b' or 'c' are now hashable.
 
 .. versionchanged:: 3.4
    memoryview is now registered automatically with
    :class:`collections.abc.Sequence`
+
+.. versionchanged:: 3.5
+   memoryviews can now be indexed with tuple of integers.
 
 :class:`memoryview` has several methods:
 
 .. method:: __eq__(exporter)
@@ -11,6 +11,7 @@
 # memoryview tests is now in this module.
 #
 
+import contextlib
 import unittest
 from test import support
 from itertools import permutations, product
@@ -2825,6 +2826,13 @@ class TestBufferProtocol(unittest.TestCase):
         m = memoryview(ex)
         self.assertRaises(TypeError, eval, "9.0 in m", locals())
 
+    @contextlib.contextmanager
+    def assert_out_of_bounds_error(self, dim):
+        with self.assertRaises(IndexError) as cm:
+            yield
+        self.assertEqual(str(cm.exception),
+                         "index out of bounds on dimension %d" % (dim,))
+
     def test_memoryview_index(self):
 
         # ndim = 0
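
The helper above pins down the exact message introduced by this patch, with
dimensions reported 1-based. Assuming a writable 3x4 byte view built with
cast() (my own setup, not taken from the test suite), the behaviour it
asserts looks like this::

   >>> m = memoryview(bytearray(range(12))).cast('B', shape=[3, 4])
   >>> m[3, 0]
   Traceback (most recent call last):
     ...
   IndexError: index out of bounds on dimension 1
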
@@ -2851,12 +2859,31 @@ class TestBufferProtocol(unittest.TestCase):
         self.assertRaises(IndexError, m.__getitem__, -8)
         self.assertRaises(IndexError, m.__getitem__, 8)
 
-        # Not implemented: multidimensional sub-views
+        # multi-dimensional
         ex = ndarray(list(range(12)), shape=[3,4], flags=ND_WRITABLE)
         m = memoryview(ex)
 
-        self.assertRaises(NotImplementedError, m.__getitem__, 0)
-        self.assertRaises(NotImplementedError, m.__setitem__, 0, 9)
+        self.assertEqual(m[0, 0], 0)
+        self.assertEqual(m[2, 0], 8)
+        self.assertEqual(m[2, 3], 11)
+        self.assertEqual(m[-1, -1], 11)
+        self.assertEqual(m[-3, -4], 0)
+
+        # out of bounds
+        for index in (3, -4):
+            with self.assert_out_of_bounds_error(dim=1):
+                m[index, 0]
+        for index in (4, -5):
+            with self.assert_out_of_bounds_error(dim=2):
+                m[0, index]
+        self.assertRaises(IndexError, m.__getitem__, (2**64, 0))
+        self.assertRaises(IndexError, m.__getitem__, (0, 2**64))
+
+        self.assertRaises(TypeError, m.__getitem__, (0, 0, 0))
+        self.assertRaises(TypeError, m.__getitem__, (0.0, 0.0))
+
+        # Not implemented: multidimensional sub-views
+        self.assertRaises(NotImplementedError, m.__getitem__, ())
         self.assertRaises(NotImplementedError, m.__getitem__, 0)
 
     def test_memoryview_assign(self):
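
The same checks can be reproduced interactively; a tuple with the wrong
arity is rejected with the new TypeError message from ptr_from_tuple()
(the setup below is mine, not from the test suite)::

   >>> m = memoryview(bytearray(range(12))).cast('B', shape=[3, 4])
   >>> m[2, 3]
   11
   >>> m[0, 0, 0]
   Traceback (most recent call last):
     ...
   TypeError: cannot index 2-dimension view with 3-element tuple
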
@@ -2945,10 +2972,27 @@ class TestBufferProtocol(unittest.TestCase):
         m = memoryview(ex)
         self.assertRaises(NotImplementedError, m.__setitem__, 0, 1)
 
-        # Not implemented: multidimensional sub-views
+        # multi-dimensional
        ex = ndarray(list(range(12)), shape=[3,4], flags=ND_WRITABLE)
         m = memoryview(ex)
+        m[0,1] = 42
+        self.assertEqual(ex[0][1], 42)
+        m[-1,-1] = 43
+        self.assertEqual(ex[2][3], 43)
+        # errors
+        for index in (3, -4):
+            with self.assert_out_of_bounds_error(dim=1):
+                m[index, 0] = 0
+        for index in (4, -5):
+            with self.assert_out_of_bounds_error(dim=2):
+                m[0, index] = 0
+        self.assertRaises(IndexError, m.__setitem__, (2**64, 0), 0)
+        self.assertRaises(IndexError, m.__setitem__, (0, 2**64), 0)
+
+        self.assertRaises(TypeError, m.__setitem__, (0, 0, 0), 0)
+        self.assertRaises(TypeError, m.__setitem__, (0.0, 0.0), 0)
 
+        # Not implemented: multidimensional sub-views
         self.assertRaises(NotImplementedError, m.__setitem__, 0, [2, 3])
 
     def test_memoryview_slice(self):
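
Tuple-index assignment writes a single element straight into the underlying
buffer, which is what the test verifies through ex[0][1] and ex[2][3]. A
minimal stand-alone equivalent, using cast() instead of the _testbuffer
ndarray (illustrative, not part of the patch)::

   >>> buf = bytearray(12)
   >>> m = memoryview(buf).cast('B', shape=[3, 4])
   >>> m[0, 1] = 42
   >>> m[-1, -1] = 43
   >>> buf[1], buf[11]
   (42, 43)
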
@@ -2961,8 +3005,8 @@ class TestBufferProtocol(unittest.TestCase):
         self.assertRaises(ValueError, m.__setitem__, slice(0,2,0),
                           bytearray([1,2]))
 
-        # invalid slice key
-        self.assertRaises(TypeError, m.__getitem__, ())
+        # 0-dim slicing (identity function)
+        self.assertRaises(NotImplementedError, m.__getitem__, ())
 
         # multidimensional slices
         ex = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE)
@@ -10,6 +10,9 @@ Release date: 2015-03-28
 Core and Builtins
 -----------------
 
+- Issue #23632: Memoryviews now allow tuple indexing (including for
+  multi-dimensional memoryviews).
+
 - Issue #23192: Fixed generator lambdas. Patch by Bruno Cauet.
 
 - Issue #23629: Fix the default __sizeof__ implementation for variable-sized
@@ -192,10 +192,10 @@ PyTypeObject _PyManagedBuffer_Type = {
 #define VIEW_ADDR(mv) (&((PyMemoryViewObject *)mv)->view)
 
 /* Check for the presence of suboffsets in the first dimension. */
-#define HAVE_PTR(suboffsets) (suboffsets && suboffsets[0] >= 0)
+#define HAVE_PTR(suboffsets, dim) (suboffsets && suboffsets[dim] >= 0)
 /* Adjust ptr if suboffsets are present. */
-#define ADJUST_PTR(ptr, suboffsets) \
-    (HAVE_PTR(suboffsets) ? *((char**)ptr) + suboffsets[0] : ptr)
+#define ADJUST_PTR(ptr, suboffsets, dim) \
+    (HAVE_PTR(suboffsets, dim) ? *((char**)ptr) + suboffsets[dim] : ptr)
 
 /* Memoryview buffer properties */
 #define MV_C_CONTIGUOUS(flags) (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C))
@@ -332,11 +332,11 @@ copy_base(const Py_ssize_t *shape, Py_ssize_t itemsize,
     char *p;
     Py_ssize_t i;
     for (i=0, p=mem; i < shape[0]; p+=itemsize, sptr+=sstrides[0], i++) {
-        char *xsptr = ADJUST_PTR(sptr, ssuboffsets);
+        char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);
         memcpy(p, xsptr, itemsize);
     }
     for (i=0, p=mem; i < shape[0]; p+=itemsize, dptr+=dstrides[0], i++) {
-        char *xdptr = ADJUST_PTR(dptr, dsuboffsets);
+        char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
         memcpy(xdptr, p, itemsize);
     }
 }
@@ -364,8 +364,8 @@ copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,
     }
 
     for (i = 0; i < shape[0]; dptr+=dstrides[0], sptr+=sstrides[0], i++) {
-        char *xdptr = ADJUST_PTR(dptr, dsuboffsets);
-        char *xsptr = ADJUST_PTR(sptr, ssuboffsets);
+        char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
+        char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);
 
         copy_rec(shape+1, ndim-1, itemsize,
                  xdptr, dstrides+1, dsuboffsets ? dsuboffsets+1 : NULL,
@@ -2057,7 +2057,7 @@ tolist_base(const char *ptr, const Py_ssize_t *shape,
         return NULL;
 
     for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
-        const char *xptr = ADJUST_PTR(ptr, suboffsets);
+        const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
         item = unpack_single(xptr, fmt);
         if (item == NULL) {
             Py_DECREF(lst);
@@ -2091,7 +2091,7 @@ tolist_rec(const char *ptr, Py_ssize_t ndim, const Py_ssize_t *shape,
         return NULL;
 
     for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
-        const char *xptr = ADJUST_PTR(ptr, suboffsets);
+        const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
         item = tolist_rec(xptr, ndim-1, shape+1,
                           strides+1, suboffsets ? suboffsets+1 : NULL,
                           fmt);
@@ -2171,33 +2171,66 @@ memory_repr(PyMemoryViewObject *self)
 /* Indexing and slicing */
 /**************************************************************************/
 
-/* Get the pointer to the item at index. */
 static char *
-ptr_from_index(Py_buffer *view, Py_ssize_t index)
+lookup_dimension(Py_buffer *view, char *ptr, int dim, Py_ssize_t index)
 {
-    char *ptr;
-    Py_ssize_t nitems; /* items in the first dimension */
+    Py_ssize_t nitems; /* items in the given dimension */
 
     assert(view->shape);
     assert(view->strides);
 
-    nitems = view->shape[0];
+    nitems = view->shape[dim];
     if (index < 0) {
         index += nitems;
     }
     if (index < 0 || index >= nitems) {
-        PyErr_SetString(PyExc_IndexError, "index out of bounds");
+        PyErr_Format(PyExc_IndexError,
+                     "index out of bounds on dimension %d", dim + 1);
         return NULL;
     }
 
-    ptr = (char *)view->buf;
-    ptr += view->strides[0] * index;
+    ptr += view->strides[dim] * index;
 
-    ptr = ADJUST_PTR(ptr, view->suboffsets);
+    ptr = ADJUST_PTR(ptr, view->suboffsets, dim);
 
     return ptr;
 }
 
+/* Get the pointer to the item at index. */
+static char *
+ptr_from_index(Py_buffer *view, Py_ssize_t index)
+{
+    char *ptr = (char *)view->buf;
+    return lookup_dimension(view, ptr, 0, index);
+}
+
+/* Get the pointer to the item at tuple. */
+static char *
+ptr_from_tuple(Py_buffer *view, PyObject *tup)
+{
+    char *ptr = (char *)view->buf;
+    Py_ssize_t dim, nindices = PyTuple_GET_SIZE(tup);
+
+    if (nindices > view->ndim) {
+        PyErr_Format(PyExc_TypeError,
+                     "cannot index %zd-dimension view with %zd-element tuple",
+                     view->ndim, nindices);
+        return NULL;
+    }
+
+    for (dim = 0; dim < nindices; dim++) {
+        Py_ssize_t index;
+        index = PyNumber_AsSsize_t(PyTuple_GET_ITEM(tup, dim),
+                                   PyExc_IndexError);
+        if (index == -1 && PyErr_Occurred())
+            return NULL;
+        ptr = lookup_dimension(view, ptr, dim, index);
+        if (ptr == NULL)
+            return NULL;
+    }
+    return ptr;
+}
+
 /* Return the item at index. In a one-dimensional view, this is an object
    with the type specified by view->format. Otherwise, the item is a sub-view.
    The function is used in memory_subscript() and memory_as_sequence. */
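
In Python terms, lookup_dimension() normalises and bounds-checks one index
and advances the element pointer by strides[dim] * index, and
ptr_from_tuple() simply chains that over every dimension. A rough
pure-Python model of the offset computation (suboffsets ignored; function
and variable names here are mine, not CPython's):

   def lookup_dimension(shape, strides, dim, index, offset):
       # Wrap negative indices, bounds-check this dimension, then advance
       # the byte offset, mirroring the C helper above.
       nitems = shape[dim]
       if index < 0:
           index += nitems
       if not 0 <= index < nitems:
           raise IndexError("index out of bounds on dimension %d" % (dim + 1))
       return offset + strides[dim] * index

   def offset_from_tuple(shape, strides, indices):
       # Mirror of ptr_from_tuple(): one lookup_dimension() call per index.
       if len(indices) > len(shape):
           raise TypeError("cannot index %d-dimension view with "
                           "%d-element tuple" % (len(shape), len(indices)))
       offset = 0
       for dim, index in enumerate(indices):
           offset = lookup_dimension(shape, strides, dim, index, offset)
       return offset

   # A C-contiguous 3x4 view of 1-byte items has strides (4, 1):
   assert offset_from_tuple((3, 4), (4, 1), (2, 3)) == 11
   assert offset_from_tuple((3, 4), (4, 1), (-1, -1)) == 11
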
@@ -2229,6 +2262,32 @@ memory_item(PyMemoryViewObject *self, Py_ssize_t index)
     return NULL;
 }
 
+/* Return the item at position *key* (a tuple of indices). */
+static PyObject *
+memory_item_multi(PyMemoryViewObject *self, PyObject *tup)
+{
+    Py_buffer *view = &(self->view);
+    const char *fmt;
+    Py_ssize_t nindices = PyTuple_GET_SIZE(tup);
+    char *ptr;
+
+    CHECK_RELEASED(self);
+
+    fmt = adjust_fmt(view);
+    if (fmt == NULL)
+        return NULL;
+
+    if (nindices < view->ndim) {
+        PyErr_SetString(PyExc_NotImplementedError,
+                        "sub-views are not implemented");
+        return NULL;
+    }
+    ptr = ptr_from_tuple(view, tup);
+    if (ptr == NULL)
+        return NULL;
+    return unpack_single(ptr, fmt);
+}
+
 Py_LOCAL_INLINE(int)
 init_slice(Py_buffer *base, PyObject *key, int dim)
 {
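
memory_item_multi() unpacks exactly one element once the tuple supplies an
index for every dimension; a shorter tuple would name a sub-view, which is
still unimplemented. For example, with a 2-D view of C ints built via two
casts (illustrative setup, not from the patch)::

   >>> from array import array
   >>> a = array('i', range(6))
   >>> m = memoryview(a).cast('B').cast('i', shape=[2, 3])
   >>> m[1, 2]
   5
   >>> m[(0,)]
   Traceback (most recent call last):
     ...
   NotImplementedError: sub-views are not implemented
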
@@ -2277,6 +2336,22 @@ is_multislice(PyObject *key)
     return 1;
 }
 
+static Py_ssize_t
+is_multiindex(PyObject *key)
+{
+    Py_ssize_t size, i;
+
+    if (!PyTuple_Check(key))
+        return 0;
+    size = PyTuple_GET_SIZE(key);
+    for (i = 0; i < size; i++) {
+        PyObject *x = PyTuple_GET_ITEM(key, i);
+        if (!PyIndex_Check(x))
+            return 0;
+    }
+    return 1;
+}
+
 /* mv[obj] returns an object holding the data for one element if obj
    fully indexes the memoryview or another memoryview object if it
    does not.
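
is_multiindex() accepts any tuple whose items all pass PyIndex_Check(),
including the empty tuple; everything else falls through to the slice
handling. Roughly the same predicate in Python (names are mine, and
operator.index() actually invokes __index__ where the C check only tests
for its presence):

   from operator import index

   def is_multiindex(key):
       # True for a tuple made entirely of index-like objects.
       if not isinstance(key, tuple):
           return False
       try:
           for item in key:
               index(item)
       except TypeError:
           return False
       return True

   assert is_multiindex((1, -2)) and is_multiindex(())
   assert not is_multiindex((1.0, 2)) and not is_multiindex([1, 2])
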
@@ -2332,6 +2407,9 @@ memory_subscript(PyMemoryViewObject *self, PyObject *key)
 
         return (PyObject *)sliced;
     }
+    else if (is_multiindex(key)) {
+        return memory_item_multi(self, key);
+    }
     else if (is_multislice(key)) {
         PyErr_SetString(PyExc_NotImplementedError,
             "multi-dimensional slicing is not implemented");
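
With that dispatch in place, a tuple of integers is the only way to read a
single element out of a multi-dimensional view; a bare integer still asks
for a sub-view and a tuple of slices still hits the unimplemented
multi-slicing branch. Assuming a 3x4 byte view made with cast()
(illustrative, not from the patch)::

   >>> m = memoryview(bytearray(range(12))).cast('B', shape=[3, 4])
   >>> m[1, 1]
   5
   >>> m[0]
   Traceback (most recent call last):
     ...
   NotImplementedError: multi-dimensional sub-views are not implemented
   >>> m[0:1, 0:1]
   Traceback (most recent call last):
     ...
   NotImplementedError: multi-dimensional slicing is not implemented
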
@@ -2376,14 +2454,15 @@ memory_ass_sub(PyMemoryViewObject *self, PyObject *key, PyObject *value)
             return -1;
         }
     }
-    if (view->ndim != 1) {
-        PyErr_SetString(PyExc_NotImplementedError,
-            "memoryview assignments are currently restricted to ndim = 1");
-        return -1;
-    }
 
     if (PyIndex_Check(key)) {
-        Py_ssize_t index = PyNumber_AsSsize_t(key, PyExc_IndexError);
+        Py_ssize_t index;
+        if (1 < view->ndim) {
+            PyErr_SetString(PyExc_NotImplementedError,
+                "sub-views are not implemented");
+            return -1;
+        }
+        index = PyNumber_AsSsize_t(key, PyExc_IndexError);
         if (index == -1 && PyErr_Occurred())
             return -1;
         ptr = ptr_from_index(view, index);
@@ -2418,7 +2497,19 @@ memory_ass_sub(PyMemoryViewObject *self, PyObject *key, PyObject *value)
         PyBuffer_Release(&src);
         return ret;
     }
-    else if (PySlice_Check(key) || is_multislice(key)) {
+    if (is_multiindex(key)) {
+        char *ptr;
+        if (PyTuple_GET_SIZE(key) < view->ndim) {
+            PyErr_SetString(PyExc_NotImplementedError,
+                "sub-views are not implemented");
+            return -1;
+        }
+        ptr = ptr_from_tuple(view, key);
+        if (ptr == NULL)
+            return -1;
+        return pack_single(ptr, value, fmt);
+    }
+    if (PySlice_Check(key) || is_multislice(key)) {
         /* Call memory_subscript() to produce a sliced lvalue, then copy
            rvalue into lvalue. This is already implemented in _testbuffer.c. */
         PyErr_SetString(PyExc_NotImplementedError,
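
On the assignment side the same rule applies: the tuple must name one
element exactly, otherwise the sub-view branch raises before anything is
packed. For instance (setup mine, not from the patch)::

   >>> buf = bytearray(12)
   >>> m = memoryview(buf).cast('B', shape=[3, 4])
   >>> m[(1,)] = 7
   Traceback (most recent call last):
     ...
   NotImplementedError: sub-views are not implemented
   >>> m[1, 1] = 7
   >>> buf[5]
   7
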
@@ -2591,8 +2682,8 @@ cmp_base(const char *p, const char *q, const Py_ssize_t *shape,
     int equal;
 
     for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
-        const char *xp = ADJUST_PTR(p, psuboffsets);
-        const char *xq = ADJUST_PTR(q, qsuboffsets);
+        const char *xp = ADJUST_PTR(p, psuboffsets, 0);
+        const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
         equal = unpack_cmp(xp, xq, fmt, unpack_p, unpack_q);
         if (equal <= 0)
             return equal;
@@ -2626,8 +2717,8 @@ cmp_rec(const char *p, const char *q,
     }
 
     for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
-        const char *xp = ADJUST_PTR(p, psuboffsets);
-        const char *xq = ADJUST_PTR(q, qsuboffsets);
+        const char *xp = ADJUST_PTR(p, psuboffsets, 0);
+        const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
         equal = cmp_rec(xp, xq, ndim-1, shape+1,
                         pstrides+1, psuboffsets ? psuboffsets+1 : NULL,
                         qstrides+1, qsuboffsets ? qsuboffsets+1 : NULL,