1 /* Memoryview object implementation */
2
3 #include "Python.h"
4 #include "internal/mem.h"
5 #include "internal/pystate.h"
6 #include "pystrhex.h"
7 #include <stddef.h>
8
9
10 /****************************************************************************/
11 /* ManagedBuffer Object */
12 /****************************************************************************/
13
14 /*
15 ManagedBuffer Object:
16 ---------------------
17
18 The purpose of this object is to facilitate the handling of chained
19 memoryviews that have the same underlying exporting object. PEP-3118
20 allows the underlying object to change while a view is exported. This
21 could lead to unexpected results when constructing a new memoryview
22 from an existing memoryview.
23
24 Rather than repeatedly redirecting buffer requests to the original base
25 object, all chained memoryviews use a single buffer snapshot. This
26 snapshot is generated by the constructor _PyManagedBuffer_FromObject().
27
28 Ownership rules:
29 ----------------
30
31 The master buffer inside a managed buffer is filled in by the original
32 base object. shape, strides, suboffsets and format are read-only for
33 all consumers.
34
35 A memoryview's buffer is a private copy of the exporter's buffer. shape,
36 strides and suboffsets belong to the memoryview and are thus writable.
37
38 If a memoryview itself exports several buffers via memory_getbuf(), all
39 buffer copies share shape, strides and suboffsets. In this case, the
40 arrays are NOT writable.
41
42 Reference count assumptions:
43 ----------------------------
44
45 The 'obj' member of a Py_buffer must either be NULL or refer to the
46 exporting base object. In the Python codebase, all getbufferprocs
47 return a new reference to view.obj (example: bytes_buffer_getbuffer()).
48
49 PyBuffer_Release() decrements view.obj (if non-NULL), so the
50 releasebufferprocs must NOT decrement view.obj.
51 */
52
53
/* Bail out of the *calling* function with NULL if the managed buffer has
   already been released.  Deliberately not wrapped in do { } while (0):
   the expansion must be able to 'return' from the caller. */
#define CHECK_MBUF_RELEASED(mbuf) \
    if (((_PyManagedBufferObject *)mbuf)->flags&_Py_MANAGED_BUFFER_RELEASED) { \
        PyErr_SetString(PyExc_ValueError,                                      \
            "operation forbidden on released memoryview object");              \
        return NULL;                                                           \
    }
60
61
62 static inline _PyManagedBufferObject *
mbuf_alloc(void)63 mbuf_alloc(void)
64 {
65 _PyManagedBufferObject *mbuf;
66
67 mbuf = (_PyManagedBufferObject *)
68 PyObject_GC_New(_PyManagedBufferObject, &_PyManagedBuffer_Type);
69 if (mbuf == NULL)
70 return NULL;
71 mbuf->flags = 0;
72 mbuf->exports = 0;
73 mbuf->master.obj = NULL;
74 _PyObject_GC_TRACK(mbuf);
75
76 return mbuf;
77 }
78
79 static PyObject *
_PyManagedBuffer_FromObject(PyObject * base)80 _PyManagedBuffer_FromObject(PyObject *base)
81 {
82 _PyManagedBufferObject *mbuf;
83
84 mbuf = mbuf_alloc();
85 if (mbuf == NULL)
86 return NULL;
87
88 if (PyObject_GetBuffer(base, &mbuf->master, PyBUF_FULL_RO) < 0) {
89 mbuf->master.obj = NULL;
90 Py_DECREF(mbuf);
91 return NULL;
92 }
93
94 return (PyObject *)mbuf;
95 }
96
/* Release the master buffer exactly once; later calls are no-ops.
   Setting _Py_MANAGED_BUFFER_RELEASED makes all chained memoryviews see
   the buffer as inaccessible (see BASE_INACCESSIBLE()). */
static void
mbuf_release(_PyManagedBufferObject *self)
{
    if (self->flags&_Py_MANAGED_BUFFER_RELEASED)
        return;

    /* NOTE: at this point self->exports can still be > 0 if this function
       is called from mbuf_clear() to break up a reference cycle. */
    self->flags |= _Py_MANAGED_BUFFER_RELEASED;

    /* PyBuffer_Release() decrements master->obj and sets it to NULL. */
    _PyObject_GC_UNTRACK(self);
    PyBuffer_Release(&self->master);
}
111
/* tp_dealloc: release the master buffer (if not done yet), free the
   private format copy (if any) and free the object itself. */
static void
mbuf_dealloc(_PyManagedBufferObject *self)
{
    assert(self->exports == 0);
    mbuf_release(self);
    /* The format string is owned only if mbuf_copy_format() copied it. */
    if (self->flags&_Py_MANAGED_BUFFER_FREE_FORMAT)
        PyMem_Free(self->master.format);
    PyObject_GC_Del(self);
}
121
122 static int
mbuf_traverse(_PyManagedBufferObject * self,visitproc visit,void * arg)123 mbuf_traverse(_PyManagedBufferObject *self, visitproc visit, void *arg)
124 {
125 Py_VISIT(self->master.obj);
126 return 0;
127 }
128
/* tp_clear: break the cycle through master.obj by releasing the master
   buffer, even while memoryview exports may still be outstanding. */
static int
mbuf_clear(_PyManagedBufferObject *self)
{
    assert(self->exports >= 0);
    mbuf_release(self);
    return 0;
}
136
/* Internal type: never exposed to Python code; instances are shared by
   all memoryviews chained on the same exporter. */
PyTypeObject _PyManagedBuffer_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "managedbuffer",
    sizeof(_PyManagedBufferObject),
    0,
    (destructor)mbuf_dealloc,                /* tp_dealloc */
    0,                                       /* tp_print */
    0,                                       /* tp_getattr */
    0,                                       /* tp_setattr */
    0,                                       /* tp_reserved */
    0,                                       /* tp_repr */
    0,                                       /* tp_as_number */
    0,                                       /* tp_as_sequence */
    0,                                       /* tp_as_mapping */
    0,                                       /* tp_hash */
    0,                                       /* tp_call */
    0,                                       /* tp_str */
    PyObject_GenericGetAttr,                 /* tp_getattro */
    0,                                       /* tp_setattro */
    0,                                       /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
    0,                                       /* tp_doc */
    (traverseproc)mbuf_traverse,             /* tp_traverse */
    (inquiry)mbuf_clear                      /* tp_clear */
};
162
163
164 /****************************************************************************/
165 /* MemoryView Object */
166 /****************************************************************************/
167
/* In the process of breaking reference cycles mbuf_release() can be
   called before memory_release(). */
#define BASE_INACCESSIBLE(mv) \
    (((PyMemoryViewObject *)mv)->flags&_Py_MEMORYVIEW_RELEASED || \
     ((PyMemoryViewObject *)mv)->mbuf->flags&_Py_MANAGED_BUFFER_RELEASED)

/* Return NULL from the calling function if the view (or its managed
   buffer) has been released.  Must be able to 'return', hence no
   do { } while (0) wrapper. */
#define CHECK_RELEASED(mv) \
    if (BASE_INACCESSIBLE(mv)) { \
        PyErr_SetString(PyExc_ValueError, \
            "operation forbidden on released memoryview object"); \
        return NULL; \
    }

/* Same as CHECK_RELEASED(), for callers that return an int. */
#define CHECK_RELEASED_INT(mv) \
    if (BASE_INACCESSIBLE(mv)) { \
        PyErr_SetString(PyExc_ValueError, \
            "operation forbidden on released memoryview object"); \
        return -1; \
    }

#define CHECK_LIST_OR_TUPLE(v) \
    if (!PyList_Check(v) && !PyTuple_Check(v)) { \
        PyErr_SetString(PyExc_TypeError, \
            #v " must be a list or a tuple"); \
        return NULL; \
    }

#define VIEW_ADDR(mv) (&((PyMemoryViewObject *)mv)->view)

/* Check for the presence of suboffsets in the first dimension. */
#define HAVE_PTR(suboffsets, dim) (suboffsets && suboffsets[dim] >= 0)
/* Adjust ptr if suboffsets are present (PIL-style indirection). */
#define ADJUST_PTR(ptr, suboffsets, dim) \
    (HAVE_PTR(suboffsets, dim) ? *((char**)ptr) + suboffsets[dim] : ptr)

/* Memoryview buffer properties */
#define MV_C_CONTIGUOUS(flags) (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C))
#define MV_F_CONTIGUOUS(flags) \
    (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_FORTRAN))
#define MV_ANY_CONTIGUOUS(flags) \
    (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN))

/* Fast contiguity test. Caller must ensure suboffsets==NULL and ndim==1. */
#define MV_CONTIGUOUS_NDIM1(view) \
    ((view)->shape[0] == 1 || (view)->strides[0] == (view)->itemsize)

/* getbuffer() requests */
#define REQ_INDIRECT(flags) ((flags&PyBUF_INDIRECT) == PyBUF_INDIRECT)
#define REQ_C_CONTIGUOUS(flags) ((flags&PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS)
#define REQ_F_CONTIGUOUS(flags) ((flags&PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)
#define REQ_ANY_CONTIGUOUS(flags) ((flags&PyBUF_ANY_CONTIGUOUS) == PyBUF_ANY_CONTIGUOUS)
#define REQ_STRIDES(flags) ((flags&PyBUF_STRIDES) == PyBUF_STRIDES)
#define REQ_SHAPE(flags) ((flags&PyBUF_ND) == PyBUF_ND)
#define REQ_WRITABLE(flags) (flags&PyBUF_WRITABLE)
#define REQ_FORMAT(flags) (flags&PyBUF_FORMAT)
223
224
/* Docstring for the memoryview type (shown by help(memoryview)). */
PyDoc_STRVAR(memory_doc,
"memoryview(object)\n--\n\
\n\
Create a new memoryview object which references the given object.");
229
230
231 /**************************************************************************/
232 /* Copy memoryview buffers */
233 /**************************************************************************/
234
235 /* The functions in this section take a source and a destination buffer
236 with the same logical structure: format, itemsize, ndim and shape
237 are identical, with ndim > 0.
238
239 NOTE: All buffers are assumed to have PyBUF_FULL information, which
240 is the case for memoryviews! */
241
242
/* Assumptions: ndim >= 1. The macro tests for a corner case that should
   perhaps be explicitly forbidden in the PEP.

   Macro hygiene fix: use the argument's own ndim instead of silently
   capturing the caller's 'dest' variable.  All call sites guarantee
   dest->ndim == src->ndim (equiv_structure() is checked first), so the
   result is unchanged; the argument is also parenthesized. */
#define HAVE_SUBOFFSETS_IN_LAST_DIM(view) \
    ((view)->suboffsets && (view)->suboffsets[(view)->ndim-1] >= 0)
247
/* Return 1 iff items in the innermost dimension of both buffers are
   adjacent in memory (no suboffsets there, stride == itemsize), so a
   whole row can be moved with a single memcpy()/memmove(). */
static inline int
last_dim_is_contiguous(const Py_buffer *dest, const Py_buffer *src)
{
    assert(dest->ndim > 0 && src->ndim > 0);
    return (!HAVE_SUBOFFSETS_IN_LAST_DIM(dest) &&
            !HAVE_SUBOFFSETS_IN_LAST_DIM(src) &&
            dest->strides[dest->ndim-1] == dest->itemsize &&
            src->strides[src->ndim-1] == src->itemsize);
}
257
258 /* This is not a general function for determining format equivalence.
259 It is used in copy_single() and copy_buffer() to weed out non-matching
260 formats. Skipping the '@' character is specifically used in slice
261 assignments, where the lvalue is already known to have a single character
262 format. This is a performance hack that could be rewritten (if properly
263 benchmarked). */
264 static inline int
equiv_format(const Py_buffer * dest,const Py_buffer * src)265 equiv_format(const Py_buffer *dest, const Py_buffer *src)
266 {
267 const char *dfmt, *sfmt;
268
269 assert(dest->format && src->format);
270 dfmt = dest->format[0] == '@' ? dest->format+1 : dest->format;
271 sfmt = src->format[0] == '@' ? src->format+1 : src->format;
272
273 if (strcmp(dfmt, sfmt) != 0 ||
274 dest->itemsize != src->itemsize) {
275 return 0;
276 }
277
278 return 1;
279 }
280
281 /* Two shapes are equivalent if they are either equal or identical up
282 to a zero element at the same position. For example, in NumPy arrays
283 the shapes [1, 0, 5] and [1, 0, 7] are equivalent. */
284 static inline int
equiv_shape(const Py_buffer * dest,const Py_buffer * src)285 equiv_shape(const Py_buffer *dest, const Py_buffer *src)
286 {
287 int i;
288
289 if (dest->ndim != src->ndim)
290 return 0;
291
292 for (i = 0; i < dest->ndim; i++) {
293 if (dest->shape[i] != src->shape[i])
294 return 0;
295 if (dest->shape[i] == 0)
296 break;
297 }
298
299 return 1;
300 }
301
302 /* Check that the logical structure of the destination and source buffers
303 is identical. */
304 static int
equiv_structure(const Py_buffer * dest,const Py_buffer * src)305 equiv_structure(const Py_buffer *dest, const Py_buffer *src)
306 {
307 if (!equiv_format(dest, src) ||
308 !equiv_shape(dest, src)) {
309 PyErr_SetString(PyExc_ValueError,
310 "memoryview assignment: lvalue and rvalue have different "
311 "structures");
312 return 0;
313 }
314
315 return 1;
316 }
317
/* Base case for recursive multi-dimensional copying. Contiguous arrays are
   copied with very little overhead. Assumptions: ndim == 1, mem == NULL or
   sizeof(mem) == shape[0] * itemsize. */
static void
copy_base(const Py_ssize_t *shape, Py_ssize_t itemsize,
          char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
          char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
          char *mem)
{
    if (mem == NULL) { /* contiguous */
        Py_ssize_t size = shape[0] * itemsize;
        /* memcpy() may only be used when the regions cannot overlap. */
        if (dptr + size < sptr || sptr + size < dptr)
            memcpy(dptr, sptr, size); /* no overlapping */
        else
            memmove(dptr, sptr, size);
    }
    else {
        /* Non-contiguous: first stage all source items in 'mem', then
           scatter them to the destination, so that overlapping source
           and destination views are still copied correctly. */
        char *p;
        Py_ssize_t i;
        for (i=0, p=mem; i < shape[0]; p+=itemsize, sptr+=sstrides[0], i++) {
            char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);
            memcpy(p, xsptr, itemsize);
        }
        for (i=0, p=mem; i < shape[0]; p+=itemsize, dptr+=dstrides[0], i++) {
            char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
            memcpy(xdptr, p, itemsize);
        }
    }

}
348
/* Recursively copy a source buffer to a destination buffer. The two buffers
   have the same ndim, shape and itemsize.  'mem' is the scratch buffer for
   the non-contiguous base case (see copy_base()), or NULL. */
static void
copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,
         char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
         char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
         char *mem)
{
    Py_ssize_t i;

    assert(ndim >= 1);

    if (ndim == 1) {
        copy_base(shape, itemsize,
                  dptr, dstrides, dsuboffsets,
                  sptr, sstrides, ssuboffsets,
                  mem);
        return;
    }

    /* Recurse over the first dimension, peeling one dimension off the
       shape/strides/suboffsets arrays per level. */
    for (i = 0; i < shape[0]; dptr+=dstrides[0], sptr+=sstrides[0], i++) {
        char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
        char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);

        copy_rec(shape+1, ndim-1, itemsize,
                 xdptr, dstrides+1, dsuboffsets ? dsuboffsets+1 : NULL,
                 xsptr, sstrides+1, ssuboffsets ? ssuboffsets+1 : NULL,
                 mem);
    }
}
379
380 /* Faster copying of one-dimensional arrays. */
381 static int
copy_single(Py_buffer * dest,Py_buffer * src)382 copy_single(Py_buffer *dest, Py_buffer *src)
383 {
384 char *mem = NULL;
385
386 assert(dest->ndim == 1);
387
388 if (!equiv_structure(dest, src))
389 return -1;
390
391 if (!last_dim_is_contiguous(dest, src)) {
392 mem = PyMem_Malloc(dest->shape[0] * dest->itemsize);
393 if (mem == NULL) {
394 PyErr_NoMemory();
395 return -1;
396 }
397 }
398
399 copy_base(dest->shape, dest->itemsize,
400 dest->buf, dest->strides, dest->suboffsets,
401 src->buf, src->strides, src->suboffsets,
402 mem);
403
404 if (mem)
405 PyMem_Free(mem);
406
407 return 0;
408 }
409
410 /* Recursively copy src to dest. Both buffers must have the same basic
411 structure. Copying is atomic, the function never fails with a partial
412 copy. */
413 static int
copy_buffer(Py_buffer * dest,Py_buffer * src)414 copy_buffer(Py_buffer *dest, Py_buffer *src)
415 {
416 char *mem = NULL;
417
418 assert(dest->ndim > 0);
419
420 if (!equiv_structure(dest, src))
421 return -1;
422
423 if (!last_dim_is_contiguous(dest, src)) {
424 mem = PyMem_Malloc(dest->shape[dest->ndim-1] * dest->itemsize);
425 if (mem == NULL) {
426 PyErr_NoMemory();
427 return -1;
428 }
429 }
430
431 copy_rec(dest->shape, dest->ndim, dest->itemsize,
432 dest->buf, dest->strides, dest->suboffsets,
433 src->buf, src->strides, src->suboffsets,
434 mem);
435
436 if (mem)
437 PyMem_Free(mem);
438
439 return 0;
440 }
441
442 /* Initialize strides for a C-contiguous array. */
443 static inline void
init_strides_from_shape(Py_buffer * view)444 init_strides_from_shape(Py_buffer *view)
445 {
446 Py_ssize_t i;
447
448 assert(view->ndim > 0);
449
450 view->strides[view->ndim-1] = view->itemsize;
451 for (i = view->ndim-2; i >= 0; i--)
452 view->strides[i] = view->strides[i+1] * view->shape[i+1];
453 }
454
455 /* Initialize strides for a Fortran-contiguous array. */
456 static inline void
init_fortran_strides_from_shape(Py_buffer * view)457 init_fortran_strides_from_shape(Py_buffer *view)
458 {
459 Py_ssize_t i;
460
461 assert(view->ndim > 0);
462
463 view->strides[0] = view->itemsize;
464 for (i = 1; i < view->ndim; i++)
465 view->strides[i] = view->strides[i-1] * view->shape[i-1];
466 }
467
/* Copy src to a contiguous representation. order is one of 'C', 'F' (Fortran)
   or 'A' (Any). Assumptions: src has PyBUF_FULL information, src->ndim >= 1,
   len(mem) == src->len.

   Returns 0 on success, -1 with an exception set on error. */
static int
buffer_to_contiguous(char *mem, Py_buffer *src, char order)
{
    Py_buffer dest;
    Py_ssize_t *strides;
    int ret;

    assert(src->ndim >= 1);
    assert(src->shape != NULL);
    assert(src->strides != NULL);

    strides = PyMem_Malloc(src->ndim * (sizeof *src->strides));
    if (strides == NULL) {
        PyErr_NoMemory();
        return -1;
    }

    /* initialize dest */
    dest = *src;
    dest.buf = mem;
    /* shape is constant and shared: the logical representation of the
       array is unaltered. */

    /* The physical representation determined by strides (and possibly
       suboffsets) may change. */
    dest.strides = strides;
    if (order == 'C' || order == 'A') {
        /* 'A' (any) defaults to C order. */
        init_strides_from_shape(&dest);
    }
    else {
        init_fortran_strides_from_shape(&dest);
    }

    /* The copy target is plain contiguous memory: no indirection. */
    dest.suboffsets = NULL;

    ret = copy_buffer(&dest, src);

    PyMem_Free(strides);
    return ret;
}
511
512
513 /****************************************************************************/
514 /* Constructors */
515 /****************************************************************************/
516
517 /* Initialize values that are shared with the managed buffer. */
518 static inline void
init_shared_values(Py_buffer * dest,const Py_buffer * src)519 init_shared_values(Py_buffer *dest, const Py_buffer *src)
520 {
521 dest->obj = src->obj;
522 dest->buf = src->buf;
523 dest->len = src->len;
524 dest->itemsize = src->itemsize;
525 dest->readonly = src->readonly;
526 dest->format = src->format ? src->format : "B";
527 dest->internal = src->internal;
528 }
529
530 /* Copy shape and strides. Reconstruct missing values. */
531 static void
init_shape_strides(Py_buffer * dest,const Py_buffer * src)532 init_shape_strides(Py_buffer *dest, const Py_buffer *src)
533 {
534 Py_ssize_t i;
535
536 if (src->ndim == 0) {
537 dest->shape = NULL;
538 dest->strides = NULL;
539 return;
540 }
541 if (src->ndim == 1) {
542 dest->shape[0] = src->shape ? src->shape[0] : src->len / src->itemsize;
543 dest->strides[0] = src->strides ? src->strides[0] : src->itemsize;
544 return;
545 }
546
547 for (i = 0; i < src->ndim; i++)
548 dest->shape[i] = src->shape[i];
549 if (src->strides) {
550 for (i = 0; i < src->ndim; i++)
551 dest->strides[i] = src->strides[i];
552 }
553 else {
554 init_strides_from_shape(dest);
555 }
556 }
557
558 static inline void
init_suboffsets(Py_buffer * dest,const Py_buffer * src)559 init_suboffsets(Py_buffer *dest, const Py_buffer *src)
560 {
561 Py_ssize_t i;
562
563 if (src->suboffsets == NULL) {
564 dest->suboffsets = NULL;
565 return;
566 }
567 for (i = 0; i < src->ndim; i++)
568 dest->suboffsets[i] = src->suboffsets[i];
569 }
570
571 /* len = product(shape) * itemsize */
572 static inline void
init_len(Py_buffer * view)573 init_len(Py_buffer *view)
574 {
575 Py_ssize_t i, len;
576
577 len = 1;
578 for (i = 0; i < view->ndim; i++)
579 len *= view->shape[i];
580 len *= view->itemsize;
581
582 view->len = len;
583 }
584
/* Initialize memoryview buffer properties (the _Py_MEMORYVIEW_* flag bits
   caching scalar/C/Fortran contiguity and PIL-style indirection). */
static void
init_flags(PyMemoryViewObject *mv)
{
    const Py_buffer *view = &mv->view;
    int flags = 0;

    switch (view->ndim) {
    case 0:
        /* A scalar is trivially contiguous in every order. */
        flags |= (_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|
                  _Py_MEMORYVIEW_FORTRAN);
        break;
    case 1:
        /* 1-D: C-contiguous and Fortran-contiguous coincide. */
        if (MV_CONTIGUOUS_NDIM1(view))
            flags |= (_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
        break;
    default:
        if (PyBuffer_IsContiguous(view, 'C'))
            flags |= _Py_MEMORYVIEW_C;
        if (PyBuffer_IsContiguous(view, 'F'))
            flags |= _Py_MEMORYVIEW_FORTRAN;
        break;
    }

    /* Suboffsets (PIL-style arrays) override any contiguity claim. */
    if (view->suboffsets) {
        flags |= _Py_MEMORYVIEW_PIL;
        flags &= ~(_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
    }

    mv->flags = flags;
}
616
/* Allocate a new memoryview and perform basic initialization. New memoryviews
   are exclusively created through the mbuf_add functions.

   The variable-size tail (ob_array) provides 3*ndim Py_ssize_t slots that
   back view.shape, view.strides and view.suboffsets, in that order. */
static inline PyMemoryViewObject *
memory_alloc(int ndim)
{
    PyMemoryViewObject *mv;

    mv = (PyMemoryViewObject *)
        PyObject_GC_NewVar(PyMemoryViewObject, &PyMemoryView_Type, 3*ndim);
    if (mv == NULL)
        return NULL;

    /* All fields must be valid before the object is GC-tracked. */
    mv->mbuf = NULL;
    mv->hash = -1;    /* hash not computed yet */
    mv->flags = 0;
    mv->exports = 0;
    mv->view.ndim = ndim;
    mv->view.shape = mv->ob_array;
    mv->view.strides = mv->ob_array + ndim;
    mv->view.suboffsets = mv->ob_array + 2 * ndim;
    mv->weakreflist = NULL;

    _PyObject_GC_TRACK(mv);
    return mv;
}
642
/*
   Return a new memoryview that is registered with mbuf. If src is NULL,
   use mbuf->master as the underlying buffer. Otherwise, use src.

   The new memoryview has full buffer information: shape and strides
   are always present, suboffsets as needed. Arrays are copied to
   the memoryview's ob_array field.
*/
static PyObject *
mbuf_add_view(_PyManagedBufferObject *mbuf, const Py_buffer *src)
{
    PyMemoryViewObject *mv;
    Py_buffer *dest;

    if (src == NULL)
        src = &mbuf->master;

    if (src->ndim > PyBUF_MAX_NDIM) {
        PyErr_SetString(PyExc_ValueError,
            "memoryview: number of dimensions must not exceed "
            Py_STRINGIFY(PyBUF_MAX_NDIM));
        return NULL;
    }

    mv = memory_alloc(src->ndim);
    if (mv == NULL)
        return NULL;

    dest = &mv->view;
    init_shared_values(dest, src);
    init_shape_strides(dest, src);
    init_suboffsets(dest, src);
    init_flags(mv);

    /* Register with the managed buffer: hold a reference and bump the
       export count so mbuf_release() is deferred until the last view
       is gone. */
    mv->mbuf = mbuf;
    Py_INCREF(mbuf);
    mbuf->exports++;

    return (PyObject *)mv;
}
683
684 /* Register an incomplete view: shape, strides, suboffsets and flags still
685 need to be initialized. Use 'ndim' instead of src->ndim to determine the
686 size of the memoryview's ob_array.
687
688 Assumption: ndim <= PyBUF_MAX_NDIM. */
689 static PyObject *
mbuf_add_incomplete_view(_PyManagedBufferObject * mbuf,const Py_buffer * src,int ndim)690 mbuf_add_incomplete_view(_PyManagedBufferObject *mbuf, const Py_buffer *src,
691 int ndim)
692 {
693 PyMemoryViewObject *mv;
694 Py_buffer *dest;
695
696 if (src == NULL)
697 src = &mbuf->master;
698
699 assert(ndim <= PyBUF_MAX_NDIM);
700
701 mv = memory_alloc(ndim);
702 if (mv == NULL)
703 return NULL;
704
705 dest = &mv->view;
706 init_shared_values(dest, src);
707
708 mv->mbuf = mbuf;
709 Py_INCREF(mbuf);
710 mbuf->exports++;
711
712 return (PyObject *)mv;
713 }
714
715 /* Expose a raw memory area as a view of contiguous bytes. flags can be
716 PyBUF_READ or PyBUF_WRITE. view->format is set to "B" (unsigned bytes).
717 The memoryview has complete buffer information. */
718 PyObject *
PyMemoryView_FromMemory(char * mem,Py_ssize_t size,int flags)719 PyMemoryView_FromMemory(char *mem, Py_ssize_t size, int flags)
720 {
721 _PyManagedBufferObject *mbuf;
722 PyObject *mv;
723 int readonly;
724
725 assert(mem != NULL);
726 assert(flags == PyBUF_READ || flags == PyBUF_WRITE);
727
728 mbuf = mbuf_alloc();
729 if (mbuf == NULL)
730 return NULL;
731
732 readonly = (flags == PyBUF_WRITE) ? 0 : 1;
733 (void)PyBuffer_FillInfo(&mbuf->master, NULL, mem, size, readonly,
734 PyBUF_FULL_RO);
735
736 mv = mbuf_add_view(mbuf, NULL);
737 Py_DECREF(mbuf);
738
739 return mv;
740 }
741
/* Create a memoryview from a given Py_buffer. For simple byte views,
   PyMemoryView_FromMemory() should be used instead.
   This function is the only entry point that can create a master buffer
   without full information. Because of this fact init_shape_strides()
   must be able to reconstruct missing values. */
PyObject *
PyMemoryView_FromBuffer(Py_buffer *info)
{
    _PyManagedBufferObject *mbuf;
    PyObject *mv;

    if (info->buf == NULL) {
        PyErr_SetString(PyExc_ValueError,
            "PyMemoryView_FromBuffer(): info->buf must not be NULL");
        return NULL;
    }

    mbuf = mbuf_alloc();
    if (mbuf == NULL)
        return NULL;

    /* info->obj is either NULL or a borrowed reference. This reference
       should not be decremented in PyBuffer_Release(). */
    mbuf->master = *info;
    mbuf->master.obj = NULL;

    mv = mbuf_add_view(mbuf, NULL);
    Py_DECREF(mbuf);

    return mv;
}
773
774 /* Create a memoryview from an object that implements the buffer protocol.
775 If the object is a memoryview, the new memoryview must be registered
776 with the same managed buffer. Otherwise, a new managed buffer is created. */
777 PyObject *
PyMemoryView_FromObject(PyObject * v)778 PyMemoryView_FromObject(PyObject *v)
779 {
780 _PyManagedBufferObject *mbuf;
781
782 if (PyMemoryView_Check(v)) {
783 PyMemoryViewObject *mv = (PyMemoryViewObject *)v;
784 CHECK_RELEASED(mv);
785 return mbuf_add_view(mv->mbuf, &mv->view);
786 }
787 else if (PyObject_CheckBuffer(v)) {
788 PyObject *ret;
789 mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(v);
790 if (mbuf == NULL)
791 return NULL;
792 ret = mbuf_add_view(mbuf, NULL);
793 Py_DECREF(mbuf);
794 return ret;
795 }
796
797 PyErr_Format(PyExc_TypeError,
798 "memoryview: a bytes-like object is required, not '%.200s'",
799 Py_TYPE(v)->tp_name);
800 return NULL;
801 }
802
803 /* Copy the format string from a base object that might vanish. */
804 static int
mbuf_copy_format(_PyManagedBufferObject * mbuf,const char * fmt)805 mbuf_copy_format(_PyManagedBufferObject *mbuf, const char *fmt)
806 {
807 if (fmt != NULL) {
808 char *cp = PyMem_Malloc(strlen(fmt)+1);
809 if (cp == NULL) {
810 PyErr_NoMemory();
811 return -1;
812 }
813 mbuf->master.format = strcpy(cp, fmt);
814 mbuf->flags |= _Py_MANAGED_BUFFER_FREE_FORMAT;
815 }
816
817 return 0;
818 }
819
/*
   Return a memoryview that is based on a contiguous copy of src.
   Assumptions: src has PyBUF_FULL_RO information, src->ndim > 0.

   Ownership rules:
     1) As usual, the returned memoryview has a private copy
        of src->shape, src->strides and src->suboffsets.
     2) src->format is copied to the master buffer and released
        in mbuf_dealloc(). The releasebufferproc of the bytes
        object is NULL, so it does not matter that mbuf_release()
        passes the altered format pointer to PyBuffer_Release().
*/
static PyObject *
memory_from_contiguous_copy(Py_buffer *src, char order)
{
    _PyManagedBufferObject *mbuf;
    PyMemoryViewObject *mv;
    PyObject *bytes;
    Py_buffer *dest;
    int i;

    assert(src->ndim > 0);
    assert(src->shape != NULL);

    /* The copy's backing store is a fresh bytes object of src->len bytes. */
    bytes = PyBytes_FromStringAndSize(NULL, src->len);
    if (bytes == NULL)
        return NULL;

    mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(bytes);
    Py_DECREF(bytes);  /* mbuf->master now keeps the bytes object alive */
    if (mbuf == NULL)
        return NULL;

    if (mbuf_copy_format(mbuf, src->format) < 0) {
        Py_DECREF(mbuf);
        return NULL;
    }

    mv = (PyMemoryViewObject *)mbuf_add_incomplete_view(mbuf, NULL, src->ndim);
    Py_DECREF(mbuf);
    if (mv == NULL)
        return NULL;

    dest = &mv->view;

    /* shared values are initialized correctly except for itemsize */
    dest->itemsize = src->itemsize;

    /* shape and strides */
    for (i = 0; i < src->ndim; i++) {
        dest->shape[i] = src->shape[i];
    }
    if (order == 'C' || order == 'A') {
        init_strides_from_shape(dest);
    }
    else {
        init_fortran_strides_from_shape(dest);
    }
    /* suboffsets */
    dest->suboffsets = NULL;

    /* flags */
    init_flags(mv);

    /* Finally fill the bytes object with the (possibly strided) source. */
    if (copy_buffer(dest, src) < 0) {
        Py_DECREF(mv);
        return NULL;
    }

    return (PyObject *)mv;
}
891
/*
   Return a new memoryview object based on a contiguous exporter with
   buffertype={PyBUF_READ, PyBUF_WRITE} and order={'C', 'F'ortran, or 'A'ny}.
   The logical structure of the input and output buffers is the same
   (i.e. tolist(input) == tolist(output)), but the physical layout in
   memory can be explicitly chosen.

   As usual, if buffertype=PyBUF_WRITE, the exporter's buffer must be writable,
   otherwise it may be writable or read-only.

   If the exporter is already contiguous with the desired target order,
   the memoryview will be directly based on the exporter.

   Otherwise, if the buffertype is PyBUF_READ, the memoryview will be
   based on a new bytes object. If order={'C', 'A'ny}, use 'C' order,
   'F'ortran order otherwise.
*/
PyObject *
PyMemoryView_GetContiguous(PyObject *obj, int buffertype, char order)
{
    PyMemoryViewObject *mv;
    PyObject *ret;
    Py_buffer *view;

    assert(buffertype == PyBUF_READ || buffertype == PyBUF_WRITE);
    assert(order == 'C' || order == 'F' || order == 'A');

    mv = (PyMemoryViewObject *)PyMemoryView_FromObject(obj);
    if (mv == NULL)
        return NULL;

    view = &mv->view;
    if (buffertype == PyBUF_WRITE && view->readonly) {
        PyErr_SetString(PyExc_BufferError,
            "underlying buffer is not writable");
        Py_DECREF(mv);
        return NULL;
    }

    /* Already contiguous in the requested order: hand out the direct view. */
    if (PyBuffer_IsContiguous(view, order))
        return (PyObject *)mv;

    /* A writable contiguous copy would not write back to the exporter,
       so it cannot be provided. */
    if (buffertype == PyBUF_WRITE) {
        PyErr_SetString(PyExc_BufferError,
            "writable contiguous buffer requested "
            "for a non-contiguous object.");
        Py_DECREF(mv);
        return NULL;
    }

    ret = memory_from_contiguous_copy(view, order);
    Py_DECREF(mv);
    return ret;
}
946
947
948 static PyObject *
memory_new(PyTypeObject * subtype,PyObject * args,PyObject * kwds)949 memory_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
950 {
951 PyObject *obj;
952 static char *kwlist[] = {"object", NULL};
953
954 if (!PyArg_ParseTupleAndKeywords(args, kwds, "O:memoryview", kwlist,
955 &obj)) {
956 return NULL;
957 }
958
959 return PyMemoryView_FromObject(obj);
960 }
961
962
/****************************************************************************/
/*                         Previously in abstract.c                         */
/****************************************************************************/

/* A Py_buffer plus inline storage for shape, strides and suboffsets
   (3 * ndim Py_ssize_t values allocated past the struct).  'array[1]'
   is the pre-C99 flexible-array-member idiom. */
typedef struct {
    Py_buffer view;
    Py_ssize_t array[1];
} Py_buffer_full;
971
/* Copy the contents of 'src' into the caller-provided contiguous buffer
   'buf' (len bytes) in the requested order ('C', 'F' or 'A').
   Returns 0 on success, -1 with an exception set on error. */
int
PyBuffer_ToContiguous(void *buf, Py_buffer *src, Py_ssize_t len, char order)
{
    Py_buffer_full *fb = NULL;
    int ret;

    assert(order == 'C' || order == 'F' || order == 'A');

    if (len != src->len) {
        PyErr_SetString(PyExc_ValueError,
            "PyBuffer_ToContiguous: len != view->len");
        return -1;
    }

    /* Fast path: already contiguous in the requested order. */
    if (PyBuffer_IsContiguous(src, order)) {
        memcpy((char *)buf, src->buf, len);
        return 0;
    }

    /* buffer_to_contiguous() assumes PyBUF_FULL */
    fb = PyMem_Malloc(sizeof *fb + 3 * src->ndim * (sizeof *fb->array));
    if (fb == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    /* Carve shape/strides/suboffsets out of the inline array. */
    fb->view.ndim = src->ndim;
    fb->view.shape = fb->array;
    fb->view.strides = fb->array + src->ndim;
    fb->view.suboffsets = fb->array + 2 * src->ndim;

    init_shared_values(&fb->view, src);
    init_shape_strides(&fb->view, src);
    init_suboffsets(&fb->view, src);

    src = &fb->view;

    ret = buffer_to_contiguous(buf, src, order);
    PyMem_Free(fb);
    return ret;
}
1012
1013
1014 /****************************************************************************/
1015 /* Release/GC management */
1016 /****************************************************************************/
1017
1018 /* Inform the managed buffer that this particular memoryview will not access
1019 the underlying buffer again. If no other memoryviews are registered with
1020 the managed buffer, the underlying buffer is released instantly and
1021 marked as inaccessible for both the memoryview and the managed buffer.
1022
1023 This function fails if the memoryview itself has exported buffers. */
1024 static int
_memory_release(PyMemoryViewObject * self)1025 _memory_release(PyMemoryViewObject *self)
1026 {
1027 if (self->flags & _Py_MEMORYVIEW_RELEASED)
1028 return 0;
1029
1030 if (self->exports == 0) {
1031 self->flags |= _Py_MEMORYVIEW_RELEASED;
1032 assert(self->mbuf->exports > 0);
1033 if (--self->mbuf->exports == 0)
1034 mbuf_release(self->mbuf);
1035 return 0;
1036 }
1037 if (self->exports > 0) {
1038 PyErr_Format(PyExc_BufferError,
1039 "memoryview has %zd exported buffer%s", self->exports,
1040 self->exports==1 ? "" : "s");
1041 return -1;
1042 }
1043
1044 Py_FatalError("_memory_release(): negative export count");
1045 return -1;
1046 }
1047
1048 static PyObject *
memory_release(PyMemoryViewObject * self,PyObject * noargs)1049 memory_release(PyMemoryViewObject *self, PyObject *noargs)
1050 {
1051 if (_memory_release(self) < 0)
1052 return NULL;
1053 Py_RETURN_NONE;
1054 }
1055
static void
memory_dealloc(PyMemoryViewObject *self)
{
    /* Deallocation: by the time we get here no buffers may be exported. */
    assert(self->exports == 0);
    _PyObject_GC_UNTRACK(self);   /* untrack before tearing anything down */
    (void)_memory_release(self);  /* cannot fail since exports == 0 */
    Py_CLEAR(self->mbuf);
    if (self->weakreflist != NULL)
        PyObject_ClearWeakRefs((PyObject *) self);
    PyObject_GC_Del(self);
}
1067
static int
memory_traverse(PyMemoryViewObject *self, visitproc visit, void *arg)
{
    /* GC traversal: the managed buffer is the only PyObject we own. */
    Py_VISIT(self->mbuf);
    return 0;
}
1074
static int
memory_clear(PyMemoryViewObject *self)
{
    /* GC clear: best-effort release (errors ignored), then drop the
       reference to the managed buffer. */
    (void)_memory_release(self);
    Py_CLEAR(self->mbuf);
    return 0;
}
1082
static PyObject *
memory_enter(PyObject *self, PyObject *args)
{
    /* memoryview.__enter__(): error out if already released (the
       CHECK_RELEASED macro returns on failure), else return self. */
    CHECK_RELEASED(self);
    Py_INCREF(self);
    return self;
}
1090
static PyObject *
memory_exit(PyObject *self, PyObject *args)
{
    /* memoryview.__exit__(): release the buffer; errors propagate. */
    return memory_release((PyMemoryViewObject *)self, NULL);
}
1096
1097
1098 /****************************************************************************/
1099 /* Casting format and shape */
1100 /****************************************************************************/
1101
/* True if the single-character format 'f' denotes a byte type.
   The argument is parenthesized so that expression arguments
   (e.g. *p, a ? b : c) expand correctly. */
#define IS_BYTE_FORMAT(f) ((f) == 'b' || (f) == 'B' || (f) == 'c')
1103
/* If 'fmt' is a single native format character, optionally prefixed with
   '@', store the character in *result and return the size in bytes of the
   corresponding C type. Return -1 (without setting an exception) for any
   other string. */
static inline Py_ssize_t
get_native_fmtchar(char *result, const char *fmt)
{
    Py_ssize_t size = -1;

    if (fmt[0] == '@') fmt++;

    switch (fmt[0]) {
    case 'c': case 'b': case 'B': size = sizeof(char); break;
    case 'h': case 'H': size = sizeof(short); break;
    case 'i': case 'I': size = sizeof(int); break;
    case 'l': case 'L': size = sizeof(long); break;
    case 'q': case 'Q': size = sizeof(long long); break;
    case 'n': case 'N': size = sizeof(Py_ssize_t); break;
    case 'f': size = sizeof(float); break;
    case 'd': size = sizeof(double); break;
    case '?': size = sizeof(_Bool); break;
    case 'P': size = sizeof(void *); break;
    }

    /* Reject unknown codes and multi-character format strings. */
    if (size > 0 && fmt[1] == '\0') {
        *result = fmt[0];
        return size;
    }

    return -1;
}
1131
/* Map a single native format character, optionally prefixed with '@',
   to a statically allocated canonical format string ("l" or "@l").
   Return NULL for anything else (no exception set). */
static inline const char *
get_native_fmtstr(const char *fmt)
{
    /* codes[i] corresponds to strs[i]; strs[i]+1 skips the '@'. */
    static const char codes[] = "cbBhHiIlLqQnNfd?P";
    static const char strs[][3] = {
        "@c", "@b", "@B", "@h", "@H", "@i", "@I", "@l", "@L",
        "@q", "@Q", "@n", "@N", "@f", "@d", "@?", "@P"
    };
    int at = 0;
    size_t i;

    if (fmt[0] == '@') {
        at = 1;
        fmt++;
    }
    if (fmt[0] == '\0' || fmt[1] != '\0') {
        return NULL;
    }

    for (i = 0; codes[i] != '\0'; i++) {
        if (codes[i] == fmt[0]) {
            return at ? strs[i] : strs[i] + 1;
        }
    }

    return NULL;
}
1169
1170
1171 /* Cast a memoryview's data type to 'format'. The input array must be
1172 C-contiguous. At least one of input-format, output-format must have
1173 byte size. The output array is 1-D, with the same byte length as the
1174 input array. Thus, view->len must be a multiple of the new itemsize. */
static int
cast_to_1D(PyMemoryViewObject *mv, PyObject *format)
{
    /* See the comment above: flatten 'mv' to a 1-D view whose items have
       the type given by 'format'. Returns 0 on success, -1 with an
       exception set on error. */
    Py_buffer *view = &mv->view;
    PyObject *asciifmt;
    char srcchar, destchar;
    Py_ssize_t itemsize;
    int ret = -1;

    /* The shape/strides/suboffsets arrays must live in mv's tail storage
       (ob_array), set up by mbuf_add_incomplete_view(). */
    assert(view->ndim >= 1);
    assert(Py_SIZE(mv) == 3*view->ndim);
    assert(view->shape == mv->ob_array);
    assert(view->strides == mv->ob_array + view->ndim);
    assert(view->suboffsets == mv->ob_array + 2*view->ndim);

    asciifmt = PyUnicode_AsASCIIString(format);
    if (asciifmt == NULL)
        return ret;

    itemsize = get_native_fmtchar(&destchar, PyBytes_AS_STRING(asciifmt));
    if (itemsize < 0) {
        PyErr_SetString(PyExc_ValueError,
            "memoryview: destination format must be a native single "
            "character format prefixed with an optional '@'");
        goto out;
    }

    /* At least one of source and destination format must be a byte
       format ('b', 'B' or 'c'). */
    if ((get_native_fmtchar(&srcchar, view->format) < 0 ||
         !IS_BYTE_FORMAT(srcchar)) && !IS_BYTE_FORMAT(destchar)) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: cannot cast between two non-byte formats");
        goto out;
    }
    if (view->len % itemsize) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: length is not a multiple of itemsize");
        goto out;
    }

    /* Point view->format at a statically allocated canonical string, so
       no ownership of 'asciifmt' is needed after this function returns. */
    view->format = (char *)get_native_fmtstr(PyBytes_AS_STRING(asciifmt));
    if (view->format == NULL) {
        /* NOT_REACHED: get_native_fmtchar() already validates the format. */
        PyErr_SetString(PyExc_RuntimeError,
            "memoryview: internal error");
        goto out;
    }
    view->itemsize = itemsize;

    /* Flatten: len == shape[0] * itemsize holds by the divisibility
       check above. */
    view->ndim = 1;
    view->shape[0] = view->len / view->itemsize;
    view->strides[0] = view->itemsize;
    view->suboffsets = NULL;

    init_flags(mv);

    ret = 0;

out:
    Py_DECREF(asciifmt);
    return ret;
}
1236
1237 /* The memoryview must have space for 3*len(seq) elements. */
1238 static Py_ssize_t
copy_shape(Py_ssize_t * shape,const PyObject * seq,Py_ssize_t ndim,Py_ssize_t itemsize)1239 copy_shape(Py_ssize_t *shape, const PyObject *seq, Py_ssize_t ndim,
1240 Py_ssize_t itemsize)
1241 {
1242 Py_ssize_t x, i;
1243 Py_ssize_t len = itemsize;
1244
1245 for (i = 0; i < ndim; i++) {
1246 PyObject *tmp = PySequence_Fast_GET_ITEM(seq, i);
1247 if (!PyLong_Check(tmp)) {
1248 PyErr_SetString(PyExc_TypeError,
1249 "memoryview.cast(): elements of shape must be integers");
1250 return -1;
1251 }
1252 x = PyLong_AsSsize_t(tmp);
1253 if (x == -1 && PyErr_Occurred()) {
1254 return -1;
1255 }
1256 if (x <= 0) {
1257 /* In general elements of shape may be 0, but not for casting. */
1258 PyErr_Format(PyExc_ValueError,
1259 "memoryview.cast(): elements of shape must be integers > 0");
1260 return -1;
1261 }
1262 if (x > PY_SSIZE_T_MAX / len) {
1263 PyErr_Format(PyExc_ValueError,
1264 "memoryview.cast(): product(shape) > SSIZE_MAX");
1265 return -1;
1266 }
1267 len *= x;
1268 shape[i] = x;
1269 }
1270
1271 return len;
1272 }
1273
1274 /* Cast a 1-D array to a new shape. The result array will be C-contiguous.
1275 If the result array does not have exactly the same byte length as the
1276 input array, raise ValueError. */
static int
cast_to_ND(PyMemoryViewObject *mv, const PyObject *shape, int ndim)
{
    /* See the comment above: reshape the 1-D view produced by
       cast_to_1D() into an 'ndim'-dimensional C-contiguous view. */
    Py_buffer *view = &mv->view;
    Py_ssize_t len;

    assert(view->ndim == 1); /* ndim from cast_to_1D() */
    assert(Py_SIZE(mv) == 3*(ndim==0?1:ndim)); /* ndim of result array */
    assert(view->shape == mv->ob_array);
    assert(view->strides == mv->ob_array + (ndim==0?1:ndim));
    assert(view->suboffsets == NULL);

    view->ndim = ndim;
    if (view->ndim == 0) {
        /* 0-d result: a single item, no shape or strides. */
        view->shape = NULL;
        view->strides = NULL;
        len = view->itemsize;
    }
    else {
        len = copy_shape(view->shape, shape, ndim, view->itemsize);
        if (len < 0)
            return -1;
        init_strides_from_shape(view);  /* C-contiguous strides */
    }

    /* The cast must preserve the exact byte length of the input. */
    if (view->len != len) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: product(shape) * itemsize != buffer size");
        return -1;
    }

    init_flags(mv);

    return 0;
}
1312
1313 static int
zero_in_shape(PyMemoryViewObject * mv)1314 zero_in_shape(PyMemoryViewObject *mv)
1315 {
1316 Py_buffer *view = &mv->view;
1317 Py_ssize_t i;
1318
1319 for (i = 0; i < view->ndim; i++)
1320 if (view->shape[i] == 0)
1321 return 1;
1322
1323 return 0;
1324 }
1325
1326 /*
1327 Cast a copy of 'self' to a different view. The input view must
1328 be C-contiguous. The function always casts the input view to a
1329 1-D output according to 'format'. At least one of input-format,
1330 output-format must have byte size.
1331
1332 If 'shape' is given, the 1-D view from the previous step will
1333 be cast to a C-contiguous view with new shape and strides.
1334
1335 All casts must result in views that will have the exact byte
1336 size of the original input. Otherwise, an error is raised.
1337 */
static PyObject *
memory_cast(PyMemoryViewObject *self, PyObject *args, PyObject *kwds)
{
    /* memoryview.cast(format, shape=None): see the comment above. */
    static char *kwlist[] = {"format", "shape", NULL};
    PyMemoryViewObject *mv = NULL;
    PyObject *shape = NULL;
    PyObject *format;
    Py_ssize_t ndim = 1;

    CHECK_RELEASED(self);

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", kwlist,
                                     &format, &shape)) {
        return NULL;
    }
    if (!PyUnicode_Check(format)) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: format argument must be a string");
        return NULL;
    }
    if (!MV_C_CONTIGUOUS(self->flags)) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: casts are restricted to C-contiguous views");
        return NULL;
    }
    /* Zeros in the shape are tolerated only for a plain 1D -> 1D cast
       without an explicit shape argument. */
    if ((shape || self->view.ndim != 1) && zero_in_shape(self)) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: cannot cast view with zeros in shape or strides");
        return NULL;
    }
    if (shape) {
        CHECK_LIST_OR_TUPLE(shape)
        ndim = PySequence_Fast_GET_SIZE(shape);
        if (ndim > PyBUF_MAX_NDIM) {
            PyErr_SetString(PyExc_ValueError,
                "memoryview: number of dimensions must not exceed "
                Py_STRINGIFY(PyBUF_MAX_NDIM));
            return NULL;
        }
        if (self->view.ndim != 1 && ndim != 1) {
            PyErr_SetString(PyExc_TypeError,
                "memoryview: cast must be 1D -> ND or ND -> 1D");
            return NULL;
        }
    }

    /* An ndim==0 result still needs one slot of array scratch space for
       the intermediate 1-D representation. */
    mv = (PyMemoryViewObject *)
        mbuf_add_incomplete_view(self->mbuf, &self->view, ndim==0 ? 1 : (int)ndim);
    if (mv == NULL)
        return NULL;

    /* First flatten to 1-D with the new format, then optionally reshape
       to the requested N-D form. */
    if (cast_to_1D(mv, format) < 0)
        goto error;
    if (shape && cast_to_ND(mv, shape, (int)ndim) < 0)
        goto error;

    return (PyObject *)mv;

error:
    Py_DECREF(mv);
    return NULL;
}
1400
1401
1402 /**************************************************************************/
1403 /* getbuffer */
1404 /**************************************************************************/
1405
static int
memory_getbuf(PyMemoryViewObject *self, Py_buffer *view, int flags)
{
    /* getbufferproc for memoryview: fill '*view' from the private
       snapshot in self->view, honoring the request 'flags'. On success,
       view->obj holds a new reference to self and the export count is
       incremented. Returns -1 with BufferError set if the request
       cannot be satisfied. */
    Py_buffer *base = &self->view;
    int baseflags = self->flags;

    CHECK_RELEASED_INT(self);

    /* start with complete information */
    *view = *base;
    view->obj = NULL;

    if (REQ_WRITABLE(flags) && base->readonly) {
        PyErr_SetString(PyExc_BufferError,
            "memoryview: underlying buffer is not writable");
        return -1;
    }
    if (!REQ_FORMAT(flags)) {
        /* NULL indicates that the buffer's data type has been cast to 'B'.
           view->itemsize is the _previous_ itemsize. If shape is present,
           the equality product(shape) * itemsize = len still holds at this
           point. The equality calcsize(format) = itemsize does _not_ hold
           from here on! */
        view->format = NULL;
    }

    /* Contiguity requests are answered from the cached flags; the
       snapshot itself is never re-arranged. */
    if (REQ_C_CONTIGUOUS(flags) && !MV_C_CONTIGUOUS(baseflags)) {
        PyErr_SetString(PyExc_BufferError,
            "memoryview: underlying buffer is not C-contiguous");
        return -1;
    }
    if (REQ_F_CONTIGUOUS(flags) && !MV_F_CONTIGUOUS(baseflags)) {
        PyErr_SetString(PyExc_BufferError,
            "memoryview: underlying buffer is not Fortran contiguous");
        return -1;
    }
    if (REQ_ANY_CONTIGUOUS(flags) && !MV_ANY_CONTIGUOUS(baseflags)) {
        PyErr_SetString(PyExc_BufferError,
            "memoryview: underlying buffer is not contiguous");
        return -1;
    }
    if (!REQ_INDIRECT(flags) && (baseflags & _Py_MEMORYVIEW_PIL)) {
        PyErr_SetString(PyExc_BufferError,
            "memoryview: underlying buffer requires suboffsets");
        return -1;
    }
    if (!REQ_STRIDES(flags)) {
        if (!MV_C_CONTIGUOUS(baseflags)) {
            PyErr_SetString(PyExc_BufferError,
                "memoryview: underlying buffer is not C-contiguous");
            return -1;
        }
        view->strides = NULL;
    }
    if (!REQ_SHAPE(flags)) {
        /* PyBUF_SIMPLE or PyBUF_WRITABLE: at this point buf is C-contiguous,
           so base->buf = ndbuf->data. */
        if (view->format != NULL) {
            /* PyBUF_SIMPLE|PyBUF_FORMAT and PyBUF_WRITABLE|PyBUF_FORMAT do
               not make sense. */
            PyErr_Format(PyExc_BufferError,
                "memoryview: cannot cast to unsigned bytes if the format flag "
                "is present");
            return -1;
        }
        /* product(shape) * itemsize = len and calcsize(format) = itemsize
           do _not_ hold from here on! */
        view->ndim = 1;
        view->shape = NULL;
    }


    view->obj = (PyObject *)self;
    Py_INCREF(view->obj);
    self->exports++;

    return 0;
}
1484
1485 static void
memory_releasebuf(PyMemoryViewObject * self,Py_buffer * view)1486 memory_releasebuf(PyMemoryViewObject *self, Py_buffer *view)
1487 {
1488 self->exports--;
1489 return;
1490 /* PyBuffer_Release() decrements view->obj after this function returns. */
1491 }
1492
/* Buffer methods: a memoryview re-exports its private snapshot through
   the buffer protocol. */
static PyBufferProcs memory_as_buffer = {
    (getbufferproc)memory_getbuf,         /* bf_getbuffer */
    (releasebufferproc)memory_releasebuf, /* bf_releasebuffer */
};
1498
1499
1500 /****************************************************************************/
1501 /* Optimized pack/unpack for all native format specifiers */
1502 /****************************************************************************/
1503
1504 /*
1505 Fix exceptions:
1506 1) Include format string in the error message.
1507 2) OverflowError -> ValueError.
1508 3) The error message from PyNumber_Index() is not ideal.
1509 */
1510 static int
type_error_int(const char * fmt)1511 type_error_int(const char *fmt)
1512 {
1513 PyErr_Format(PyExc_TypeError,
1514 "memoryview: invalid type for format '%s'", fmt);
1515 return -1;
1516 }
1517
1518 static int
value_error_int(const char * fmt)1519 value_error_int(const char *fmt)
1520 {
1521 PyErr_Format(PyExc_ValueError,
1522 "memoryview: invalid value for format '%s'", fmt);
1523 return -1;
1524 }
1525
1526 static int
fix_error_int(const char * fmt)1527 fix_error_int(const char *fmt)
1528 {
1529 assert(PyErr_Occurred());
1530 if (PyErr_ExceptionMatches(PyExc_TypeError)) {
1531 PyErr_Clear();
1532 return type_error_int(fmt);
1533 }
1534 else if (PyErr_ExceptionMatches(PyExc_OverflowError) ||
1535 PyErr_ExceptionMatches(PyExc_ValueError)) {
1536 PyErr_Clear();
1537 return value_error_int(fmt);
1538 }
1539
1540 return -1;
1541 }
1542
1543 /* Accept integer objects or objects with an __index__() method. */
1544 static long
pylong_as_ld(PyObject * item)1545 pylong_as_ld(PyObject *item)
1546 {
1547 PyObject *tmp;
1548 long ld;
1549
1550 tmp = PyNumber_Index(item);
1551 if (tmp == NULL)
1552 return -1;
1553
1554 ld = PyLong_AsLong(tmp);
1555 Py_DECREF(tmp);
1556 return ld;
1557 }
1558
1559 static unsigned long
pylong_as_lu(PyObject * item)1560 pylong_as_lu(PyObject *item)
1561 {
1562 PyObject *tmp;
1563 unsigned long lu;
1564
1565 tmp = PyNumber_Index(item);
1566 if (tmp == NULL)
1567 return (unsigned long)-1;
1568
1569 lu = PyLong_AsUnsignedLong(tmp);
1570 Py_DECREF(tmp);
1571 return lu;
1572 }
1573
1574 static long long
pylong_as_lld(PyObject * item)1575 pylong_as_lld(PyObject *item)
1576 {
1577 PyObject *tmp;
1578 long long lld;
1579
1580 tmp = PyNumber_Index(item);
1581 if (tmp == NULL)
1582 return -1;
1583
1584 lld = PyLong_AsLongLong(tmp);
1585 Py_DECREF(tmp);
1586 return lld;
1587 }
1588
1589 static unsigned long long
pylong_as_llu(PyObject * item)1590 pylong_as_llu(PyObject *item)
1591 {
1592 PyObject *tmp;
1593 unsigned long long llu;
1594
1595 tmp = PyNumber_Index(item);
1596 if (tmp == NULL)
1597 return (unsigned long long)-1;
1598
1599 llu = PyLong_AsUnsignedLongLong(tmp);
1600 Py_DECREF(tmp);
1601 return llu;
1602 }
1603
1604 static Py_ssize_t
pylong_as_zd(PyObject * item)1605 pylong_as_zd(PyObject *item)
1606 {
1607 PyObject *tmp;
1608 Py_ssize_t zd;
1609
1610 tmp = PyNumber_Index(item);
1611 if (tmp == NULL)
1612 return -1;
1613
1614 zd = PyLong_AsSsize_t(tmp);
1615 Py_DECREF(tmp);
1616 return zd;
1617 }
1618
1619 static size_t
pylong_as_zu(PyObject * item)1620 pylong_as_zu(PyObject *item)
1621 {
1622 PyObject *tmp;
1623 size_t zu;
1624
1625 tmp = PyNumber_Index(item);
1626 if (tmp == NULL)
1627 return (size_t)-1;
1628
1629 zu = PyLong_AsSize_t(tmp);
1630 Py_DECREF(tmp);
1631 return zu;
1632 }
1633
/* Timings with the ndarray from _testbuffer.c indicate that using the
   struct module is around 15x slower than the two functions below. */

/* Load one value of 'type' from the possibly unaligned 'ptr' into 'dest',
   going through memcpy to avoid misaligned access. */
#define UNPACK_SINGLE(dest, ptr, type) \
    do {                                   \
        type x;                            \
        memcpy((char *)&x, ptr, sizeof x); \
        dest = x;                          \
    } while (0)
1643
/* Unpack a single item. 'fmt' can be any native format character in struct
   module syntax. This function is very sensitive to small changes. With this
   layout gcc automatically generates a fast jump table.
   Returns a new reference, or NULL with NotImplementedError set for an
   unsupported format. */
static inline PyObject *
unpack_single(const char *ptr, const char *fmt)
{
    unsigned long long llu;
    unsigned long lu;
    size_t zu;
    long long lld;
    long ld;
    Py_ssize_t zd;
    double d;
    unsigned char uc;
    void *p;

    /* 'ptr' may be unaligned: all multi-byte loads go through
       UNPACK_SINGLE (memcpy). */
    switch (fmt[0]) {

    /* signed integers and fast path for 'B' */
    case 'B': uc = *((unsigned char *)ptr); goto convert_uc;
    case 'b': ld =   *((signed char *)ptr); goto convert_ld;
    case 'h': UNPACK_SINGLE(ld, ptr, short); goto convert_ld;
    case 'i': UNPACK_SINGLE(ld, ptr, int); goto convert_ld;
    case 'l': UNPACK_SINGLE(ld, ptr, long); goto convert_ld;

    /* boolean */
    case '?': UNPACK_SINGLE(ld, ptr, _Bool); goto convert_bool;

    /* unsigned integers */
    case 'H': UNPACK_SINGLE(lu, ptr, unsigned short); goto convert_lu;
    case 'I': UNPACK_SINGLE(lu, ptr, unsigned int); goto convert_lu;
    case 'L': UNPACK_SINGLE(lu, ptr, unsigned long); goto convert_lu;

    /* native 64-bit */
    case 'q': UNPACK_SINGLE(lld, ptr, long long); goto convert_lld;
    case 'Q': UNPACK_SINGLE(llu, ptr, unsigned long long); goto convert_llu;

    /* ssize_t and size_t */
    case 'n': UNPACK_SINGLE(zd, ptr, Py_ssize_t); goto convert_zd;
    case 'N': UNPACK_SINGLE(zu, ptr, size_t); goto convert_zu;

    /* floats */
    case 'f': UNPACK_SINGLE(d, ptr, float); goto convert_double;
    case 'd': UNPACK_SINGLE(d, ptr, double); goto convert_double;

    /* bytes object */
    case 'c': goto convert_bytes;

    /* pointer */
    case 'P': UNPACK_SINGLE(p, ptr, void *); goto convert_pointer;

    /* default */
    default: goto err_format;
    }

convert_uc:
    /* PyLong_FromUnsignedLong() is slower */
    return PyLong_FromLong(uc);
convert_ld:
    return PyLong_FromLong(ld);
convert_lu:
    return PyLong_FromUnsignedLong(lu);
convert_lld:
    return PyLong_FromLongLong(lld);
convert_llu:
    return PyLong_FromUnsignedLongLong(llu);
convert_zd:
    return PyLong_FromSsize_t(zd);
convert_zu:
    return PyLong_FromSize_t(zu);
convert_double:
    return PyFloat_FromDouble(d);
convert_bool:
    return PyBool_FromLong(ld);
convert_bytes:
    return PyBytes_FromStringAndSize(ptr, 1);
convert_pointer:
    return PyLong_FromVoidPtr(p);
err_format:
    PyErr_Format(PyExc_NotImplementedError,
        "memoryview: format %s not supported", fmt);
    return NULL;
}
1727
/* Convert 'src' to 'type' and copy the bytes of the result to the
   possibly unaligned destination 'ptr' via memcpy. */
#define PACK_SINGLE(ptr, src, type) \
    do {                                     \
        type x;                              \
        x = (type)src;                       \
        memcpy(ptr, (char *)&x, sizeof x);   \
    } while (0)
1734
/* Pack a single item. 'fmt' can be any native format character in
   struct module syntax. Returns 0 on success, -1 with an exception set
   on conversion failure, out-of-range value, or unsupported format. */
static int
pack_single(char *ptr, PyObject *item, const char *fmt)
{
    unsigned long long llu;
    unsigned long lu;
    size_t zu;
    long long lld;
    long ld;
    Py_ssize_t zd;
    double d;
    void *p;

    switch (fmt[0]) {
    /* signed integers */
    case 'b': case 'h': case 'i': case 'l':
        ld = pylong_as_ld(item);
        if (ld == -1 && PyErr_Occurred())
            goto err_occurred;
        /* Types narrower than long are range-checked before storing. */
        switch (fmt[0]) {
        case 'b':
            if (ld < SCHAR_MIN || ld > SCHAR_MAX) goto err_range;
            *((signed char *)ptr) = (signed char)ld; break;
        case 'h':
            if (ld < SHRT_MIN || ld > SHRT_MAX) goto err_range;
            PACK_SINGLE(ptr, ld, short); break;
        case 'i':
            if (ld < INT_MIN || ld > INT_MAX) goto err_range;
            PACK_SINGLE(ptr, ld, int); break;
        default: /* 'l' */
            PACK_SINGLE(ptr, ld, long); break;
        }
        break;

    /* unsigned integers */
    case 'B': case 'H': case 'I': case 'L':
        lu = pylong_as_lu(item);
        if (lu == (unsigned long)-1 && PyErr_Occurred())
            goto err_occurred;
        switch (fmt[0]) {
        case 'B':
            if (lu > UCHAR_MAX) goto err_range;
            *((unsigned char *)ptr) = (unsigned char)lu; break;
        case 'H':
            if (lu > USHRT_MAX) goto err_range;
            PACK_SINGLE(ptr, lu, unsigned short); break;
        case 'I':
            if (lu > UINT_MAX) goto err_range;
            PACK_SINGLE(ptr, lu, unsigned int); break;
        default: /* 'L' */
            PACK_SINGLE(ptr, lu, unsigned long); break;
        }
        break;

    /* native 64-bit */
    case 'q':
        lld = pylong_as_lld(item);
        if (lld == -1 && PyErr_Occurred())
            goto err_occurred;
        PACK_SINGLE(ptr, lld, long long);
        break;
    case 'Q':
        llu = pylong_as_llu(item);
        if (llu == (unsigned long long)-1 && PyErr_Occurred())
            goto err_occurred;
        PACK_SINGLE(ptr, llu, unsigned long long);
        break;

    /* ssize_t and size_t */
    case 'n':
        zd = pylong_as_zd(item);
        if (zd == -1 && PyErr_Occurred())
            goto err_occurred;
        PACK_SINGLE(ptr, zd, Py_ssize_t);
        break;
    case 'N':
        zu = pylong_as_zu(item);
        if (zu == (size_t)-1 && PyErr_Occurred())
            goto err_occurred;
        PACK_SINGLE(ptr, zu, size_t);
        break;

    /* floats */
    case 'f': case 'd':
        d = PyFloat_AsDouble(item);
        if (d == -1.0 && PyErr_Occurred())
            goto err_occurred;
        if (fmt[0] == 'f') {
            PACK_SINGLE(ptr, d, float);
        }
        else {
            PACK_SINGLE(ptr, d, double);
        }
        break;

    /* bool */
    case '?':
        ld = PyObject_IsTrue(item);
        if (ld < 0)
            return -1; /* preserve original error */
        PACK_SINGLE(ptr, ld, _Bool);
        break;

    /* bytes object */
    case 'c':
        if (!PyBytes_Check(item))
            return type_error_int(fmt);
        if (PyBytes_GET_SIZE(item) != 1)
            return value_error_int(fmt);
        *ptr = PyBytes_AS_STRING(item)[0];
        break;

    /* pointer */
    case 'P':
        p = PyLong_AsVoidPtr(item);
        if (p == NULL && PyErr_Occurred())
            goto err_occurred;
        PACK_SINGLE(ptr, p, void *);
        break;

    /* default */
    default: goto err_format;
    }

    return 0;

err_occurred:
    /* Rewrite the pending exception so that it names the format. */
    return fix_error_int(fmt);
err_range:
    return value_error_int(fmt);
err_format:
    PyErr_Format(PyExc_NotImplementedError,
        "memoryview: format %s not supported", fmt);
    return -1;
}
1871
1872
1873 /****************************************************************************/
1874 /* unpack using the struct module */
1875 /****************************************************************************/
1876
/* For reasonable performance it is necessary to cache all objects required
   for unpacking. An unpacker can handle the format passed to unpack_from().
   Invariant: All pointer fields of the struct should either be NULL or valid
   pointers. */
struct unpacker {
    PyObject *unpack_from;  /* bound method Struct(format).unpack_from */
    PyObject *mview;        /* cached writable memoryview over 'item' */
    char *item;             /* scratch buffer holding exactly one item */
    Py_ssize_t itemsize;    /* len(item) in bytes */
};
1887
1888 static struct unpacker *
unpacker_new(void)1889 unpacker_new(void)
1890 {
1891 struct unpacker *x = PyMem_Malloc(sizeof *x);
1892
1893 if (x == NULL) {
1894 PyErr_NoMemory();
1895 return NULL;
1896 }
1897
1898 x->unpack_from = NULL;
1899 x->mview = NULL;
1900 x->item = NULL;
1901 x->itemsize = 0;
1902
1903 return x;
1904 }
1905
1906 static void
unpacker_free(struct unpacker * x)1907 unpacker_free(struct unpacker *x)
1908 {
1909 if (x) {
1910 Py_XDECREF(x->unpack_from);
1911 Py_XDECREF(x->mview);
1912 PyMem_Free(x->item);
1913 PyMem_Free(x);
1914 }
1915 }
1916
/* Return a new unpacker for the given format, or NULL with an exception
   set. 'itemsize' is the byte size of one item in that format. */
static struct unpacker *
struct_get_unpacker(const char *fmt, Py_ssize_t itemsize)
{
    PyObject *structmodule; /* XXX cache these two */
    PyObject *Struct = NULL; /* XXX in globals? */
    PyObject *structobj = NULL;
    PyObject *format = NULL;
    struct unpacker *x = NULL;

    structmodule = PyImport_ImportModule("struct");
    if (structmodule == NULL)
        return NULL;

    Struct = PyObject_GetAttrString(structmodule, "Struct");
    Py_DECREF(structmodule);
    if (Struct == NULL)
        return NULL;

    x = unpacker_new();
    if (x == NULL)
        goto error;

    format = PyBytes_FromString(fmt);
    if (format == NULL)
        goto error;

    /* structobj = struct.Struct(format) */
    structobj = PyObject_CallFunctionObjArgs(Struct, format, NULL);
    if (structobj == NULL)
        goto error;

    x->unpack_from = PyObject_GetAttrString(structobj, "unpack_from");
    if (x->unpack_from == NULL)
        goto error;

    /* One-item scratch buffer, wrapped in a writable memoryview that can
       be passed directly to unpack_from(). */
    x->item = PyMem_Malloc(itemsize);
    if (x->item == NULL) {
        PyErr_NoMemory();
        goto error;
    }
    x->itemsize = itemsize;

    x->mview = PyMemoryView_FromMemory(x->item, itemsize, PyBUF_WRITE);
    if (x->mview == NULL)
        goto error;


out:
    Py_XDECREF(Struct);
    Py_XDECREF(format);
    Py_XDECREF(structobj);
    return x;

error:
    unpacker_free(x);
    x = NULL;
    goto out;
}
1975
1976 /* unpack a single item */
1977 static PyObject *
struct_unpack_single(const char * ptr,struct unpacker * x)1978 struct_unpack_single(const char *ptr, struct unpacker *x)
1979 {
1980 PyObject *v;
1981
1982 memcpy(x->item, ptr, x->itemsize);
1983 v = PyObject_CallFunctionObjArgs(x->unpack_from, x->mview, NULL);
1984 if (v == NULL)
1985 return NULL;
1986
1987 if (PyTuple_GET_SIZE(v) == 1) {
1988 PyObject *tmp = PyTuple_GET_ITEM(v, 0);
1989 Py_INCREF(tmp);
1990 Py_DECREF(v);
1991 return tmp;
1992 }
1993
1994 return v;
1995 }
1996
1997
1998 /****************************************************************************/
1999 /* Representations */
2000 /****************************************************************************/
2001
2002 /* allow explicit form of native format */
2003 static inline const char *
adjust_fmt(const Py_buffer * view)2004 adjust_fmt(const Py_buffer *view)
2005 {
2006 const char *fmt;
2007
2008 fmt = (view->format[0] == '@') ? view->format+1 : view->format;
2009 if (fmt[0] && fmt[1] == '\0')
2010 return fmt;
2011
2012 PyErr_Format(PyExc_NotImplementedError,
2013 "memoryview: unsupported format %s", view->format);
2014 return NULL;
2015 }
2016
2017 /* Base case for multi-dimensional unpacking. Assumption: ndim == 1. */
2018 static PyObject *
tolist_base(const char * ptr,const Py_ssize_t * shape,const Py_ssize_t * strides,const Py_ssize_t * suboffsets,const char * fmt)2019 tolist_base(const char *ptr, const Py_ssize_t *shape,
2020 const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
2021 const char *fmt)
2022 {
2023 PyObject *lst, *item;
2024 Py_ssize_t i;
2025
2026 lst = PyList_New(shape[0]);
2027 if (lst == NULL)
2028 return NULL;
2029
2030 for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
2031 const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
2032 item = unpack_single(xptr, fmt);
2033 if (item == NULL) {
2034 Py_DECREF(lst);
2035 return NULL;
2036 }
2037 PyList_SET_ITEM(lst, i, item);
2038 }
2039
2040 return lst;
2041 }
2042
2043 /* Unpack a multi-dimensional array into a nested list.
2044 Assumption: ndim >= 1. */
2045 static PyObject *
tolist_rec(const char * ptr,Py_ssize_t ndim,const Py_ssize_t * shape,const Py_ssize_t * strides,const Py_ssize_t * suboffsets,const char * fmt)2046 tolist_rec(const char *ptr, Py_ssize_t ndim, const Py_ssize_t *shape,
2047 const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
2048 const char *fmt)
2049 {
2050 PyObject *lst, *item;
2051 Py_ssize_t i;
2052
2053 assert(ndim >= 1);
2054 assert(shape != NULL);
2055 assert(strides != NULL);
2056
2057 if (ndim == 1)
2058 return tolist_base(ptr, shape, strides, suboffsets, fmt);
2059
2060 lst = PyList_New(shape[0]);
2061 if (lst == NULL)
2062 return NULL;
2063
2064 for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
2065 const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
2066 item = tolist_rec(xptr, ndim-1, shape+1,
2067 strides+1, suboffsets ? suboffsets+1 : NULL,
2068 fmt);
2069 if (item == NULL) {
2070 Py_DECREF(lst);
2071 return NULL;
2072 }
2073 PyList_SET_ITEM(lst, i, item);
2074 }
2075
2076 return lst;
2077 }
2078
2079 /* Return a list representation of the memoryview. Currently only buffers
2080 with native format strings are supported. */
2081 static PyObject *
memory_tolist(PyMemoryViewObject * mv,PyObject * noargs)2082 memory_tolist(PyMemoryViewObject *mv, PyObject *noargs)
2083 {
2084 const Py_buffer *view = &(mv->view);
2085 const char *fmt;
2086
2087 CHECK_RELEASED(mv);
2088
2089 fmt = adjust_fmt(view);
2090 if (fmt == NULL)
2091 return NULL;
2092 if (view->ndim == 0) {
2093 return unpack_single(view->buf, fmt);
2094 }
2095 else if (view->ndim == 1) {
2096 return tolist_base(view->buf, view->shape,
2097 view->strides, view->suboffsets,
2098 fmt);
2099 }
2100 else {
2101 return tolist_rec(view->buf, view->ndim, view->shape,
2102 view->strides, view->suboffsets,
2103 fmt);
2104 }
2105 }
2106
2107 static PyObject *
memory_tobytes(PyMemoryViewObject * self,PyObject * dummy)2108 memory_tobytes(PyMemoryViewObject *self, PyObject *dummy)
2109 {
2110 Py_buffer *src = VIEW_ADDR(self);
2111 PyObject *bytes = NULL;
2112
2113 CHECK_RELEASED(self);
2114
2115 if (MV_C_CONTIGUOUS(self->flags)) {
2116 return PyBytes_FromStringAndSize(src->buf, src->len);
2117 }
2118
2119 bytes = PyBytes_FromStringAndSize(NULL, src->len);
2120 if (bytes == NULL)
2121 return NULL;
2122
2123 if (buffer_to_contiguous(PyBytes_AS_STRING(bytes), src, 'C') < 0) {
2124 Py_DECREF(bytes);
2125 return NULL;
2126 }
2127
2128 return bytes;
2129 }
2130
2131 static PyObject *
memory_hex(PyMemoryViewObject * self,PyObject * dummy)2132 memory_hex(PyMemoryViewObject *self, PyObject *dummy)
2133 {
2134 Py_buffer *src = VIEW_ADDR(self);
2135 PyObject *bytes;
2136 PyObject *ret;
2137
2138 CHECK_RELEASED(self);
2139
2140 if (MV_C_CONTIGUOUS(self->flags)) {
2141 return _Py_strhex(src->buf, src->len);
2142 }
2143
2144 bytes = memory_tobytes(self, dummy);
2145 if (bytes == NULL)
2146 return NULL;
2147
2148 ret = _Py_strhex(PyBytes_AS_STRING(bytes), PyBytes_GET_SIZE(bytes));
2149 Py_DECREF(bytes);
2150
2151 return ret;
2152 }
2153
2154 static PyObject *
memory_repr(PyMemoryViewObject * self)2155 memory_repr(PyMemoryViewObject *self)
2156 {
2157 if (self->flags & _Py_MEMORYVIEW_RELEASED)
2158 return PyUnicode_FromFormat("<released memory at %p>", self);
2159 else
2160 return PyUnicode_FromFormat("<memory at %p>", self);
2161 }
2162
2163
2164 /**************************************************************************/
2165 /* Indexing and slicing */
2166 /**************************************************************************/
2167
2168 static char *
lookup_dimension(Py_buffer * view,char * ptr,int dim,Py_ssize_t index)2169 lookup_dimension(Py_buffer *view, char *ptr, int dim, Py_ssize_t index)
2170 {
2171 Py_ssize_t nitems; /* items in the given dimension */
2172
2173 assert(view->shape);
2174 assert(view->strides);
2175
2176 nitems = view->shape[dim];
2177 if (index < 0) {
2178 index += nitems;
2179 }
2180 if (index < 0 || index >= nitems) {
2181 PyErr_Format(PyExc_IndexError,
2182 "index out of bounds on dimension %d", dim + 1);
2183 return NULL;
2184 }
2185
2186 ptr += view->strides[dim] * index;
2187
2188 ptr = ADJUST_PTR(ptr, view->suboffsets, dim);
2189
2190 return ptr;
2191 }
2192
2193 /* Get the pointer to the item at index. */
2194 static char *
ptr_from_index(Py_buffer * view,Py_ssize_t index)2195 ptr_from_index(Py_buffer *view, Py_ssize_t index)
2196 {
2197 char *ptr = (char *)view->buf;
2198 return lookup_dimension(view, ptr, 0, index);
2199 }
2200
2201 /* Get the pointer to the item at tuple. */
2202 static char *
ptr_from_tuple(Py_buffer * view,PyObject * tup)2203 ptr_from_tuple(Py_buffer *view, PyObject *tup)
2204 {
2205 char *ptr = (char *)view->buf;
2206 Py_ssize_t dim, nindices = PyTuple_GET_SIZE(tup);
2207
2208 if (nindices > view->ndim) {
2209 PyErr_Format(PyExc_TypeError,
2210 "cannot index %zd-dimension view with %zd-element tuple",
2211 view->ndim, nindices);
2212 return NULL;
2213 }
2214
2215 for (dim = 0; dim < nindices; dim++) {
2216 Py_ssize_t index;
2217 index = PyNumber_AsSsize_t(PyTuple_GET_ITEM(tup, dim),
2218 PyExc_IndexError);
2219 if (index == -1 && PyErr_Occurred())
2220 return NULL;
2221 ptr = lookup_dimension(view, ptr, (int)dim, index);
2222 if (ptr == NULL)
2223 return NULL;
2224 }
2225 return ptr;
2226 }
2227
2228 /* Return the item at index. In a one-dimensional view, this is an object
2229 with the type specified by view->format. Otherwise, the item is a sub-view.
2230 The function is used in memory_subscript() and memory_as_sequence. */
2231 static PyObject *
memory_item(PyMemoryViewObject * self,Py_ssize_t index)2232 memory_item(PyMemoryViewObject *self, Py_ssize_t index)
2233 {
2234 Py_buffer *view = &(self->view);
2235 const char *fmt;
2236
2237 CHECK_RELEASED(self);
2238
2239 fmt = adjust_fmt(view);
2240 if (fmt == NULL)
2241 return NULL;
2242
2243 if (view->ndim == 0) {
2244 PyErr_SetString(PyExc_TypeError, "invalid indexing of 0-dim memory");
2245 return NULL;
2246 }
2247 if (view->ndim == 1) {
2248 char *ptr = ptr_from_index(view, index);
2249 if (ptr == NULL)
2250 return NULL;
2251 return unpack_single(ptr, fmt);
2252 }
2253
2254 PyErr_SetString(PyExc_NotImplementedError,
2255 "multi-dimensional sub-views are not implemented");
2256 return NULL;
2257 }
2258
2259 /* Return the item at position *key* (a tuple of indices). */
2260 static PyObject *
memory_item_multi(PyMemoryViewObject * self,PyObject * tup)2261 memory_item_multi(PyMemoryViewObject *self, PyObject *tup)
2262 {
2263 Py_buffer *view = &(self->view);
2264 const char *fmt;
2265 Py_ssize_t nindices = PyTuple_GET_SIZE(tup);
2266 char *ptr;
2267
2268 CHECK_RELEASED(self);
2269
2270 fmt = adjust_fmt(view);
2271 if (fmt == NULL)
2272 return NULL;
2273
2274 if (nindices < view->ndim) {
2275 PyErr_SetString(PyExc_NotImplementedError,
2276 "sub-views are not implemented");
2277 return NULL;
2278 }
2279 ptr = ptr_from_tuple(view, tup);
2280 if (ptr == NULL)
2281 return NULL;
2282 return unpack_single(ptr, fmt);
2283 }
2284
2285 static inline int
init_slice(Py_buffer * base,PyObject * key,int dim)2286 init_slice(Py_buffer *base, PyObject *key, int dim)
2287 {
2288 Py_ssize_t start, stop, step, slicelength;
2289
2290 if (PySlice_Unpack(key, &start, &stop, &step) < 0) {
2291 return -1;
2292 }
2293 slicelength = PySlice_AdjustIndices(base->shape[dim], &start, &stop, step);
2294
2295
2296 if (base->suboffsets == NULL || dim == 0) {
2297 adjust_buf:
2298 base->buf = (char *)base->buf + base->strides[dim] * start;
2299 }
2300 else {
2301 Py_ssize_t n = dim-1;
2302 while (n >= 0 && base->suboffsets[n] < 0)
2303 n--;
2304 if (n < 0)
2305 goto adjust_buf; /* all suboffsets are negative */
2306 base->suboffsets[n] = base->suboffsets[n] + base->strides[dim] * start;
2307 }
2308 base->shape[dim] = slicelength;
2309 base->strides[dim] = base->strides[dim] * step;
2310
2311 return 0;
2312 }
2313
2314 static int
is_multislice(PyObject * key)2315 is_multislice(PyObject *key)
2316 {
2317 Py_ssize_t size, i;
2318
2319 if (!PyTuple_Check(key))
2320 return 0;
2321 size = PyTuple_GET_SIZE(key);
2322 if (size == 0)
2323 return 0;
2324
2325 for (i = 0; i < size; i++) {
2326 PyObject *x = PyTuple_GET_ITEM(key, i);
2327 if (!PySlice_Check(x))
2328 return 0;
2329 }
2330 return 1;
2331 }
2332
2333 static Py_ssize_t
is_multiindex(PyObject * key)2334 is_multiindex(PyObject *key)
2335 {
2336 Py_ssize_t size, i;
2337
2338 if (!PyTuple_Check(key))
2339 return 0;
2340 size = PyTuple_GET_SIZE(key);
2341 for (i = 0; i < size; i++) {
2342 PyObject *x = PyTuple_GET_ITEM(key, i);
2343 if (!PyIndex_Check(x))
2344 return 0;
2345 }
2346 return 1;
2347 }
2348
2349 /* mv[obj] returns an object holding the data for one element if obj
2350 fully indexes the memoryview or another memoryview object if it
2351 does not.
2352
2353 0-d memoryview objects can be referenced using mv[...] or mv[()]
2354 but not with anything else. */
static PyObject *
memory_subscript(PyMemoryViewObject *self, PyObject *key)
{
    /* mp_subscript: mv[key].  A full index yields an unpacked element;
       a slice yields a new sub-memoryview.  The order of the key-type
       checks below is significant (e.g. an empty tuple must reach
       is_multiindex() before is_multislice()). */
    Py_buffer *view;
    view = &(self->view);

    CHECK_RELEASED(self);

    if (view->ndim == 0) {
        /* 0-dim views accept only mv[()] and mv[...]. */
        if (PyTuple_Check(key) && PyTuple_GET_SIZE(key) == 0) {
            const char *fmt = adjust_fmt(view);
            if (fmt == NULL)
                return NULL;
            return unpack_single(view->buf, fmt);
        }
        else if (key == Py_Ellipsis) {
            /* mv[...] on a 0-dim view returns the view itself. */
            Py_INCREF(self);
            return (PyObject *)self;
        }
        else {
            PyErr_SetString(PyExc_TypeError,
                "invalid indexing of 0-dim memory");
            return NULL;
        }
    }

    if (PyIndex_Check(key)) {
        Py_ssize_t index;
        index = PyNumber_AsSsize_t(key, PyExc_IndexError);
        if (index == -1 && PyErr_Occurred())
            return NULL;
        return memory_item(self, index);
    }
    else if (PySlice_Check(key)) {
        PyMemoryViewObject *sliced;

        /* Create a new view sharing the managed buffer, then narrow
           its first dimension according to the slice. */
        sliced = (PyMemoryViewObject *)mbuf_add_view(self->mbuf, view);
        if (sliced == NULL)
            return NULL;

        if (init_slice(&sliced->view, key, 0) < 0) {
            Py_DECREF(sliced);
            return NULL;
        }
        /* Recompute len and contiguity flags for the narrowed view. */
        init_len(&sliced->view);
        init_flags(sliced);

        return (PyObject *)sliced;
    }
    else if (is_multiindex(key)) {
        return memory_item_multi(self, key);
    }
    else if (is_multislice(key)) {
        PyErr_SetString(PyExc_NotImplementedError,
            "multi-dimensional slicing is not implemented");
        return NULL;
    }

    PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
    return NULL;
}
2416
static int
memory_ass_sub(PyMemoryViewObject *self, PyObject *key, PyObject *value)
{
    /* mp_ass_subscript: mv[key] = value.  Supports single-index
       assignment (ndim == 1), full multi-index assignment, and 1-dim
       slice assignment from any buffer exporter.  Returns 0 on
       success, -1 with an exception set on failure. */
    Py_buffer *view = &(self->view);
    Py_buffer src;
    const char *fmt;
    char *ptr;

    CHECK_RELEASED_INT(self);

    fmt = adjust_fmt(view);
    if (fmt == NULL)
        return -1;

    if (view->readonly) {
        PyErr_SetString(PyExc_TypeError, "cannot modify read-only memory");
        return -1;
    }
    if (value == NULL) {
        /* 'del mv[key]' arrives here with value == NULL. */
        PyErr_SetString(PyExc_TypeError, "cannot delete memory");
        return -1;
    }
    if (view->ndim == 0) {
        /* 0-dim views are only assignable via mv[...] or mv[()]. */
        if (key == Py_Ellipsis ||
            (PyTuple_Check(key) && PyTuple_GET_SIZE(key)==0)) {
            ptr = (char *)view->buf;
            return pack_single(ptr, value, fmt);
        }
        else {
            PyErr_SetString(PyExc_TypeError,
                "invalid indexing of 0-dim memory");
            return -1;
        }
    }

    if (PyIndex_Check(key)) {
        Py_ssize_t index;
        if (1 < view->ndim) {
            PyErr_SetString(PyExc_NotImplementedError,
                "sub-views are not implemented");
            return -1;
        }
        index = PyNumber_AsSsize_t(key, PyExc_IndexError);
        if (index == -1 && PyErr_Occurred())
            return -1;
        ptr = ptr_from_index(view, index);
        if (ptr == NULL)
            return -1;
        return pack_single(ptr, value, fmt);
    }
    /* one-dimensional: fast path */
    if (PySlice_Check(key) && view->ndim == 1) {
        Py_buffer dest; /* sliced view */
        Py_ssize_t arrays[3];
        int ret = -1;

        /* rvalue must be an exporter */
        if (PyObject_GetBuffer(value, &src, PyBUF_FULL_RO) < 0)
            return ret;

        /* Copy the view descriptor, giving it private (writable)
           shape/strides/suboffsets storage so init_slice() can adjust
           them without touching the original view's arrays. */
        dest = *view;
        dest.shape = &arrays[0]; dest.shape[0] = view->shape[0];
        dest.strides = &arrays[1]; dest.strides[0] = view->strides[0];
        if (view->suboffsets) {
            dest.suboffsets = &arrays[2]; dest.suboffsets[0] = view->suboffsets[0];
        }

        if (init_slice(&dest, key, 0) < 0)
            goto end_block;
        dest.len = dest.shape[0] * dest.itemsize;

        ret = copy_single(&dest, &src);

    end_block:
        /* Always release the temporary source buffer. */
        PyBuffer_Release(&src);
        return ret;
    }
    if (is_multiindex(key)) {
        char *ptr;
        if (PyTuple_GET_SIZE(key) < view->ndim) {
            PyErr_SetString(PyExc_NotImplementedError,
                "sub-views are not implemented");
            return -1;
        }
        ptr = ptr_from_tuple(view, key);
        if (ptr == NULL)
            return -1;
        return pack_single(ptr, value, fmt);
    }
    if (PySlice_Check(key) || is_multislice(key)) {
        /* Call memory_subscript() to produce a sliced lvalue, then copy
           rvalue into lvalue. This is already implemented in _testbuffer.c. */
        PyErr_SetString(PyExc_NotImplementedError,
            "memoryview slice assignments are currently restricted "
            "to ndim = 1");
        return -1;
    }

    PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
    return -1;
}
2518
2519 static Py_ssize_t
memory_length(PyMemoryViewObject * self)2520 memory_length(PyMemoryViewObject *self)
2521 {
2522 CHECK_RELEASED_INT(self);
2523 return self->view.ndim == 0 ? 1 : self->view.shape[0];
2524 }
2525
/* As mapping: len(mv), mv[key] and mv[key] = value. */
static PyMappingMethods memory_as_mapping = {
    (lenfunc)memory_length,               /* mp_length */
    (binaryfunc)memory_subscript,         /* mp_subscript */
    (objobjargproc)memory_ass_sub,        /* mp_ass_subscript */
};
2532
/* As sequence: element access by integer index through sq_item;
   concatenation and repetition are not supported. */
static PySequenceMethods memory_as_sequence = {
    (lenfunc)memory_length,               /* sq_length */
    0,                                    /* sq_concat */
    0,                                    /* sq_repeat */
    (ssizeargfunc)memory_item,            /* sq_item */
};
2540
2541
2542 /**************************************************************************/
2543 /* Comparisons */
2544 /**************************************************************************/
2545
2546 #define MV_COMPARE_EX -1 /* exception */
2547 #define MV_COMPARE_NOT_IMPL -2 /* not implemented */
2548
2549 /* Translate a StructError to "not equal". Preserve other exceptions. */
2550 static int
fix_struct_error_int(void)2551 fix_struct_error_int(void)
2552 {
2553 assert(PyErr_Occurred());
2554 /* XXX Cannot get at StructError directly? */
2555 if (PyErr_ExceptionMatches(PyExc_ImportError) ||
2556 PyErr_ExceptionMatches(PyExc_MemoryError)) {
2557 return MV_COMPARE_EX;
2558 }
2559 /* StructError: invalid or unknown format -> not equal */
2560 PyErr_Clear();
2561 return 0;
2562 }
2563
2564 /* Unpack and compare single items of p and q using the struct module. */
2565 static int
struct_unpack_cmp(const char * p,const char * q,struct unpacker * unpack_p,struct unpacker * unpack_q)2566 struct_unpack_cmp(const char *p, const char *q,
2567 struct unpacker *unpack_p, struct unpacker *unpack_q)
2568 {
2569 PyObject *v, *w;
2570 int ret;
2571
2572 /* At this point any exception from the struct module should not be
2573 StructError, since both formats have been accepted already. */
2574 v = struct_unpack_single(p, unpack_p);
2575 if (v == NULL)
2576 return MV_COMPARE_EX;
2577
2578 w = struct_unpack_single(q, unpack_q);
2579 if (w == NULL) {
2580 Py_DECREF(v);
2581 return MV_COMPARE_EX;
2582 }
2583
2584 /* MV_COMPARE_EX == -1: exceptions are preserved */
2585 ret = PyObject_RichCompareBool(v, w, Py_EQ);
2586 Py_DECREF(v);
2587 Py_DECREF(w);
2588
2589 return ret;
2590 }
2591
2592 /* Unpack and compare single items of p and q. If both p and q have the same
2593 single element native format, the comparison uses a fast path (gcc creates
2594 a jump table and converts memcpy into simple assignments on x86/x64).
2595
2596 Otherwise, the comparison is delegated to the struct module, which is
2597 30-60x slower. */
2598 #define CMP_SINGLE(p, q, type) \
2599 do { \
2600 type x; \
2601 type y; \
2602 memcpy((char *)&x, p, sizeof x); \
2603 memcpy((char *)&y, q, sizeof y); \
2604 equal = (x == y); \
2605 } while (0)
2606
static inline int
unpack_cmp(const char *p, const char *q, char fmt,
           struct unpacker *unpack_p, struct unpacker *unpack_q)
{
    /* Compare one item each from p and q.  'fmt' is the single native
       format character shared by both operands, or '_' to delegate to
       the struct module.  Returns 1/0 for equal/not equal, or
       MV_COMPARE_EX with an exception set. */
    int equal;

    switch (fmt) {

    /* signed integers and fast path for 'B' */
    case 'B': return *((unsigned char *)p) == *((unsigned char *)q);
    case 'b': return *((signed char *)p) == *((signed char *)q);
    case 'h': CMP_SINGLE(p, q, short); return equal;
    case 'i': CMP_SINGLE(p, q, int); return equal;
    case 'l': CMP_SINGLE(p, q, long); return equal;

    /* boolean */
    case '?': CMP_SINGLE(p, q, _Bool); return equal;

    /* unsigned integers */
    case 'H': CMP_SINGLE(p, q, unsigned short); return equal;
    case 'I': CMP_SINGLE(p, q, unsigned int); return equal;
    case 'L': CMP_SINGLE(p, q, unsigned long); return equal;

    /* native 64-bit */
    case 'q': CMP_SINGLE(p, q, long long); return equal;
    case 'Q': CMP_SINGLE(p, q, unsigned long long); return equal;

    /* ssize_t and size_t */
    case 'n': CMP_SINGLE(p, q, Py_ssize_t); return equal;
    case 'N': CMP_SINGLE(p, q, size_t); return equal;

    /* floats */
    /* XXX DBL_EPSILON? */
    case 'f': CMP_SINGLE(p, q, float); return equal;
    case 'd': CMP_SINGLE(p, q, double); return equal;

    /* bytes object */
    case 'c': return *p == *q;

    /* pointer */
    case 'P': CMP_SINGLE(p, q, void *); return equal;

    /* use the struct module */
    case '_':
        assert(unpack_p);
        assert(unpack_q);
        return struct_unpack_cmp(p, q, unpack_p, unpack_q);
    }

    /* NOT REACHED: callers only pass format chars handled above. */
    PyErr_SetString(PyExc_RuntimeError,
        "memoryview: internal error in richcompare");
    return MV_COMPARE_EX;
}
2661
2662 /* Base case for recursive array comparisons. Assumption: ndim == 1. */
2663 static int
cmp_base(const char * p,const char * q,const Py_ssize_t * shape,const Py_ssize_t * pstrides,const Py_ssize_t * psuboffsets,const Py_ssize_t * qstrides,const Py_ssize_t * qsuboffsets,char fmt,struct unpacker * unpack_p,struct unpacker * unpack_q)2664 cmp_base(const char *p, const char *q, const Py_ssize_t *shape,
2665 const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
2666 const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
2667 char fmt, struct unpacker *unpack_p, struct unpacker *unpack_q)
2668 {
2669 Py_ssize_t i;
2670 int equal;
2671
2672 for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
2673 const char *xp = ADJUST_PTR(p, psuboffsets, 0);
2674 const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
2675 equal = unpack_cmp(xp, xq, fmt, unpack_p, unpack_q);
2676 if (equal <= 0)
2677 return equal;
2678 }
2679
2680 return 1;
2681 }
2682
2683 /* Recursively compare two multi-dimensional arrays that have the same
2684 logical structure. Assumption: ndim >= 1. */
2685 static int
cmp_rec(const char * p,const char * q,Py_ssize_t ndim,const Py_ssize_t * shape,const Py_ssize_t * pstrides,const Py_ssize_t * psuboffsets,const Py_ssize_t * qstrides,const Py_ssize_t * qsuboffsets,char fmt,struct unpacker * unpack_p,struct unpacker * unpack_q)2686 cmp_rec(const char *p, const char *q,
2687 Py_ssize_t ndim, const Py_ssize_t *shape,
2688 const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
2689 const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
2690 char fmt, struct unpacker *unpack_p, struct unpacker *unpack_q)
2691 {
2692 Py_ssize_t i;
2693 int equal;
2694
2695 assert(ndim >= 1);
2696 assert(shape != NULL);
2697 assert(pstrides != NULL);
2698 assert(qstrides != NULL);
2699
2700 if (ndim == 1) {
2701 return cmp_base(p, q, shape,
2702 pstrides, psuboffsets,
2703 qstrides, qsuboffsets,
2704 fmt, unpack_p, unpack_q);
2705 }
2706
2707 for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
2708 const char *xp = ADJUST_PTR(p, psuboffsets, 0);
2709 const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
2710 equal = cmp_rec(xp, xq, ndim-1, shape+1,
2711 pstrides+1, psuboffsets ? psuboffsets+1 : NULL,
2712 qstrides+1, qsuboffsets ? qsuboffsets+1 : NULL,
2713 fmt, unpack_p, unpack_q);
2714 if (equal <= 0)
2715 return equal;
2716 }
2717
2718 return 1;
2719 }
2720
static PyObject *
memory_richcompare(PyObject *v, PyObject *w, int op)
{
    /* tp_richcompare: only Py_EQ and Py_NE are supported.  'v' is
       always a memoryview; 'w' may be a memoryview or any buffer
       exporter.  'equal' carries the result: 1/0, MV_COMPARE_NOT_IMPL,
       or MV_COMPARE_EX (exception set). */
    PyObject *res;
    Py_buffer wbuf, *vv;
    Py_buffer *ww = NULL;
    struct unpacker *unpack_v = NULL;
    struct unpacker *unpack_w = NULL;
    char vfmt, wfmt;
    int equal = MV_COMPARE_NOT_IMPL;

    if (op != Py_EQ && op != Py_NE)
        goto result; /* Py_NotImplemented */

    assert(PyMemoryView_Check(v));
    if (BASE_INACCESSIBLE(v)) {
        /* A released view compares equal only to itself. */
        equal = (v == w);
        goto result;
    }
    vv = VIEW_ADDR(v);

    if (PyMemoryView_Check(w)) {
        if (BASE_INACCESSIBLE(w)) {
            equal = (v == w);
            goto result;
        }
        ww = VIEW_ADDR(w);
    }
    else {
        /* Acquire a temporary buffer from an arbitrary exporter;
           non-exporters fall through to Py_NotImplemented. */
        if (PyObject_GetBuffer(w, &wbuf, PyBUF_FULL_RO) < 0) {
            PyErr_Clear();
            goto result; /* Py_NotImplemented */
        }
        ww = &wbuf;
    }

    if (!equiv_shape(vv, ww)) {
        /* Different logical structure -> not equal. */
        PyErr_Clear();
        equal = 0;
        goto result;
    }

    /* Use fast unpacking for identical primitive C type formats. */
    if (get_native_fmtchar(&vfmt, vv->format) < 0)
        vfmt = '_';
    if (get_native_fmtchar(&wfmt, ww->format) < 0)
        wfmt = '_';
    if (vfmt == '_' || wfmt == '_' || vfmt != wfmt) {
        /* Use struct module unpacking. NOTE: Even for equal format strings,
           memcmp() cannot be used for item comparison since it would give
           incorrect results in the case of NaNs or uninitialized padding
           bytes. */
        vfmt = '_';
        unpack_v = struct_get_unpacker(vv->format, vv->itemsize);
        if (unpack_v == NULL) {
            equal = fix_struct_error_int();
            goto result;
        }
        unpack_w = struct_get_unpacker(ww->format, ww->itemsize);
        if (unpack_w == NULL) {
            equal = fix_struct_error_int();
            goto result;
        }
    }

    /* Dispatch on dimensionality: scalar, 1-dim, or recursive. */
    if (vv->ndim == 0) {
        equal = unpack_cmp(vv->buf, ww->buf,
                           vfmt, unpack_v, unpack_w);
    }
    else if (vv->ndim == 1) {
        equal = cmp_base(vv->buf, ww->buf, vv->shape,
                         vv->strides, vv->suboffsets,
                         ww->strides, ww->suboffsets,
                         vfmt, unpack_v, unpack_w);
    }
    else {
        equal = cmp_rec(vv->buf, ww->buf, vv->ndim, vv->shape,
                        vv->strides, vv->suboffsets,
                        ww->strides, ww->suboffsets,
                        vfmt, unpack_v, unpack_w);
    }

result:
    if (equal < 0) {
        if (equal == MV_COMPARE_NOT_IMPL)
            res = Py_NotImplemented;
        else /* exception */
            res = NULL;
    }
    else if ((equal && op == Py_EQ) || (!equal && op == Py_NE))
        res = Py_True;
    else
        res = Py_False;

    /* Release the temporary buffer only if it was acquired above. */
    if (ww == &wbuf)
        PyBuffer_Release(ww);

    unpacker_free(unpack_v);
    unpacker_free(unpack_w);

    Py_XINCREF(res);
    return res;
}
2824
2825 /**************************************************************************/
2826 /* Hash */
2827 /**************************************************************************/
2828
static Py_hash_t
memory_hash(PyMemoryViewObject *self)
{
    /* tp_hash: hash the bytes of a read-only view with format 'B',
       'b' or 'c'.  The result is cached in self->hash (-1 means "not
       yet computed"). */
    if (self->hash == -1) {
        Py_buffer *view = &self->view;
        char *mem = view->buf;
        Py_ssize_t ret;
        char fmt;

        CHECK_RELEASED_INT(self);

        if (!view->readonly) {
            PyErr_SetString(PyExc_ValueError,
                "cannot hash writable memoryview object");
            return -1;
        }
        ret = get_native_fmtchar(&fmt, view->format);
        if (ret < 0 || !IS_BYTE_FORMAT(fmt)) {
            PyErr_SetString(PyExc_ValueError,
                "memoryview: hashing is restricted to formats 'B', 'b' or 'c'");
            return -1;
        }
        /* The exporting object must itself be hashable. */
        if (view->obj != NULL && PyObject_Hash(view->obj) == -1) {
            /* Keep the original error message */
            return -1;
        }

        if (!MV_C_CONTIGUOUS(self->flags)) {
            /* Hash a temporary C-contiguous copy of the data. */
            mem = PyMem_Malloc(view->len);
            if (mem == NULL) {
                PyErr_NoMemory();
                return -1;
            }
            if (buffer_to_contiguous(mem, view, 'C') < 0) {
                PyMem_Free(mem);
                return -1;
            }
        }

        /* Can't fail */
        self->hash = _Py_HashBytes(mem, view->len);

        /* Free the temporary copy, if one was made above. */
        if (mem != view->buf)
            PyMem_Free(mem);
    }

    return self->hash;
}
2877
2878
2879 /**************************************************************************/
2880 /* getters */
2881 /**************************************************************************/
2882
2883 static PyObject *
_IntTupleFromSsizet(int len,Py_ssize_t * vals)2884 _IntTupleFromSsizet(int len, Py_ssize_t *vals)
2885 {
2886 int i;
2887 PyObject *o;
2888 PyObject *intTuple;
2889
2890 if (vals == NULL)
2891 return PyTuple_New(0);
2892
2893 intTuple = PyTuple_New(len);
2894 if (!intTuple)
2895 return NULL;
2896 for (i=0; i<len; i++) {
2897 o = PyLong_FromSsize_t(vals[i]);
2898 if (!o) {
2899 Py_DECREF(intTuple);
2900 return NULL;
2901 }
2902 PyTuple_SET_ITEM(intTuple, i, o);
2903 }
2904 return intTuple;
2905 }
2906
static PyObject *
memory_obj_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    /* mv.obj: the exporting object, or None if view->obj is NULL. */
    Py_buffer *view = &self->view;

    CHECK_RELEASED(self);
    if (view->obj == NULL) {
        Py_RETURN_NONE;
    }
    Py_INCREF(view->obj);
    return view->obj;
}

/* mv.nbytes: total size of the data in bytes (view.len). */
static PyObject *
memory_nbytes_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return PyLong_FromSsize_t(self->view.len);
}

/* mv.format: the struct-module format string of one element. */
static PyObject *
memory_format_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return PyUnicode_FromString(self->view.format);
}

/* mv.itemsize: size in bytes of one element. */
static PyObject *
memory_itemsize_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return PyLong_FromSsize_t(self->view.itemsize);
}

/* mv.shape: tuple of per-dimension extents (empty tuple if absent). */
static PyObject *
memory_shape_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return _IntTupleFromSsizet(self->view.ndim, self->view.shape);
}

/* mv.strides: tuple of per-dimension byte strides (empty if absent). */
static PyObject *
memory_strides_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return _IntTupleFromSsizet(self->view.ndim, self->view.strides);
}

/* mv.suboffsets: tuple of PIL-style suboffsets (empty if absent). */
static PyObject *
memory_suboffsets_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return _IntTupleFromSsizet(self->view.ndim, self->view.suboffsets);
}

/* mv.readonly: True if the buffer cannot be written through. */
static PyObject *
memory_readonly_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return PyBool_FromLong(self->view.readonly);
}

/* mv.ndim: number of dimensions. */
static PyObject *
memory_ndim_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return PyLong_FromLong(self->view.ndim);
}

/* mv.c_contiguous: True if the C-contiguity flag is set.  The second
   parameter is PyObject * (unlike the getters above) because this
   function is registered via a (getter) cast in memory_getsetlist. */
static PyObject *
memory_c_contiguous(PyMemoryViewObject *self, PyObject *dummy)
{
    CHECK_RELEASED(self);
    return PyBool_FromLong(MV_C_CONTIGUOUS(self->flags));
}

/* mv.f_contiguous: True if the Fortran-contiguity flag is set. */
static PyObject *
memory_f_contiguous(PyMemoryViewObject *self, PyObject *dummy)
{
    CHECK_RELEASED(self);
    return PyBool_FromLong(MV_F_CONTIGUOUS(self->flags));
}

/* mv.contiguous: True if either contiguity flag is set. */
static PyObject *
memory_contiguous(PyMemoryViewObject *self, PyObject *dummy)
{
    CHECK_RELEASED(self);
    return PyBool_FromLong(MV_ANY_CONTIGUOUS(self->flags));
}
2996
/* Attribute docstrings used by memory_getsetlist. */
PyDoc_STRVAR(memory_obj_doc,
             "The underlying object of the memoryview.");
PyDoc_STRVAR(memory_nbytes_doc,
             "The amount of space in bytes that the array would use in\n"
             " a contiguous representation.");
PyDoc_STRVAR(memory_readonly_doc,
             "A bool indicating whether the memory is read only.");
PyDoc_STRVAR(memory_itemsize_doc,
             "The size in bytes of each element of the memoryview.");
PyDoc_STRVAR(memory_format_doc,
             "A string containing the format (in struct module style)\n"
             " for each element in the view.");
PyDoc_STRVAR(memory_ndim_doc,
             "An integer indicating how many dimensions of a multi-dimensional\n"
             " array the memory represents.");
PyDoc_STRVAR(memory_shape_doc,
             "A tuple of ndim integers giving the shape of the memory\n"
             " as an N-dimensional array.");
PyDoc_STRVAR(memory_strides_doc,
             "A tuple of ndim integers giving the size in bytes to access\n"
             " each element for each dimension of the array.");
PyDoc_STRVAR(memory_suboffsets_doc,
             "A tuple of integers used internally for PIL-style arrays.");
PyDoc_STRVAR(memory_c_contiguous_doc,
             "A bool indicating whether the memory is C contiguous.");
PyDoc_STRVAR(memory_f_contiguous_doc,
             "A bool indicating whether the memory is Fortran contiguous.");
PyDoc_STRVAR(memory_contiguous_doc,
             "A bool indicating whether the memory is contiguous.");
3026
3027
/* Getset table: all attributes are read-only (no setters). */
static PyGetSetDef memory_getsetlist[] = {
    {"obj",             (getter)memory_obj_get,        NULL, memory_obj_doc},
    {"nbytes",          (getter)memory_nbytes_get,     NULL, memory_nbytes_doc},
    {"readonly",        (getter)memory_readonly_get,   NULL, memory_readonly_doc},
    {"itemsize",        (getter)memory_itemsize_get,   NULL, memory_itemsize_doc},
    {"format",          (getter)memory_format_get,     NULL, memory_format_doc},
    {"ndim",            (getter)memory_ndim_get,       NULL, memory_ndim_doc},
    {"shape",           (getter)memory_shape_get,      NULL, memory_shape_doc},
    {"strides",         (getter)memory_strides_get,    NULL, memory_strides_doc},
    {"suboffsets",      (getter)memory_suboffsets_get, NULL, memory_suboffsets_doc},
    {"c_contiguous",    (getter)memory_c_contiguous,   NULL, memory_c_contiguous_doc},
    {"f_contiguous",    (getter)memory_f_contiguous,   NULL, memory_f_contiguous_doc},
    {"contiguous",      (getter)memory_contiguous,     NULL, memory_contiguous_doc},
    {NULL, NULL, NULL, NULL},
};
3043
/* Method docstrings (argument-clinic style signatures) used by
   memory_methods. */
PyDoc_STRVAR(memory_release_doc,
"release($self, /)\n--\n\
\n\
Release the underlying buffer exposed by the memoryview object.");
PyDoc_STRVAR(memory_tobytes_doc,
"tobytes($self, /)\n--\n\
\n\
Return the data in the buffer as a byte string.");
PyDoc_STRVAR(memory_hex_doc,
"hex($self, /)\n--\n\
\n\
Return the data in the buffer as a string of hexadecimal numbers.");
PyDoc_STRVAR(memory_tolist_doc,
"tolist($self, /)\n--\n\
\n\
Return the data in the buffer as a list of elements.");
PyDoc_STRVAR(memory_cast_doc,
"cast($self, /, format, *, shape)\n--\n\
\n\
Cast a memoryview to a new format or shape.");
3064
/* Method table.  __enter__/__exit__ provide the context-management
   protocol methods ('with' statement support). */
static PyMethodDef memory_methods[] = {
    {"release",     (PyCFunction)memory_release, METH_NOARGS, memory_release_doc},
    {"tobytes",     (PyCFunction)memory_tobytes, METH_NOARGS, memory_tobytes_doc},
    {"hex",         (PyCFunction)memory_hex, METH_NOARGS, memory_hex_doc},
    {"tolist",      (PyCFunction)memory_tolist, METH_NOARGS, memory_tolist_doc},
    {"cast",        (PyCFunction)memory_cast, METH_VARARGS|METH_KEYWORDS, memory_cast_doc},
    {"__enter__",   memory_enter, METH_NOARGS, NULL},
    {"__exit__",    memory_exit, METH_VARARGS, NULL},
    {NULL, NULL}
};
3075
3076
/* The memoryview type object.  Instances are variable-size: the fixed
   part ends at ob_array, and the trailing area is allocated in units
   of Py_ssize_t (tp_itemsize).  Participates in GC and supports weak
   references (tp_weaklistoffset). */
PyTypeObject PyMemoryView_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "memoryview",                             /* tp_name */
    offsetof(PyMemoryViewObject, ob_array),   /* tp_basicsize */
    sizeof(Py_ssize_t),                       /* tp_itemsize */
    (destructor)memory_dealloc,               /* tp_dealloc */
    0,                                        /* tp_print */
    0,                                        /* tp_getattr */
    0,                                        /* tp_setattr */
    0,                                        /* tp_reserved */
    (reprfunc)memory_repr,                    /* tp_repr */
    0,                                        /* tp_as_number */
    &memory_as_sequence,                      /* tp_as_sequence */
    &memory_as_mapping,                       /* tp_as_mapping */
    (hashfunc)memory_hash,                    /* tp_hash */
    0,                                        /* tp_call */
    0,                                        /* tp_str */
    PyObject_GenericGetAttr,                  /* tp_getattro */
    0,                                        /* tp_setattro */
    &memory_as_buffer,                        /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,  /* tp_flags */
    memory_doc,                               /* tp_doc */
    (traverseproc)memory_traverse,            /* tp_traverse */
    (inquiry)memory_clear,                    /* tp_clear */
    memory_richcompare,                       /* tp_richcompare */
    offsetof(PyMemoryViewObject, weakreflist),/* tp_weaklistoffset */
    0,                                        /* tp_iter */
    0,                                        /* tp_iternext */
    memory_methods,                           /* tp_methods */
    0,                                        /* tp_members */
    memory_getsetlist,                        /* tp_getset */
    0,                                        /* tp_base */
    0,                                        /* tp_dict */
    0,                                        /* tp_descr_get */
    0,                                        /* tp_descr_set */
    0,                                        /* tp_dictoffset */
    0,                                        /* tp_init */
    0,                                        /* tp_alloc */
    memory_new,                               /* tp_new */
};
3117