from __future__ import division, absolute_import, print_function

try:
    # Accessing collections abstract classes from collections
    # has been deprecated since Python 3.3
    import collections.abc as collections_abc
except ImportError:
    import collections as collections_abc
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import functools
import ctypes
import os
import gc
import weakref
import pytest
from contextlib import contextmanager
from numpy.core.numeric import pickle

if sys.version_info[0] >= 3:
    import builtins
else:
    import __builtin__ as builtins

from decimal import Decimal

import numpy as np
from numpy.compat import strchar, unicode
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.testing import (
    assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
    assert_array_equal, assert_raises_regex, assert_array_almost_equal,
    assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
    temppath, suppress_warnings
    )
from numpy.core.tests._locales import CommaDecimalPointLocale

# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime


if sys.version_info[:2] > (3, 2):
    # In Python 3.3 the representation of empty shape, strides and sub-offsets
    # is an empty tuple instead of None.
    # https://docs.python.org/dev/whatsnew/3.3.html#api-changes
    EMPTY = ()
else:
    EMPTY = None


def _aligned_zeros(shape, dtype=float, order="C", align=None):
    """
    Allocate a new ndarray with aligned memory.

    The ndarray is guaranteed *not* aligned to twice the requested alignment.
    Eg, if align=4, guarantees it is not aligned to 8. If align=None uses
    dtype.alignment."""
    dtype = np.dtype(dtype)
    if dtype == np.dtype(object):
        # Can't do this, fall back to standard allocation (which
        # should always be sufficiently aligned)
        if align is not None:
            raise ValueError("object array alignment not supported")
        return np.zeros(shape, dtype=dtype, order=order)
    if align is None:
        align = dtype.alignment
    if not hasattr(shape, '__len__'):
        shape = (shape,)
    size = functools.reduce(operator.mul, shape) * dtype.itemsize
    buf = np.empty(size + 2*align + 1, np.uint8)

    ptr = buf.__array_interface__['data'][0]
    offset = ptr % align
    if offset != 0:
        offset = align - offset
    if (ptr % (2*align)) == 0:
        offset += align

    # Note: slices producing 0-size arrays do not necessarily change
    # data pointer --- so we use and allocate size+1
    buf = buf[offset:offset+size+1][:-1]
    data = np.ndarray(shape, dtype, buf, order=order)
    data.fill(0)
    return data


class TestFlags(object):
    def setup(self):
        self.a = np.arange(10)

    def test_writeable(self):
        mydict = locals()
        self.a.flags.writeable = False
        assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
        assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
        self.a.flags.writeable = True
        self.a[0] = 5
        self.a[0] = 0

    def test_writeable_from_readonly(self):
        # gh-9440 - make sure fromstring, from buffer on readonly buffers
        # set writeable False
        data = b'\x00' * 100
        vals = np.frombuffer(data, 'B')
        assert_raises(ValueError, vals.setflags, write=True)
        types = np.dtype([('vals', 'u1'), ('res3', 'S4')])
        values = np.core.records.fromstring(data, types)
        vals = values['vals']
        assert_raises(ValueError, vals.setflags, write=True)

    def test_writeable_from_buffer(self):
        data = bytearray(b'\x00' * 100)
        vals = np.frombuffer(data, 'B')
        assert_(vals.flags.writeable)
        vals.setflags(write=False)
        assert_(vals.flags.writeable is False)
        vals.setflags(write=True)
        assert_(vals.flags.writeable)
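        # The same writeability round-trip is repeated below through
        # np.core.records: a field view taken from a record array built on
        # writeable data should also allow its WRITEABLE flag to be toggled.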
        types = np.dtype([('vals', 'u1'), ('res3', 'S4')])
        values = np.core.records.fromstring(data, types)
        vals = values['vals']
        assert_(vals.flags.writeable)
        vals.setflags(write=False)
        assert_(vals.flags.writeable is False)
        vals.setflags(write=True)
        assert_(vals.flags.writeable)

    @pytest.mark.skipif(sys.version_info[0] < 3,
                        reason="Python 2 always copies")
    def test_writeable_pickle(self):
        import pickle
        # Small arrays will be copied without setting base.
        # See condition for using PyArray_SetBaseObject in
        # array_setstate.
        a = np.arange(1000)
        for v in range(pickle.HIGHEST_PROTOCOL):
            vals = pickle.loads(pickle.dumps(a, v))
            assert_(vals.flags.writeable)
            assert_(isinstance(vals.base, bytes))

    def test_otherflags(self):
        assert_equal(self.a.flags.carray, True)
        assert_equal(self.a.flags['C'], True)
        assert_equal(self.a.flags.farray, False)
        assert_equal(self.a.flags.behaved, True)
        assert_equal(self.a.flags.fnc, False)
        assert_equal(self.a.flags.forc, True)
        assert_equal(self.a.flags.owndata, True)
        assert_equal(self.a.flags.writeable, True)
        assert_equal(self.a.flags.aligned, True)
        with assert_warns(DeprecationWarning):
            assert_equal(self.a.flags.updateifcopy, False)
        with assert_warns(DeprecationWarning):
            assert_equal(self.a.flags['U'], False)
            assert_equal(self.a.flags['UPDATEIFCOPY'], False)
        assert_equal(self.a.flags.writebackifcopy, False)
        assert_equal(self.a.flags['X'], False)
        assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)

    def test_string_align(self):
        a = np.zeros(4, dtype=np.dtype('|S4'))
        assert_(a.flags.aligned)
        # not power of two are accessed byte-wise and thus considered aligned
        a = np.zeros(5, dtype=np.dtype('|S4'))
        assert_(a.flags.aligned)

    def test_void_align(self):
        a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
        assert_(a.flags.aligned)


class TestHash(object):
    # see #3793
    def test_int(self):
        for st, ut, s in [(np.int8, np.uint8, 8),
                          (np.int16, np.uint16, 16),
                          (np.int32, np.uint32, 32),
                          (np.int64, np.uint64, 64)]:
            for i in range(1, s):
                assert_equal(hash(st(-2**i)), hash(-2**i),
                             err_msg="%r: -2**%d" % (st, i))
                assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
                             err_msg="%r: 2**%d" % (st, i - 1))
                assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
                             err_msg="%r: 2**%d - 1" % (st, i))

                i = max(i - 1, 1)
                assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
                             err_msg="%r: 2**%d" % (ut, i - 1))
                assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
                             err_msg="%r: 2**%d - 1" % (ut, i))


class TestAttributes(object):
    def setup(self):
        self.one = np.arange(10)
        self.two = np.arange(20).reshape(4, 5)
        self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)

    def test_attributes(self):
        assert_equal(self.one.shape, (10,))
        assert_equal(self.two.shape, (4, 5))
        assert_equal(self.three.shape, (2, 5, 6))
        self.three.shape = (10, 3, 2)
        assert_equal(self.three.shape, (10, 3, 2))
        self.three.shape = (2, 5, 6)
        assert_equal(self.one.strides, (self.one.itemsize,))
        num = self.two.itemsize
        assert_equal(self.two.strides, (5*num, num))
        num = self.three.itemsize
        assert_equal(self.three.strides, (30*num, 6*num, num))
        assert_equal(self.one.ndim, 1)
        assert_equal(self.two.ndim, 2)
        assert_equal(self.three.ndim, 3)
        num = self.two.itemsize
        assert_equal(self.two.size, 20)
        assert_equal(self.two.nbytes, 20*num)
        assert_equal(self.two.itemsize, self.two.dtype.itemsize)
        assert_equal(self.two.base, np.arange(20))

    def test_dtypeattr(self):
        assert_equal(self.one.dtype, np.dtype(np.int_))
        assert_equal(self.three.dtype, np.dtype(np.float_))
        assert_equal(self.one.dtype.char, 'l')
        assert_equal(self.three.dtype.char, 'd')
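        # dtype.str encodes the byte order in its first character ('<'
        # little-endian, '>' big-endian, '|' not applicable), the kind in its
        # second character ('i' integer, 'f' float), followed by the itemsize
        # in bytes, e.g. np.dtype(np.float64).str == '<f8' on a little-endian
        # machine.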
assert_(self.three.dtype.str[0] in '<>') assert_equal(self.one.dtype.str[1], 'i') assert_equal(self.three.dtype.str[1], 'f') def test_int_subclassing(self): # Regression test for https://github.com/numpy/numpy/pull/3526 numpy_int = np.int_(0) if sys.version_info[0] >= 3: # On Py3k int_ should not inherit from int, because it's not # fixed-width anymore assert_equal(isinstance(numpy_int, int), False) else: # Otherwise, it should inherit from int... assert_equal(isinstance(numpy_int, int), True) # ... and fast-path checks on C-API level should also work from numpy.core._multiarray_tests import test_int_subclass assert_equal(test_int_subclass(numpy_int), True) def test_stridesattr(self): x = self.one def make_array(size, offset, strides): return np.ndarray(size, buffer=x, dtype=int, offset=offset*x.itemsize, strides=strides*x.itemsize) assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) assert_raises(ValueError, make_array, 4, 4, -2) assert_raises(ValueError, make_array, 4, 2, -1) assert_raises(ValueError, make_array, 8, 3, 1) assert_equal(make_array(8, 3, 0), np.array([3]*8)) # Check behavior reported in gh-2503: assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) make_array(0, 0, 10) def test_set_stridesattr(self): x = self.one def make_array(size, offset, strides): try: r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize) except Exception as e: raise RuntimeError(e) r.strides = strides = strides*x.itemsize return r assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9])) assert_raises(ValueError, make_array, 4, 4, -2) assert_raises(ValueError, make_array, 4, 2, -1) assert_raises(RuntimeError, make_array, 8, 3, 1) # Check that the true extent of the array is used. # Test relies on as_strided base not exposing a buffer. x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) def set_strides(arr, strides): arr.strides = strides assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) # Test for offset calculations: x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], shape=(10,), strides=(-1,)) assert_raises(ValueError, set_strides, x[::-1], -1) a = x[::-1] a.strides = 1 a[::2].strides = 2 def test_fill(self): for t in "?bhilqpBHILQPfdgFDGO": x = np.empty((3, 2, 1), t) y = np.empty((3, 2, 1), t) x.fill(1) y[...] = 1 assert_equal(x, y) def test_fill_max_uint64(self): x = np.empty((3, 2, 1), dtype=np.uint64) y = np.empty((3, 2, 1), dtype=np.uint64) value = 2**64 - 1 y[...] 
= value x.fill(value) assert_array_equal(x, y) def test_fill_struct_array(self): # Filling from a scalar x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8') x.fill(x[0]) assert_equal(x['f1'][1], x['f1'][0]) # Filling from a tuple that can be converted # to a scalar x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')]) x.fill((3.5, -2)) assert_array_equal(x['a'], [3.5, 3.5]) assert_array_equal(x['b'], [-2, -2]) class TestArrayConstruction(object): def test_array(self): d = np.ones(6) r = np.array([d, d]) assert_equal(r, np.ones((2, 6))) d = np.ones(6) tgt = np.ones((2, 6)) r = np.array([d, d]) assert_equal(r, tgt) tgt[1] = 2 r = np.array([d, d + 1]) assert_equal(r, tgt) d = np.ones(6) r = np.array([[d, d]]) assert_equal(r, np.ones((1, 2, 6))) d = np.ones(6) r = np.array([[d, d], [d, d]]) assert_equal(r, np.ones((2, 2, 6))) d = np.ones((6, 6)) r = np.array([d, d]) assert_equal(r, np.ones((2, 6, 6))) d = np.ones((6, )) r = np.array([[d, d + 1], d + 2]) assert_equal(len(r), 2) assert_equal(r[0], [d, d + 1]) assert_equal(r[1], d + 2) tgt = np.ones((2, 3), dtype=bool) tgt[0, 2] = False tgt[1, 0:2] = False r = np.array([[True, True, False], [False, False, True]]) assert_equal(r, tgt) r = np.array([[True, False], [True, False], [False, True]]) assert_equal(r, tgt.T) def test_array_empty(self): assert_raises(TypeError, np.array) def test_array_copy_false(self): d = np.array([1, 2, 3]) e = np.array(d, copy=False) d[1] = 3 assert_array_equal(e, [1, 3, 3]) e = np.array(d, copy=False, order='F') d[1] = 4 assert_array_equal(e, [1, 4, 3]) e[2] = 7 assert_array_equal(d, [1, 4, 7]) def test_array_copy_true(self): d = np.array([[1,2,3], [1, 2, 3]]) e = np.array(d, copy=True) d[0, 1] = 3 e[0, 2] = -7 assert_array_equal(e, [[1, 2, -7], [1, 2, 3]]) assert_array_equal(d, [[1, 3, 3], [1, 2, 3]]) e = np.array(d, copy=True, order='F') d[0, 1] = 5 e[0, 2] = 7 assert_array_equal(e, [[1, 3, 7], [1, 2, 3]]) assert_array_equal(d, [[1, 5, 3], [1,2,3]]) def test_array_cont(self): d = np.ones(10)[::2] assert_(np.ascontiguousarray(d).flags.c_contiguous) assert_(np.ascontiguousarray(d).flags.f_contiguous) assert_(np.asfortranarray(d).flags.c_contiguous) assert_(np.asfortranarray(d).flags.f_contiguous) d = np.ones((10, 10))[::2,::2] assert_(np.ascontiguousarray(d).flags.c_contiguous) assert_(np.asfortranarray(d).flags.f_contiguous) class TestAssignment(object): def test_assignment_broadcasting(self): a = np.arange(6).reshape(2, 3) # Broadcasting the input to the output a[...] = np.arange(3) assert_equal(a, [[0, 1, 2], [0, 1, 2]]) a[...] = np.arange(2).reshape(2, 1) assert_equal(a, [[0, 0, 0], [1, 1, 1]]) # For compatibility with <= 1.5, a limited version of broadcasting # the output to the input. # # This behavior is inconsistent with NumPy broadcasting # in general, because it only uses one of the two broadcasting # rules (adding a new "1" dimension to the left of the shape), # applied to the output instead of an input. In NumPy 2.0, this kind # of broadcasting assignment will likely be disallowed. a[...] = np.arange(6)[::-1].reshape(1, 2, 3) assert_equal(a, [[5, 4, 3], [2, 1, 0]]) # The other type of broadcasting would require a reduction operation. def assign(a, b): a[...] 
= b
        assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))

    def test_assignment_errors(self):
        # Address issue #2276
        class C:
            pass
        a = np.zeros(1)

        def assign(v):
            a[0] = v

        assert_raises((AttributeError, TypeError), assign, C())
        assert_raises(ValueError, assign, [1])

    def test_unicode_assignment(self):
        # gh-5049
        from numpy.core.numeric import set_string_function

        @contextmanager
        def inject_str(s):
            """ replace ndarray.__str__ temporarily """
            set_string_function(lambda x: s, repr=False)
            try:
                yield
            finally:
                set_string_function(None, repr=False)

        a1d = np.array([u'test'])
        a0d = np.array(u'done')
        with inject_str(u'bad'):
            a1d[0] = a0d  # previously this would invoke __str__
        assert_equal(a1d[0], u'done')

        # this would crash for the same reason
        np.array([np.array(u'\xe5\xe4\xf6')])

    def test_stringlike_empty_list(self):
        # gh-8902
        u = np.array([u'done'])
        b = np.array([b'done'])

        class bad_sequence(object):
            def __getitem__(self): pass
            def __len__(self): raise RuntimeError

        assert_raises(ValueError, operator.setitem, u, 0, [])
        assert_raises(ValueError, operator.setitem, b, 0, [])
        assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
        assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())

    def test_longdouble_assignment(self):
        # only relevant if longdouble is larger than float
        # we're looking for loss of precision
        for dtype in (np.longdouble, np.longcomplex):
            # gh-8902
            tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
            tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)

            # construction
            tiny1d = np.array([tinya])
            assert_equal(tiny1d[0], tinya)

            # scalar = scalar
            tiny1d[0] = tinyb
            assert_equal(tiny1d[0], tinyb)

            # 0d = scalar
            tiny1d[0, ...] = tinya
            assert_equal(tiny1d[0], tinya)

            # 0d = 0d
            tiny1d[0, ...] = tinyb[...]
            assert_equal(tiny1d[0], tinyb)

            # scalar = 0d
            tiny1d[0] = tinyb[...]
            assert_equal(tiny1d[0], tinyb)

            arr = np.array([np.array(tinya)])
            assert_equal(arr[0], tinya)

    def test_cast_to_string(self):
        # cast to str should do "str(scalar)", not "str(scalar.item())"
        # Example: In python2, str(float) is truncated, so we want to avoid
        # str(np.float64(...).item()) as this would incorrectly truncate.
        a = np.zeros(1, dtype='S20')
        a[:] = np.array(['1.12345678901234567890'], dtype='f8')
        assert_equal(a[0], b"1.1234567890123457")


class TestDtypedescr(object):
    def test_construction(self):
        d1 = np.dtype('i4')
        assert_equal(d1, np.dtype(np.int32))
        d2 = np.dtype('f8')
        assert_equal(d2, np.dtype(np.float64))

    def test_byteorders(self):
        assert_(np.dtype('<i4') != np.dtype('>i4'))
        assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))

    def test_structured_non_void(self):
        fields = [('a', '

    @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
    def test_sequence_long(self):
        assert_equal(np.array([long(4), long(4)]).dtype, np.long)
        assert_equal(np.array([long(4), 2**80]).dtype, object)
        assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
        assert_equal(np.array([2**80, long(4)]).dtype, object)

    def test_non_sequence_sequence(self):
        """Should not segfault.

        Class Fail breaks the sequence protocol for new style classes, i.e.,
        those derived from object. Class Map is a mapping type indicated by
        raising a ValueError. At some point we may raise a warning instead
        of an error in the Fail case.
""" class Fail(object): def __len__(self): return 1 def __getitem__(self, index): raise ValueError() class Map(object): def __len__(self): return 1 def __getitem__(self, index): raise KeyError() a = np.array([Map()]) assert_(a.shape == (1,)) assert_(a.dtype == np.dtype(object)) assert_raises(ValueError, np.array, [Fail()]) def test_no_len_object_type(self): # gh-5100, want object array from iterable object without len() class Point2: def __init__(self): pass def __getitem__(self, ind): if ind in [0, 1]: return ind else: raise IndexError() d = np.array([Point2(), Point2(), Point2()]) assert_equal(d.dtype, np.dtype(object)) def test_false_len_sequence(self): # gh-7264, segfault for this example class C: def __getitem__(self, i): raise IndexError def __len__(self): return 42 assert_raises(ValueError, np.array, C()) # segfault? def test_failed_len_sequence(self): # gh-7393 class A(object): def __init__(self, data): self._data = data def __getitem__(self, item): return type(self)(self._data[item]) def __len__(self): return len(self._data) # len(d) should give 3, but len(d[0]) will fail d = A([1,2,3]) assert_equal(len(np.array(d)), 3) def test_array_too_big(self): # Test that array creation succeeds for arrays addressable by intp # on the byte level and fails for too large arrays. buf = np.zeros(100) max_bytes = np.iinfo(np.intp).max for dtype in ["intp", "S20", "b"]: dtype = np.dtype(dtype) itemsize = dtype.itemsize np.ndarray(buffer=buf, strides=(0,), shape=(max_bytes//itemsize,), dtype=dtype) assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,), shape=(max_bytes//itemsize + 1,), dtype=dtype) def test_jagged_ndim_object(self): # Lists of mismatching depths are treated as object arrays a = np.array([[1], 2, 3]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) a = np.array([1, [2], 3]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) a = np.array([1, 2, [3]]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) def test_jagged_shape_object(self): # The jagged dimension of a list is turned into an object array a = np.array([[1, 1], [2], [3]]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) a = np.array([[1], [2, 2], [3]]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) a = np.array([[1], [2], [3, 3]]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) class TestStructured(object): def test_subarray_field_access(self): a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))]) a['a'] = np.arange(60).reshape(3, 5, 2, 2) # Since the subarray is always in C-order, a transpose # does not swap the subarray: assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3)) # In Fortran order, the subarray gets appended # like in all other cases, not prepended as a special case b = a.copy(order='F') assert_equal(a['a'].shape, b['a'].shape) assert_equal(a.T['a'].shape, a.T.copy()['a'].shape) def test_subarray_comparison(self): # Check that comparisons between record arrays with # multi-dimensional field types work properly a = np.rec.fromrecords( [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])], dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))]) b = a.copy() assert_equal(a == b, [True, True]) assert_equal(a != b, [False, False]) b[1].b = 'c' assert_equal(a == b, [True, False]) assert_equal(a != b, [False, True]) for i in range(3): b[0].a = a[0].a b[0].a[i] = 5 assert_equal(a == b, [False, False]) assert_equal(a != b, [True, True]) for i in range(2): for j in range(2): b = a.copy() b[0].c[i, j] = 10 assert_equal(a == b, 
[False, True]) assert_equal(a != b, [True, False]) # Check that broadcasting with a subarray works a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')]) b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')]) assert_equal(a == b, [[True, True, False], [False, False, True]]) assert_equal(b == a, [[True, True, False], [False, False, True]]) a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))]) b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))]) assert_equal(a == b, [[True, True, False], [False, False, True]]) assert_equal(b == a, [[True, True, False], [False, False, True]]) a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))]) b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) assert_equal(a == b, [[True, False, False], [False, False, True]]) assert_equal(b == a, [[True, False, False], [False, False, True]]) # Check that broadcasting Fortran-style arrays with a subarray work a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F') b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) assert_equal(a == b, [[True, False, False], [False, False, True]]) assert_equal(b == a, [[True, False, False], [False, False, True]]) # Check that incompatible sub-array shapes don't result to broadcasting x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')]) y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) # This comparison invokes deprecated behaviour, and will probably # start raising an error eventually. What we really care about in this # test is just that it doesn't return True. with suppress_warnings() as sup: sup.filter(FutureWarning, "elementwise == comparison failed") assert_equal(x == y, False) x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')]) y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) # This comparison invokes deprecated behaviour, and will probably # start raising an error eventually. What we really care about in this # test is just that it doesn't return True. 
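        # With mismatched sub-array shapes the field-wise comparison cannot
        # broadcast, so NumPy emits the "elementwise == comparison failed"
        # FutureWarning and the expression evaluates to the scalar False
        # instead of a boolean array, which is all these checks rely on.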
with suppress_warnings() as sup: sup.filter(FutureWarning, "elementwise == comparison failed") assert_equal(x == y, False) # Check that structured arrays that are different only in # byte-order work a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', 'f8')]) assert_equal(a == b, [False, True]) def test_casting(self): # Check that casting a structured array to change its byte order # works a = np.array([(1,)], dtype=[('a', 'i4')], casting='unsafe')) b = a.astype([('a', '>i4')]) assert_equal(b, a.byteswap().newbyteorder()) assert_equal(a['a'][0], b['a'][0]) # Check that equality comparison works on structured arrays if # they are 'equiv'-castable a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', 'f8')]) assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) assert_equal(a == b, [True, True]) # Check that 'equiv' casting can change byte order assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) c = a.astype(b.dtype, casting='equiv') assert_equal(a == c, [True, True]) # Check that 'safe' casting can change byte order and up-cast # fields t = [('a', 'f8')] assert_(np.can_cast(a.dtype, t, casting='safe')) c = a.astype(t, casting='safe') assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), [True, True]) # Check that 'same_kind' casting can change byte order and # change field widths within a "kind" t = [('a', 'f4')] assert_(np.can_cast(a.dtype, t, casting='same_kind')) c = a.astype(t, casting='same_kind') assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), [True, True]) # Check that casting fails if the casting rule should fail on # any of the fields t = [('a', '>i8'), ('b', 'i2'), ('b', 'i8'), ('b', 'i4')] assert_(not np.can_cast(a.dtype, t, casting=casting)) t = [('a', '>i4'), ('b', ' false for n in range(3): v = np.array(b'', (dtype, n)) assert_equal(bool(v), False) assert_equal(bool(v[()]), False) assert_equal(v.astype(bool), False) assert_(isinstance(v.astype(bool), np.ndarray)) assert_(v[()].astype(bool) is np.False_) # anything else -> true for n in range(1, 4): for val in [b'a', b'0', b' ']: v = np.array(val, (dtype, n)) assert_equal(bool(v), True) assert_equal(bool(v[()]), True) assert_equal(v.astype(bool), True) assert_(isinstance(v.astype(bool), np.ndarray)) assert_(v[()].astype(bool) is np.True_) def test_cast_from_void(self): self._test_cast_from_flexible(np.void) @pytest.mark.xfail(reason="See gh-9847") def test_cast_from_unicode(self): self._test_cast_from_flexible(np.unicode_) @pytest.mark.xfail(reason="See gh-9847") def test_cast_from_bytes(self): self._test_cast_from_flexible(np.bytes_) class TestZeroSizeFlexible(object): @staticmethod def _zeros(shape, dtype=str): dtype = np.dtype(dtype) if dtype == np.void: return np.zeros(shape, dtype=(dtype, 0)) # not constructable directly dtype = np.dtype([('x', dtype, 0)]) return np.zeros(shape, dtype=dtype)['x'] def test_create(self): zs = self._zeros(10, bytes) assert_equal(zs.itemsize, 0) zs = self._zeros(10, np.void) assert_equal(zs.itemsize, 0) zs = self._zeros(10, unicode) assert_equal(zs.itemsize, 0) def _test_sort_partition(self, name, kinds, **kwargs): # Previously, these would all hang for dt in [bytes, np.void, unicode]: zs = self._zeros(10, dt) sort_method = getattr(zs, name) sort_func = getattr(np, name) for kind in kinds: sort_method(kind=kind, **kwargs) sort_func(zs, kind=kind, **kwargs) def test_sort(self): self._test_sort_partition('sort', kinds='qhm') def test_argsort(self): self._test_sort_partition('argsort', kinds='qhm') def test_partition(self): 
self._test_sort_partition('partition', kinds=['introselect'], kth=2) def test_argpartition(self): self._test_sort_partition('argpartition', kinds=['introselect'], kth=2) def test_resize(self): # previously an error for dt in [bytes, np.void, unicode]: zs = self._zeros(10, dt) zs.resize(25) zs.resize((10, 10)) def test_view(self): for dt in [bytes, np.void, unicode]: zs = self._zeros(10, dt) # viewing as itself should be allowed assert_equal(zs.view(dt).dtype, np.dtype(dt)) # viewing as any non-empty type gives an empty result assert_equal(zs.view((dt, 1)).shape, (0,)) def test_pickle(self): for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): for dt in [bytes, np.void, unicode]: zs = self._zeros(10, dt) p = pickle.dumps(zs, protocol=proto) zs2 = pickle.loads(p) assert_equal(zs.dtype, zs2.dtype) @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") def test_pickle_with_buffercallback(self): array = np.arange(10) buffers = [] bytes_string = pickle.dumps(array, buffer_callback=buffers.append, protocol=5) array_from_buffer = pickle.loads(bytes_string, buffers=buffers) # when using pickle protocol 5 with buffer callbacks, # array_from_buffer is reconstructed from a buffer holding a view # to the initial array's data, so modifying an element in array # should modify it in array_from_buffer too. array[0] = -1 assert array_from_buffer[0] == -1, array_from_buffer[0] class TestMethods(object): def test_compress(self): tgt = [[5, 6, 7, 8, 9]] arr = np.arange(10).reshape(2, 5) out = arr.compress([0, 1], axis=0) assert_equal(out, tgt) tgt = [[1, 3], [6, 8]] out = arr.compress([0, 1, 0, 1, 0], axis=1) assert_equal(out, tgt) tgt = [[1], [6]] arr = np.arange(10).reshape(2, 5) out = arr.compress([0, 1], axis=1) assert_equal(out, tgt) arr = np.arange(10).reshape(2, 5) out = arr.compress([0, 1]) assert_equal(out, 1) def test_choose(self): x = 2*np.ones((3,), dtype=int) y = 3*np.ones((3,), dtype=int) x2 = 2*np.ones((2, 3), dtype=int) y2 = 3*np.ones((2, 3), dtype=int) ind = np.array([0, 0, 1]) A = ind.choose((x, y)) assert_equal(A, [2, 2, 3]) A = ind.choose((x2, y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) A = ind.choose((x, y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) oned = np.ones(1) # gh-12031, caused SEGFAULT assert_raises(TypeError, oned.choose,np.void(0), [oned]) def test_prod(self): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] for ctype in [np.int16, np.uint16, np.int32, np.uint32, np.float32, np.float64, np.complex64, np.complex128]: a = np.array(ba, ctype) a2 = np.array(ba2, ctype) if ctype in ['1', 'b']: assert_raises(ArithmeticError, a.prod) assert_raises(ArithmeticError, a2.prod, axis=1) else: assert_equal(a.prod(axis=0), 26400) assert_array_equal(a2.prod(axis=0), np.array([50, 36, 84, 180], ctype)) assert_array_equal(a2.prod(axis=-1), np.array([24, 1890, 600], ctype)) def test_repeat(self): m = np.array([1, 2, 3, 4, 5, 6]) m_rect = m.reshape((2, 3)) A = m.repeat([1, 3, 2, 1, 1, 2]) assert_equal(A, [1, 2, 2, 2, 3, 3, 4, 5, 6, 6]) A = m.repeat(2) assert_equal(A, [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]) A = m_rect.repeat([2, 1], axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6]]) A = m_rect.repeat([1, 3, 2], axis=1) assert_equal(A, [[1, 2, 2, 2, 3, 3], [4, 5, 5, 5, 6, 6]]) A = m_rect.repeat(2, axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]]) A = m_rect.repeat(2, axis=1) assert_equal(A, [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]]) def test_reshape(self): arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 
12]]) tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] assert_equal(arr.reshape(2, 6), tgt) tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] assert_equal(arr.reshape(3, 4), tgt) tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]] assert_equal(arr.reshape((3, 4), order='F'), tgt) tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]] assert_equal(arr.T.reshape((3, 4), order='C'), tgt) def test_round(self): def check_round(arr, expected, *round_args): assert_equal(arr.round(*round_args), expected) # With output array out = np.zeros_like(arr) res = arr.round(*round_args, out=out) assert_equal(out, expected) assert_equal(out, res) check_round(np.array([1.2, 1.5]), [1, 2]) check_round(np.array(1.5), 2) check_round(np.array([12.2, 15.5]), [10, 20], -1) check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1) # Complex rounding check_round(np.array([4.5 + 1.5j]), [4 + 2j]) check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) def test_squeeze(self): a = np.array([[[1], [2], [3]]]) assert_equal(a.squeeze(), [1, 2, 3]) assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]]) assert_raises(ValueError, a.squeeze, axis=(1,)) assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]]) def test_transpose(self): a = np.array([[1, 2], [3, 4]]) assert_equal(a.transpose(), [[1, 3], [2, 4]]) assert_raises(ValueError, lambda: a.transpose(0)) assert_raises(ValueError, lambda: a.transpose(0, 0)) assert_raises(ValueError, lambda: a.transpose(0, 1, 2)) def test_sort(self): # test ordering for floats and complex containing nans. It is only # necessary to check the less-than comparison, so sorts that # only follow the insertion sort path are sufficient. We only # test doubles and complex doubles as the logic is the same. # check doubles msg = "Test real sort order with nans" a = np.array([np.nan, 1, 0]) b = np.sort(a) assert_equal(b, a[::-1], msg) # check complex msg = "Test complex sort order with nans" a = np.zeros(9, dtype=np.complex128) a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0] a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0] b = np.sort(a) assert_equal(b, a[::-1], msg) # all c scalar sorts use the same code with different types # so it suffices to run a quick check with one type. The number # of sorted items must be greater than ~50 to check the actual # algorithm because quick and merge sort fall over to insertion # sort for small arrays. a = np.arange(101) b = a[::-1].copy() for kind in ['q', 'm', 'h']: msg = "scalar sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test complex sorts. These use the same code as the scalars # but the compare function differs. ai = a*1j + 1 bi = b*1j + 1 for kind in ['q', 'm', 'h']: msg = "complex sort, real part == 1, kind=%s" % kind c = ai.copy() c.sort(kind=kind) assert_equal(c, ai, msg) c = bi.copy() c.sort(kind=kind) assert_equal(c, ai, msg) ai = a + 1j bi = b + 1j for kind in ['q', 'm', 'h']: msg = "complex sort, imag part == 1, kind=%s" % kind c = ai.copy() c.sort(kind=kind) assert_equal(c, ai, msg) c = bi.copy() c.sort(kind=kind) assert_equal(c, ai, msg) # test sorting of complex arrays requiring byte-swapping, gh-5441 for endianness in '<>': for dt in np.typecodes['Complex']: arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt) c = arr.copy() c.sort() msg = 'byte-swapped complex sort, dtype={0}'.format(dt) assert_equal(c, arr, msg) # test string sorts. 
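        # Bytes/str items compare lexicographically, so the common
        # 'aaaaaaaa' prefix plus a chr(i) suffix produces an array that is
        # already in ascending order; sorting it (and its reverse) must
        # reproduce it exactly.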
s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)]) b = a[::-1].copy() for kind in ['q', 'm', 'h']: msg = "string sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test unicode sorts. s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode) b = a[::-1].copy() for kind in ['q', 'm', 'h']: msg = "unicode sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test object array sorts. a = np.empty((101,), dtype=object) a[:] = list(range(101)) b = a[::-1] for kind in ['q', 'h', 'm']: msg = "object sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test record array sorts. dt = np.dtype([('f', float), ('i', int)]) a = np.array([(i, i) for i in range(101)], dtype=dt) b = a[::-1] for kind in ['q', 'h', 'm']: msg = "object sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test datetime64 sorts. a = np.arange(0, 101, dtype='datetime64[D]') b = a[::-1] for kind in ['q', 'h', 'm']: msg = "datetime64 sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test timedelta64 sorts. a = np.arange(0, 101, dtype='timedelta64[D]') b = a[::-1] for kind in ['q', 'h', 'm']: msg = "timedelta64 sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # check axis handling. This should be the same for all type # specific sorts, so we only check it for one type and one kind a = np.array([[3, 2], [1, 0]]) b = np.array([[1, 0], [3, 2]]) c = np.array([[2, 3], [0, 1]]) d = a.copy() d.sort(axis=0) assert_equal(d, b, "test sort with axis=0") d = a.copy() d.sort(axis=1) assert_equal(d, c, "test sort with axis=1") d = a.copy() d.sort() assert_equal(d, c, "test sort with default axis") # check axis handling for multidimensional empty arrays a = np.array([]) a.shape = (3, 2, 1, 0) for axis in range(-a.ndim, a.ndim): msg = 'test empty array sort with axis={0}'.format(axis) assert_equal(np.sort(a, axis=axis), a, msg) msg = 'test empty array sort with axis=None' assert_equal(np.sort(a, axis=None), a.ravel(), msg) # test generic class with bogus ordering, # should not segfault. 
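        # Boom.__lt__ returns True unconditionally, so the ordering it
        # defines is inconsistent; the only requirement here is that the
        # C-level sort loops terminate without crashing, not that the result
        # is meaningfully ordered.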
class Boom(object): def __lt__(self, other): return True a = np.array([Boom()]*100, dtype=object) for kind in ['q', 'm', 'h']: msg = "bogus comparison object sort, kind=%s" % kind c.sort(kind=kind) def test_void_sort(self): # gh-8210 - previously segfaulted for i in range(4): rand = np.random.randint(256, size=4000, dtype=np.uint8) arr = rand.view('V4') arr[::-1].sort() dt = np.dtype([('val', 'i4', (1,))]) for i in range(4): rand = np.random.randint(256, size=4000, dtype=np.uint8) arr = rand.view(dt) arr[::-1].sort() def test_sort_raises(self): #gh-9404 arr = np.array([0, datetime.now(), 1], dtype=object) for kind in ['q', 'm', 'h']: assert_raises(TypeError, arr.sort, kind=kind) #gh-3879 class Raiser(object): def raises_anything(*args, **kwargs): raise TypeError("SOMETHING ERRORED") __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1) np.random.shuffle(arr) for kind in ['q', 'm', 'h']: assert_raises(TypeError, arr.sort, kind=kind) def test_sort_degraded(self): # test degraded dataset would take minutes to run with normal qsort d = np.arange(1000000) do = d.copy() x = d # create a median of 3 killer where each median is the sorted second # last element of the quicksort partition while x.size > 3: mid = x.size // 2 x[mid], x[-2] = x[-2], x[mid] x = x[:-2] assert_equal(np.sort(d), do) assert_equal(d[np.argsort(d)], do) def test_copy(self): def assert_fortran(arr): assert_(arr.flags.fortran) assert_(arr.flags.f_contiguous) assert_(not arr.flags.c_contiguous) def assert_c(arr): assert_(not arr.flags.fortran) assert_(not arr.flags.f_contiguous) assert_(arr.flags.c_contiguous) a = np.empty((2, 2), order='F') # Test copying a Fortran array assert_c(a.copy()) assert_c(a.copy('C')) assert_fortran(a.copy('F')) assert_fortran(a.copy('A')) # Now test starting with a C array. a = np.empty((2, 2), order='C') assert_c(a.copy()) assert_c(a.copy('C')) assert_fortran(a.copy('F')) assert_c(a.copy('A')) def test_sort_order(self): # Test sorting an array with fields x1 = np.array([21, 32, 14]) x2 = np.array(['my', 'first', 'name']) x3 = np.array([3.1, 4.5, 6.2]) r = np.rec.fromarrays([x1, x2, x3], names='id,word,number') r.sort(order=['id']) assert_equal(r.id, np.array([14, 21, 32])) assert_equal(r.word, np.array(['name', 'my', 'first'])) assert_equal(r.number, np.array([6.2, 3.1, 4.5])) r.sort(order=['word']) assert_equal(r.id, np.array([32, 21, 14])) assert_equal(r.word, np.array(['first', 'my', 'name'])) assert_equal(r.number, np.array([4.5, 3.1, 6.2])) r.sort(order=['number']) assert_equal(r.id, np.array([21, 32, 14])) assert_equal(r.word, np.array(['my', 'first', 'name'])) assert_equal(r.number, np.array([3.1, 4.5, 6.2])) assert_raises_regex(ValueError, 'duplicate', lambda: r.sort(order=['id', 'id'])) if sys.byteorder == 'little': strtype = '>i2' else: strtype = '': for dt in np.typecodes['Complex']: arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt) msg = 'byte-swapped complex argsort, dtype={0}'.format(dt) assert_equal(arr.argsort(), np.arange(len(arr), dtype=np.intp), msg) # test string argsorts. s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)]) b = a[::-1].copy() r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h']: msg = "string argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test unicode argsorts. 
s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode) b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h']: msg = "unicode argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test object array argsorts. a = np.empty((101,), dtype=object) a[:] = list(range(101)) b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h']: msg = "object argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test structured array argsorts. dt = np.dtype([('f', float), ('i', int)]) a = np.array([(i, i) for i in range(101)], dtype=dt) b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h']: msg = "structured array argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test datetime64 argsorts. a = np.arange(0, 101, dtype='datetime64[D]') b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'h', 'm']: msg = "datetime64 argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test timedelta64 argsorts. a = np.arange(0, 101, dtype='timedelta64[D]') b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'h', 'm']: msg = "timedelta64 argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # check axis handling. This should be the same for all type # specific argsorts, so we only check it for one type and one kind a = np.array([[3, 2], [1, 0]]) b = np.array([[1, 1], [0, 0]]) c = np.array([[1, 0], [1, 0]]) assert_equal(a.copy().argsort(axis=0), b) assert_equal(a.copy().argsort(axis=1), c) assert_equal(a.copy().argsort(), c) # check axis handling for multidimensional empty arrays a = np.array([]) a.shape = (3, 2, 1, 0) for axis in range(-a.ndim, a.ndim): msg = 'test empty array argsort with axis={0}'.format(axis) assert_equal(np.argsort(a, axis=axis), np.zeros_like(a, dtype=np.intp), msg) msg = 'test empty array argsort with axis=None' assert_equal(np.argsort(a, axis=None), np.zeros_like(a.ravel(), dtype=np.intp), msg) # check that stable argsorts are stable r = np.arange(100) # scalars a = np.zeros(100) assert_equal(a.argsort(kind='m'), r) # complex a = np.zeros(100, dtype=complex) assert_equal(a.argsort(kind='m'), r) # string a = np.array(['aaaaaaaaa' for i in range(100)]) assert_equal(a.argsort(kind='m'), r) # unicode a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode) assert_equal(a.argsort(kind='m'), r) def test_sort_unicode_kind(self): d = np.arange(10) k = b'\xc3\xa4'.decode("UTF8") assert_raises(ValueError, d.sort, kind=k) assert_raises(ValueError, d.argsort, kind=k) def test_searchsorted(self): # test for floats and complex containing nans. The logic is the # same for all float types so only test double types for now. # The search sorted routines use the compare functions for the # array type, so this checks if that is consistent with the sort # order. 
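        # For example, [0, 1, nan] is already in sort order because nan is
        # placed after all numbers, so searching the array against itself
        # returns [0, 1, 2] with side='l' (leftmost insertion point) and
        # [1, 2, 3] with side='r' (rightmost insertion point), as asserted
        # below.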
# check double a = np.array([0, 1, np.nan]) msg = "Test real searchsorted with nans, side='l'" b = a.searchsorted(a, side='l') assert_equal(b, np.arange(3), msg) msg = "Test real searchsorted with nans, side='r'" b = a.searchsorted(a, side='r') assert_equal(b, np.arange(1, 4), msg) # check double complex a = np.zeros(9, dtype=np.complex128) a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan] a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan] msg = "Test complex searchsorted with nans, side='l'" b = a.searchsorted(a, side='l') assert_equal(b, np.arange(9), msg) msg = "Test complex searchsorted with nans, side='r'" b = a.searchsorted(a, side='r') assert_equal(b, np.arange(1, 10), msg) msg = "Test searchsorted with little endian, side='l'" a = np.array([0, 128], dtype=' p[:, i]).all(), msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) aae(p, d1[np.arange(d1.shape[0])[:, None], np.argpartition(d1, i, axis=1, kind=k)]) p = np.partition(d0, i, axis=0, kind=k) aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right at((p[:i, :] <= p[i, :]).all(), msg="%d: %r <= %r" % (i, p[i, :], p[:i, :])) at((p[i + 1:, :] > p[i, :]).all(), msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:])) aae(p, d0[np.argpartition(d0, i, axis=0, kind=k), np.arange(d0.shape[1])[None, :]]) # check inplace dc = d.copy() dc.partition(i, kind=k) assert_equal(dc, np.partition(d, i, kind=k)) dc = d0.copy() dc.partition(i, axis=0, kind=k) assert_equal(dc, np.partition(d0, i, axis=0, kind=k)) dc = d1.copy() dc.partition(i, axis=1, kind=k) assert_equal(dc, np.partition(d1, i, axis=1, kind=k)) def assert_partitioned(self, d, kth): prev = 0 for k in np.sort(kth): assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k) assert_((d[k:] >= d[k]).all(), msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k])) prev = k + 1 def test_partition_iterative(self): d = np.arange(17) kth = (0, 1, 2, 429, 231) assert_raises(ValueError, d.partition, kth) assert_raises(ValueError, d.argpartition, kth) d = np.arange(10).reshape((2, 5)) assert_raises(ValueError, d.partition, kth, axis=0) assert_raises(ValueError, d.partition, kth, axis=1) assert_raises(ValueError, np.partition, d, kth, axis=1) assert_raises(ValueError, np.partition, d, kth, axis=None) d = np.array([3, 4, 2, 1]) p = np.partition(d, (0, 3)) self.assert_partitioned(p, (0, 3)) self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) assert_array_equal(p, np.partition(d, (-3, -1))) assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) d = np.arange(17) np.random.shuffle(d) d.partition(range(d.size)) assert_array_equal(np.arange(17), d) np.random.shuffle(d) assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) # test unsorted kth d = np.arange(17) np.random.shuffle(d) keys = np.array([1, 3, 8, -2]) np.random.shuffle(d) p = np.partition(d, keys) self.assert_partitioned(p, keys) p = d[np.argpartition(d, keys)] self.assert_partitioned(p, keys) np.random.shuffle(keys) assert_array_equal(np.partition(d, keys), p) assert_array_equal(d[np.argpartition(d, keys)], p) # equal kth d = np.arange(20)[::-1] self.assert_partitioned(np.partition(d, [5]*4), [5]) self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]), [5]*4 + [6, 13]) self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5]) self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])], [5]*4 + [6, 13]) d = np.arange(12) np.random.shuffle(d) d1 = np.tile(np.arange(12), (4, 1)) map(np.random.shuffle, d1) d0 = np.transpose(d1) kth = (1, 6, 7, -1) p = np.partition(d1, kth, axis=1) pa = 
d1[np.arange(d1.shape[0])[:, None], d1.argpartition(kth, axis=1)] assert_array_equal(p, pa) for i in range(d1.shape[0]): self.assert_partitioned(p[i,:], kth) p = np.partition(d0, kth, axis=0) pa = d0[np.argpartition(d0, kth, axis=0), np.arange(d0.shape[1])[None,:]] assert_array_equal(p, pa) for i in range(d0.shape[1]): self.assert_partitioned(p[:, i], kth) def test_partition_cdtype(self): d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), ('Lancelot', 1.9, 38)], dtype=[('name', '|S10'), ('height', ' (numpy ufunc, has_in_place_version, preferred_dtype) ops = { 'add': (np.add, True, float), 'sub': (np.subtract, True, float), 'mul': (np.multiply, True, float), 'truediv': (np.true_divide, True, float), 'floordiv': (np.floor_divide, True, float), 'mod': (np.remainder, True, float), 'divmod': (np.divmod, False, float), 'pow': (np.power, True, int), 'lshift': (np.left_shift, True, int), 'rshift': (np.right_shift, True, int), 'and': (np.bitwise_and, True, int), 'xor': (np.bitwise_xor, True, int), 'or': (np.bitwise_or, True, int), # 'ge': (np.less_equal, False), # 'gt': (np.less, False), # 'le': (np.greater_equal, False), # 'lt': (np.greater, False), # 'eq': (np.equal, False), # 'ne': (np.not_equal, False), } if sys.version_info >= (3, 5): ops['matmul'] = (np.matmul, False, float) class Coerced(Exception): pass def array_impl(self): raise Coerced def op_impl(self, other): return "forward" def rop_impl(self, other): return "reverse" def iop_impl(self, other): return "in-place" def array_ufunc_impl(self, ufunc, method, *args, **kwargs): return ("__array_ufunc__", ufunc, method, args, kwargs) # Create an object with the given base, in the given module, with a # bunch of placeholder __op__ methods, and optionally a # __array_ufunc__ and __array_priority__. def make_obj(base, array_priority=False, array_ufunc=False, alleged_module="__main__"): class_namespace = {"__array__": array_impl} if array_priority is not False: class_namespace["__array_priority__"] = array_priority for op in ops: class_namespace["__{0}__".format(op)] = op_impl class_namespace["__r{0}__".format(op)] = rop_impl class_namespace["__i{0}__".format(op)] = iop_impl if array_ufunc is not False: class_namespace["__array_ufunc__"] = array_ufunc eval_namespace = {"base": base, "class_namespace": class_namespace, "__name__": alleged_module, } MyType = eval("type('MyType', (base,), class_namespace)", eval_namespace) if issubclass(MyType, np.ndarray): # Use this range to avoid special case weirdnesses around # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc. 
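                # With values 3..6 every operator in `ops` stays well-defined
                # (no zero divisors, no 0**negative, no overflow), so the
                # checks exercise only dispatch behaviour rather than
                # arithmetic corner cases.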
return np.arange(3, 7).reshape(2, 2).view(MyType) else: return MyType() def check(obj, binop_override_expected, ufunc_override_expected, inplace_override_expected, check_scalar=True): for op, (ufunc, has_inplace, dtype) in ops.items(): err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s' % (op, ufunc, has_inplace, dtype)) check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)] if check_scalar: check_objs.append(check_objs[0][0]) for arr in check_objs: arr_method = getattr(arr, "__{0}__".format(op)) def first_out_arg(result): if op == "divmod": assert_(isinstance(result, tuple)) return result[0] else: return result # arr __op__ obj if binop_override_expected: assert_equal(arr_method(obj), NotImplemented, err_msg) elif ufunc_override_expected: assert_equal(arr_method(obj)[0], "__array_ufunc__", err_msg) else: if (isinstance(obj, np.ndarray) and (type(obj).__array_ufunc__ is np.ndarray.__array_ufunc__)): # __array__ gets ignored res = first_out_arg(arr_method(obj)) assert_(res.__class__ is obj.__class__, err_msg) else: assert_raises((TypeError, Coerced), arr_method, obj, err_msg=err_msg) # obj __op__ arr arr_rmethod = getattr(arr, "__r{0}__".format(op)) if ufunc_override_expected: res = arr_rmethod(obj) assert_equal(res[0], "__array_ufunc__", err_msg=err_msg) assert_equal(res[1], ufunc, err_msg=err_msg) else: if (isinstance(obj, np.ndarray) and (type(obj).__array_ufunc__ is np.ndarray.__array_ufunc__)): # __array__ gets ignored res = first_out_arg(arr_rmethod(obj)) assert_(res.__class__ is obj.__class__, err_msg) else: # __array_ufunc__ = "asdf" creates a TypeError assert_raises((TypeError, Coerced), arr_rmethod, obj, err_msg=err_msg) # arr __iop__ obj # array scalars don't have in-place operators if has_inplace and isinstance(arr, np.ndarray): arr_imethod = getattr(arr, "__i{0}__".format(op)) if inplace_override_expected: assert_equal(arr_method(obj), NotImplemented, err_msg=err_msg) elif ufunc_override_expected: res = arr_imethod(obj) assert_equal(res[0], "__array_ufunc__", err_msg) assert_equal(res[1], ufunc, err_msg) assert_(type(res[-1]["out"]) is tuple, err_msg) assert_(res[-1]["out"][0] is arr, err_msg) else: if (isinstance(obj, np.ndarray) and (type(obj).__array_ufunc__ is np.ndarray.__array_ufunc__)): # __array__ gets ignored assert_(arr_imethod(obj) is arr, err_msg) else: assert_raises((TypeError, Coerced), arr_imethod, obj, err_msg=err_msg) op_fn = getattr(operator, op, None) if op_fn is None: op_fn = getattr(operator, op + "_", None) if op_fn is None: op_fn = getattr(builtins, op) assert_equal(op_fn(obj, arr), "forward", err_msg) if not isinstance(obj, np.ndarray): if binop_override_expected: assert_equal(op_fn(arr, obj), "reverse", err_msg) elif ufunc_override_expected: assert_equal(op_fn(arr, obj)[0], "__array_ufunc__", err_msg) if ufunc_override_expected: assert_equal(ufunc(obj, arr)[0], "__array_ufunc__", err_msg) # No array priority, no array_ufunc -> nothing called check(make_obj(object), False, False, False) # Negative array priority, no array_ufunc -> nothing called # (has to be very negative, because scalar priority is -1000000.0) check(make_obj(object, array_priority=-2**30), False, False, False) # Positive array priority, no array_ufunc -> binops and iops only check(make_obj(object, array_priority=1), True, False, True) # ndarray ignores array_priority for ndarray subclasses check(make_obj(np.ndarray, array_priority=1), False, False, False, check_scalar=False) # Positive array_priority and array_ufunc -> array_ufunc only check(make_obj(object, array_priority=1, 
array_ufunc=array_ufunc_impl), False, True, False) check(make_obj(np.ndarray, array_priority=1, array_ufunc=array_ufunc_impl), False, True, False) # array_ufunc set to None -> defer binops only check(make_obj(object, array_ufunc=None), True, False, False) check(make_obj(np.ndarray, array_ufunc=None), True, False, False, check_scalar=False) def test_ufunc_override_normalize_signature(self): # gh-5674 class SomeClass(object): def __array_ufunc__(self, ufunc, method, *inputs, **kw): return kw a = SomeClass() kw = np.add(a, [1]) assert_('sig' not in kw and 'signature' not in kw) kw = np.add(a, [1], sig='ii->i') assert_('sig' not in kw and 'signature' in kw) assert_equal(kw['signature'], 'ii->i') kw = np.add(a, [1], signature='ii->i') assert_('sig' not in kw and 'signature' in kw) assert_equal(kw['signature'], 'ii->i') def test_array_ufunc_index(self): # Check that index is set appropriately, also if only an output # is passed on (latter is another regression tests for github bug 4753) # This also checks implicitly that 'out' is always a tuple. class CheckIndex(object): def __array_ufunc__(self, ufunc, method, *inputs, **kw): for i, a in enumerate(inputs): if a is self: return i # calls below mean we must be in an output. for j, a in enumerate(kw['out']): if a is self: return (j,) a = CheckIndex() dummy = np.arange(2.) # 1 input, 1 output assert_equal(np.sin(a), 0) assert_equal(np.sin(dummy, a), (0,)) assert_equal(np.sin(dummy, out=a), (0,)) assert_equal(np.sin(dummy, out=(a,)), (0,)) assert_equal(np.sin(a, a), 0) assert_equal(np.sin(a, out=a), 0) assert_equal(np.sin(a, out=(a,)), 0) # 1 input, 2 outputs assert_equal(np.modf(dummy, a), (0,)) assert_equal(np.modf(dummy, None, a), (1,)) assert_equal(np.modf(dummy, dummy, a), (1,)) assert_equal(np.modf(dummy, out=(a, None)), (0,)) assert_equal(np.modf(dummy, out=(a, dummy)), (0,)) assert_equal(np.modf(dummy, out=(None, a)), (1,)) assert_equal(np.modf(dummy, out=(dummy, a)), (1,)) assert_equal(np.modf(a, out=(dummy, a)), 0) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', DeprecationWarning) assert_equal(np.modf(dummy, out=a), (0,)) assert_(w[0].category is DeprecationWarning) assert_raises(ValueError, np.modf, dummy, out=(a,)) # 2 inputs, 1 output assert_equal(np.add(a, dummy), 0) assert_equal(np.add(dummy, a), 1) assert_equal(np.add(dummy, dummy, a), (0,)) assert_equal(np.add(dummy, a, a), 1) assert_equal(np.add(dummy, dummy, out=a), (0,)) assert_equal(np.add(dummy, dummy, out=(a,)), (0,)) assert_equal(np.add(a, dummy, out=a), 0) def test_out_override(self): # regression test for github bug 4753 class OutClass(np.ndarray): def __array_ufunc__(self, ufunc, method, *inputs, **kw): if 'out' in kw: tmp_kw = kw.copy() tmp_kw.pop('out') func = getattr(ufunc, method) kw['out'][0][...] = func(*inputs, **tmp_kw) A = np.array([0]).view(OutClass) B = np.array([5]) C = np.array([6]) np.multiply(C, B, A) assert_equal(A[0], 30) assert_(isinstance(A, OutClass)) A[0] = 0 np.multiply(C, B, out=A) assert_equal(A[0], 30) assert_(isinstance(A, OutClass)) def test_pow_override_with_errors(self): # regression test for gh-9112 class PowerOnly(np.ndarray): def __array_ufunc__(self, ufunc, method, *inputs, **kw): if ufunc is not np.power: raise NotImplementedError return "POWER!" # explicit cast to float, to ensure the fast power path is taken. 
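        # Scalar exponents 0, 0.5, 1, -1 and 2 normally take a fast path that
        # dispatches to helper ufuncs such as np.square or np.sqrt rather
        # than np.power; since PowerOnly only implements np.power, each of
        # the exponents below must still reach __array_ufunc__ and raise
        # NotImplementedError instead of being silently special-cased.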
a = np.array(5., dtype=np.float64).view(PowerOnly) assert_equal(a ** 2.5, "POWER!") with assert_raises(NotImplementedError): a ** 0.5 with assert_raises(NotImplementedError): a ** 0 with assert_raises(NotImplementedError): a ** 1 with assert_raises(NotImplementedError): a ** -1 with assert_raises(NotImplementedError): a ** 2 def test_pow_array_object_dtype(self): # test pow on arrays of object dtype class SomeClass(object): def __init__(self, num=None): self.num = num # want to ensure a fast pow path is not taken def __mul__(self, other): raise AssertionError('__mul__ should not be called') def __div__(self, other): raise AssertionError('__div__ should not be called') def __pow__(self, exp): return SomeClass(num=self.num ** exp) def __eq__(self, other): if isinstance(other, SomeClass): return self.num == other.num __rpow__ = __pow__ def pow_for(exp, arr): return np.array([x ** exp for x in arr]) obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)]) assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr)) assert_equal(obj_arr ** 0, pow_for(0, obj_arr)) assert_equal(obj_arr ** 1, pow_for(1, obj_arr)) assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) assert_equal(obj_arr ** 2, pow_for(2, obj_arr)) def test_pos_array_ufunc_override(self): class A(np.ndarray): def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): return getattr(ufunc, method)(*[i.view(np.ndarray) for i in inputs], **kwargs) tst = np.array('foo').view(A) with assert_raises(TypeError): +tst class TestTemporaryElide(object): # elision is only triggered on relatively large arrays def test_extension_incref_elide(self): # test extension (e.g. cython) calling PyNumber_* slots without # increasing the reference counts # # def incref_elide(a): # d = input.copy() # refcount 1 # return d, d + d # PyNumber_Add without increasing refcount from numpy.core._multiarray_tests import incref_elide d = np.ones(100000) orig, res = incref_elide(d) d + d # the return original should not be changed to an inplace operation assert_array_equal(orig, d) assert_array_equal(res, d + d) def test_extension_incref_elide_stack(self): # scanning if the refcount == 1 object is on the python stack to check # that we are called directly from python is flawed as object may still # be above the stack pointer and we have no access to the top of it # # def incref_elide_l(d): # return l[4] + l[4] # PyNumber_Add without increasing refcount from numpy.core._multiarray_tests import incref_elide_l # padding with 1 makes sure the object on the stack is not overwritten l = [1, 1, 1, 1, np.ones(100000)] res = incref_elide_l(l) # the return original should not be changed to an inplace operation assert_array_equal(l[4], np.ones(100000)) assert_array_equal(res, l[4] + l[4]) def test_temporary_with_cast(self): # check that we don't elide into a temporary which would need casting d = np.ones(200000, dtype=np.int64) assert_equal(((d + d) + 2**222).dtype, np.dtype('O')) r = ((d + d) / 2) assert_equal(r.dtype, np.dtype('f8')) r = np.true_divide((d + d), 2) assert_equal(r.dtype, np.dtype('f8')) r = ((d + d) / 2.) 
assert_equal(r.dtype, np.dtype('f8')) r = ((d + d) // 2) assert_equal(r.dtype, np.dtype(np.int64)) # commutative elision into the astype result f = np.ones(100000, dtype=np.float32) assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8')) # no elision into lower type d = f.astype(np.float64) assert_equal(((f + f) + d).dtype, d.dtype) l = np.ones(100000, dtype=np.longdouble) assert_equal(((d + d) + l).dtype, l.dtype) # test unary abs with different output dtype for dt in (np.complex64, np.complex128, np.clongdouble): c = np.ones(100000, dtype=dt) r = abs(c * 2.0) assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2))) def test_elide_broadcast(self): # test no elision on broadcast to higher dimension # only triggers elision code path in debug mode as triggering it in # normal mode needs 256kb large matching dimension, so a lot of memory d = np.ones((2000, 1), dtype=int) b = np.ones((2000), dtype=bool) r = (1 - d) + b assert_equal(r, 1) assert_equal(r.shape, (2000, 2000)) def test_elide_scalar(self): # check inplace op does not create ndarray from scalars a = np.bool_() assert_(type(~(a & a)) is np.bool_) def test_elide_scalar_readonly(self): # The imaginary part of a real array is readonly. This needs to go # through fast_scalar_power which is only called for powers of # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for # elision which can be gotten for the imaginary part of a real # array. Should not error. a = np.empty(100000, dtype=np.float64) a.imag ** 2 def test_elide_readonly(self): # don't try to elide readonly temporaries r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0 assert_equal(r, 0) def test_elide_updateifcopy(self): a = np.ones(2**20)[::2] b = a.flat.__array__() + 1 del b assert_equal(a, 1) class TestCAPI(object): def test_IsPythonScalar(self): from numpy.core._multiarray_tests import IsPythonScalar assert_(IsPythonScalar(b'foobar')) assert_(IsPythonScalar(1)) assert_(IsPythonScalar(2**80)) assert_(IsPythonScalar(2.)) assert_(IsPythonScalar("a")) class TestSubscripting(object): def test_test_zero_rank(self): x = np.array([1, 2, 3]) assert_(isinstance(x[0], np.int_)) if sys.version_info[0] < 3: assert_(isinstance(x[0], int)) assert_(type(x[0, ...]) is np.ndarray) class TestPickling(object): def test_highest_available_pickle_protocol(self): try: import pickle5 except ImportError: pickle5 = None if sys.version_info[:2] >= (3, 8) or pickle5 is not None: assert pickle.HIGHEST_PROTOCOL >= 5 else: assert pickle.HIGHEST_PROTOCOL < 5 @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5, reason=('this tests the error messages when trying to' 'protocol 5 although it is not available')) def test_correct_protocol5_error_message(self): array = np.arange(10) if sys.version_info[:2] in ((3, 6), (3, 7)): # For the specific case of python3.6 and 3.7, raise a clear import # error about the pickle5 backport when trying to use protocol=5 # without the pickle5 package with pytest.raises(ImportError): array.__reduce_ex__(5) elif sys.version_info[:2] < (3, 6): # when calling __reduce_ex__ explicitly with protocol=5 on python # raise a ValueError saying that protocol 5 is not available for # this python version with pytest.raises(ValueError): array.__reduce_ex__(5) def test_record_array_with_object_dtype(self): my_object = object() arr_with_object = np.array( [(my_object, 1, 2.0)], dtype=[('a', object), ('b', int), ('c', float)]) arr_without_object = np.array( [('xxx', 1, 2.0)], dtype=[('a', str), ('b', int), ('c', float)]) for proto in range(2, 
pickle.HIGHEST_PROTOCOL + 1): depickled_arr_with_object = pickle.loads( pickle.dumps(arr_with_object, protocol=proto)) depickled_arr_without_object = pickle.loads( pickle.dumps(arr_without_object, protocol=proto)) assert_equal(arr_with_object.dtype, depickled_arr_with_object.dtype) assert_equal(arr_without_object.dtype, depickled_arr_without_object.dtype) @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") def test_f_contiguous_array(self): f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F') buffers = [] # When using pickle protocol 5, Fortran-contiguous arrays can be # serialized using out-of-band buffers bytes_string = pickle.dumps(f_contiguous_array, protocol=5, buffer_callback=buffers.append) assert len(buffers) > 0 depickled_f_contiguous_array = pickle.loads(bytes_string, buffers=buffers) assert_equal(f_contiguous_array, depickled_f_contiguous_array) def test_non_contiguous_array(self): non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2] assert not non_contiguous_array.flags.c_contiguous assert not non_contiguous_array.flags.f_contiguous # make sure non-contiguous arrays can be pickled-depickled # using any protocol for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): depickled_non_contiguous_array = pickle.loads( pickle.dumps(non_contiguous_array, protocol=proto)) assert_equal(non_contiguous_array, depickled_non_contiguous_array) def test_roundtrip(self): for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): carray = np.array([[2, 9], [7, 0], [3, 8]]) DATA = [ carray, np.transpose(carray), np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int), ('c', float)]) ] refs = [weakref.ref(a) for a in DATA] for a in DATA: assert_equal( a, pickle.loads(pickle.dumps(a, protocol=proto)), err_msg="%r" % a) del a, DATA, carray gc.collect() # check for reference leaks (gh-12793) for ref in refs: assert ref() is None def _loads(self, obj): if sys.version_info[0] >= 3: return pickle.loads(obj, encoding='latin1') else: return pickle.loads(obj) # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field def test_version0_int8(self): s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' 
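# the protocol-2 pickle above encodes an int8 array with values 1..4; loading
# it must reproduce the reference array built below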
a = np.array([1, 2, 3, 4], dtype=np.int8) p = self._loads(s) assert_equal(a, p) def test_version0_float32(self): s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) def test_mixed(self): g1 = np.array(["spam", "spa", "spammer", "and eggs"]) g2 = "spam" assert_array_equal(g1 == g2, [x == g2 for x in g1]) assert_array_equal(g1 != g2, [x != g2 for x in g1]) assert_array_equal(g1 < g2, [x < g2 for x in g1]) assert_array_equal(g1 > g2, [x > g2 for x in g1]) assert_array_equal(g1 <= g2, [x <= g2 for x in g1]) assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) def test_unicode(self): g1 = np.array([u"This", u"is", u"example"]) g2 = np.array([u"This", u"was", u"example"]) assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) class TestArgmax(object): nan_arr = [ ([0, 1, 2, 3, np.nan], 4), ([0, 1, 2, np.nan, 3], 3), ([np.nan, 0, 1, 2, 3], 0), ([np.nan, 0, np.nan, 2, 3], 0), ([0, 1, 2, 3, complex(0, np.nan)], 4), ([0, 1, 2, 3, complex(np.nan, 0)], 4), ([0, 1, 2, complex(np.nan, 0), 3], 3), ([0, 1, 2, complex(0, np.nan), 3], 3), ([complex(0, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), ([complex(0, 0), complex(0, 2), complex(0, 1)], 1), ([complex(1, 0), complex(0, 2), complex(0, 1)], 0), ([complex(1, 0), complex(0, 2), complex(1, 1)], 2), ([np.datetime64('1923-04-14T12:43:12'), np.datetime64('1994-06-21T14:43:15'), np.datetime64('2001-10-15T04:10:32'), np.datetime64('1995-11-25T16:02:16'), np.datetime64('2005-01-04T03:14:12'), np.datetime64('2041-12-03T14:05:03')], 5), ([np.datetime64('1935-09-14T04:40:11'), np.datetime64('1949-10-12T12:32:11'), np.datetime64('2010-01-03T05:14:12'), np.datetime64('2015-11-20T12:20:59'), np.datetime64('1932-09-23T10:10:13'), np.datetime64('2014-10-10T03:50:30')], 3), # Assorted tests with NaTs ([np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('2010-01-03T05:14:12'), np.datetime64('NaT'), np.datetime64('2015-09-23T10:10:13'), np.datetime64('1932-10-10T03:50:30')], 4), ([np.datetime64('2059-03-14T12:43:12'), np.datetime64('1996-09-21T14:43:15'), np.datetime64('NaT'), np.datetime64('2022-12-25T16:02:16'), np.datetime64('1963-10-04T03:14:12'), np.datetime64('2013-05-08T18:15:23')], 0), ([np.timedelta64(2, 's'), np.timedelta64(1, 's'), np.timedelta64('NaT', 's'), np.timedelta64(3, 's')], 3), ([np.timedelta64('NaT', 's')] * 3, 0), ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), timedelta(days=-1, seconds=23)], 0), ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), timedelta(days=5, seconds=14)], 1), ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), timedelta(days=10, seconds=43)], 2), ([False, False, False, False, True], 4), ([False, False, False, True, False], 3), ([True, 
False, False, False, False], 0), ([True, False, True, False, False], 0), ] def test_all(self): a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) for i in range(a.ndim): amax = a.max(i) aargmax = a.argmax(i) axes = list(range(a.ndim)) axes.remove(i) assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes)))) def test_combinations(self): for arr, pos in self.nan_arr: with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in reduce") max_val = np.max(arr) assert_equal(np.argmax(arr), pos, err_msg="%r" % arr) assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr) def test_output_shape(self): # see also gh-616 a = np.ones((10, 5)) # Check some simple shape mismatches out = np.ones(11, dtype=np.int_) assert_raises(ValueError, a.argmax, -1, out) out = np.ones((2, 5), dtype=np.int_) assert_raises(ValueError, a.argmax, -1, out) # these could be relaxed possibly (used to allow even the previous) out = np.ones((1, 10), dtype=np.int_) assert_raises(ValueError, a.argmax, -1, out) out = np.ones(10, dtype=np.int_) a.argmax(-1, out=out) assert_equal(out, a.argmax(-1)) def test_argmax_unicode(self): d = np.zeros(6031, dtype='= cmin)) assert_(np.all(x <= cmax)) def _clip_type(self, type_group, array_max, clip_min, clip_max, inplace=False, expected_min=None, expected_max=None): if expected_min is None: expected_min = clip_min if expected_max is None: expected_max = clip_max for T in np.sctypes[type_group]: if sys.byteorder == 'little': byte_orders = ['=', '>'] else: byte_orders = ['<', '='] for byteorder in byte_orders: dtype = np.dtype(T).newbyteorder(byteorder) x = (np.random.random(1000) * array_max).astype(dtype) if inplace: x.clip(clip_min, clip_max, x) else: x = x.clip(clip_min, clip_max) byteorder = '=' if x.dtype.byteorder == '|': byteorder = '|' assert_equal(x.dtype.byteorder, byteorder) self._check_range(x, expected_min, expected_max) return x def test_basic(self): for inplace in [False, True]: self._clip_type( 'float', 1024, -12.8, 100.2, inplace=inplace) self._clip_type( 'float', 1024, 0, 0, inplace=inplace) self._clip_type( 'int', 1024, -120, 100.5, inplace=inplace) self._clip_type( 'int', 1024, 0, 0, inplace=inplace) self._clip_type( 'uint', 1024, 0, 0, inplace=inplace) self._clip_type( 'uint', 1024, -120, 100, inplace=inplace, expected_min=0) def test_record_array(self): rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], dtype=[('x', '= 3)) x = val.clip(min=3) assert_(np.all(x >= 3)) x = val.clip(max=4) assert_(np.all(x <= 4)) def test_nan(self): input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan]) result = input_arr.clip(-1, 1) expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan]) assert_array_equal(result, expected) class TestCompress(object): def test_axis(self): tgt = [[5, 6, 7, 8, 9]] arr = np.arange(10).reshape(2, 5) out = np.compress([0, 1], arr, axis=0) assert_equal(out, tgt) tgt = [[1, 3], [6, 8]] out = np.compress([0, 1, 0, 1, 0], arr, axis=1) assert_equal(out, tgt) def test_truncate(self): tgt = [[1], [6]] arr = np.arange(10).reshape(2, 5) out = np.compress([0, 1], arr, axis=1) assert_equal(out, tgt) def test_flatten(self): arr = np.arange(10).reshape(2, 5) out = np.compress([0, 1], arr) assert_equal(out, 1) class TestPutmask(object): def tst_basic(self, x, T, mask, val): np.putmask(x, mask, val) assert_equal(x[mask], T(val)) assert_equal(x.dtype, T) def test_ip_types(self): unchecked_types = [bytes, unicode, np.void, object] x = np.random.random(1000)*100 mask = x < 40 for val in [-100, 0, 15]: for types in np.sctypes.values(): for T in 
types: if T not in unchecked_types: self.tst_basic(x.copy().astype(T), T, mask, val) def test_mask_size(self): assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) @pytest.mark.parametrize('dtype', ('>i4', 'f8'), ('z', 'i4', 'f8'), ('z', ' 16MB d = np.zeros(4 * 1024 ** 2) d.tofile(self.filename) assert_equal(os.path.getsize(self.filename), d.nbytes) assert_array_equal(d, np.fromfile(self.filename)) # check offset with open(self.filename, "r+b") as f: f.seek(d.nbytes) d.tofile(f) assert_equal(os.path.getsize(self.filename), d.nbytes * 2) # check append mode (gh-8329) open(self.filename, "w").close() # delete file contents with open(self.filename, "ab") as f: d.tofile(f) assert_array_equal(d, np.fromfile(self.filename)) with open(self.filename, "ab") as f: d.tofile(f) assert_equal(os.path.getsize(self.filename), d.nbytes * 2) def test_io_open_buffered_fromfile(self): # gh-6632 self.x.tofile(self.filename) with io.open(self.filename, 'rb', buffering=-1) as f: y = np.fromfile(f, dtype=self.dtype) assert_array_equal(y, self.x.flat) def test_file_position_after_fromfile(self): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE//8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE*8] for size in sizes: f = open(self.filename, 'wb') f.seek(size-1) f.write(b'\0') f.close() for mode in ['rb', 'r+b']: err_msg = "%d %s" % (size, mode) f = open(self.filename, mode) f.read(2) np.fromfile(f, dtype=np.float64, count=1) pos = f.tell() f.close() assert_equal(pos, 10, err_msg=err_msg) def test_file_position_after_tofile(self): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE//8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE*8] for size in sizes: err_msg = "%d" % (size,) f = open(self.filename, 'wb') f.seek(size-1) f.write(b'\0') f.seek(10) f.write(b'12') np.array([0], dtype=np.float64).tofile(f) pos = f.tell() f.close() assert_equal(pos, 10 + 2 + 8, err_msg=err_msg) f = open(self.filename, 'r+b') f.read(2) f.seek(0, 1) # seek between read&write required by ANSI C np.array([0], dtype=np.float64).tofile(f) pos = f.tell() f.close() assert_equal(pos, 10, err_msg=err_msg) def test_load_object_array_fromfile(self): # gh-12300 with open(self.filename, 'w') as f: # Ensure we have a file with consistent contents pass with open(self.filename, 'rb') as f: assert_raises_regex(ValueError, "Cannot read into object array", np.fromfile, f, dtype=object) assert_raises_regex(ValueError, "Cannot read into object array", np.fromfile, self.filename, dtype=object) def _check_from(self, s, value, **kw): if 'sep' not in kw: y = np.frombuffer(s, **kw) else: y = np.fromstring(s, **kw) assert_array_equal(y, value) f = open(self.filename, 'wb') f.write(s) f.close() y = np.fromfile(self.filename, **kw) assert_array_equal(y, value) def test_nan(self): self._check_from( b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], sep=' ') def test_inf(self): self._check_from( b"inf +inf -inf infinity -Infinity iNfInItY -inF", [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], sep=' ') def test_numbers(self): self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133", [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ') def test_binary(self): self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', np.array([1, 2, 3, 4]), dtype=' 1 minute on mechanical hard drive def test_big_binary(self): """Test workarounds for 32-bit limited fwrite, fseek, and ftell calls in windows. These normally would hang doing something like this. 
See http://projects.scipy.org/numpy/ticket/1660""" if sys.platform != 'win32': return try: # before workarounds, only up to 2**32-1 worked fourgbplus = 2**32 + 2**16 testbytes = np.arange(8, dtype=np.int8) n = len(testbytes) flike = tempfile.NamedTemporaryFile() f = flike.file np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f) flike.seek(0) a = np.fromfile(f, dtype=np.int8) flike.close() assert_(len(a) == fourgbplus) # check only start and end for speed: assert_((a[:n] == testbytes).all()) assert_((a[-n:] == testbytes).all()) except (MemoryError, ValueError): pass def test_string(self): self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',') def test_counted_string(self): self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',') self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',') self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',') def test_string_with_ws(self): self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ') def test_counted_string_with_ws(self): self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int, sep=' ') def test_ascii(self): self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',') self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',') def test_malformed(self): self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ') def test_long_sep(self): self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_') def test_dtype(self): v = np.array([1, 2, 3, 4], dtype=np.int_) self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_) def test_dtype_bool(self): # can't use _check_from because fromstring can't handle True/False v = np.array([True, False, True, False], dtype=np.bool_) s = b'1,0,-2.3,0' f = open(self.filename, 'wb') f.write(s) f.close() y = np.fromfile(self.filename, sep=',', dtype=np.bool_) assert_(y.dtype == '?') assert_array_equal(y, v) def test_tofile_sep(self): x = np.array([1.51, 2, 3.51, 4], dtype=float) f = open(self.filename, 'w') x.tofile(f, sep=',') f.close() f = open(self.filename, 'r') s = f.read() f.close() #assert_equal(s, '1.51,2.0,3.51,4.0') y = np.array([float(p) for p in s.split(',')]) assert_array_equal(x,y) def test_tofile_format(self): x = np.array([1.51, 2, 3.51, 4], dtype=float) f = open(self.filename, 'w') x.tofile(f, sep=',', format='%.2f') f.close() f = open(self.filename, 'r') s = f.read() f.close() assert_equal(s, '1.51,2.00,3.51,4.00') def test_locale(self): with CommaDecimalPointLocale(): self.test_numbers() self.test_nan() self.test_inf() self.test_counted_string() self.test_ascii() self.test_malformed() self.test_tofile_sep() self.test_tofile_format() class TestFromBuffer(object): @pytest.mark.parametrize('byteorder', ['<', '>']) @pytest.mark.parametrize('dtype', [float, int, complex]) def test_basic(self, byteorder, dtype): dt = np.dtype(dtype).newbyteorder(byteorder) x = (np.random.random((4, 7)) * 5).astype(dt) buf = x.tobytes() assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat) def test_empty(self): assert_array_equal(np.frombuffer(b''), np.array([])) class TestFlat(object): def setup(self): a0 = np.arange(20.0) a = a0.reshape(4, 5) a0.shape = (4, 5) a.flags.writeable = False self.a = a self.b = a[::2, ::2] self.a0 = a0 self.b0 = a0[::2, ::2] def test_contiguous(self): testpassed = False try: self.a.flat[12] = 100.0 except ValueError: testpassed = True assert_(testpassed) assert_(self.a.flat[12] == 12.0) def test_discontiguous(self): testpassed = False try: self.b.flat[4] = 100.0 except ValueError: testpassed = True assert_(testpassed) assert_(self.b.flat[4] == 12.0) 
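# __array__ on the .flat iterator of a read-only array should come back
# non-writeable as well; both the contiguous and discontiguous cases are
# checked in the next test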
def test___array__(self): c = self.a.flat.__array__() d = self.b.flat.__array__() e = self.a0.flat.__array__() f = self.b0.flat.__array__() assert_(c.flags.writeable is False) assert_(d.flags.writeable is False) # for 1.14 all are set to non-writeable on the way to replacing the # UPDATEIFCOPY array returned for non-contiguous arrays. assert_(e.flags.writeable is True) assert_(f.flags.writeable is False) with assert_warns(DeprecationWarning): assert_(c.flags.updateifcopy is False) with assert_warns(DeprecationWarning): assert_(d.flags.updateifcopy is False) with assert_warns(DeprecationWarning): assert_(e.flags.updateifcopy is False) with assert_warns(DeprecationWarning): # UPDATEIFCOPY is removed. assert_(f.flags.updateifcopy is False) assert_(c.flags.writebackifcopy is False) assert_(d.flags.writebackifcopy is False) assert_(e.flags.writebackifcopy is False) assert_(f.flags.writebackifcopy is False) class TestResize(object): def test_basic(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) if IS_PYPY: x.resize((5, 5), refcheck=False) else: x.resize((5, 5)) assert_array_equal(x.flat[:9], np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) assert_array_equal(x[9:].flat, 0) def test_check_reference(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) y = x assert_raises(ValueError, x.resize, (5, 1)) del y # avoid pyflakes unused variable warning. def test_int_shape(self): x = np.eye(3) if IS_PYPY: x.resize(3, refcheck=False) else: x.resize(3) assert_array_equal(x, np.eye(3)[0,:]) def test_none_shape(self): x = np.eye(3) x.resize(None) assert_array_equal(x, np.eye(3)) x.resize() assert_array_equal(x, np.eye(3)) def test_0d_shape(self): # to it multiple times to test it does not break alloc cache gh-9216 for i in range(10): x = np.empty((1,)) x.resize(()) assert_equal(x.shape, ()) assert_equal(x.size, 1) x = np.empty(()) x.resize((1,)) assert_equal(x.shape, (1,)) assert_equal(x.size, 1) def test_invalid_arguments(self): assert_raises(TypeError, np.eye(3).resize, 'hi') assert_raises(ValueError, np.eye(3).resize, -1) assert_raises(TypeError, np.eye(3).resize, order=1) assert_raises(TypeError, np.eye(3).resize, refcheck='hi') def test_freeform_shape(self): x = np.eye(3) if IS_PYPY: x.resize(3, 2, 1, refcheck=False) else: x.resize(3, 2, 1) assert_(x.shape == (3, 2, 1)) def test_zeros_appended(self): x = np.eye(3) if IS_PYPY: x.resize(2, 3, 3, refcheck=False) else: x.resize(2, 3, 3) assert_array_equal(x[0], np.eye(3)) assert_array_equal(x[1], np.zeros((3, 3))) def test_obj_obj(self): # check memory is initialized on resize, gh-4857 a = np.ones(10, dtype=[('k', object, 2)]) if IS_PYPY: a.resize(15, refcheck=False) else: a.resize(15,) assert_equal(a.shape, (15,)) assert_array_equal(a['k'][-5:], 0) assert_array_equal(a['k'][:-5], 1) def test_empty_view(self): # check that sizes containing a zero don't trigger a reallocate for # already empty arrays x = np.zeros((10, 0), int) x_view = x[...] x_view.resize((0, 10)) x_view.resize((0, 100)) def test_check_weakref(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) xref = weakref.ref(x) assert_raises(ValueError, x.resize, (5, 1)) del xref # avoid pyflakes unused variable warning. 
class TestRecord(object): def test_field_rename(self): dt = np.dtype([('f', float), ('i', int)]) dt.names = ['p', 'q'] assert_equal(dt.names, ['p', 'q']) def test_multiple_field_name_occurrence(self): def test_dtype_init(): np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")]) # Error raised when multiple fields have the same name assert_raises(ValueError, test_dtype_init) @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3") def test_bytes_fields(self): # Bytes are not allowed in field names and not recognized in titles # on Py3 assert_raises(TypeError, np.dtype, [(b'a', int)]) assert_raises(TypeError, np.dtype, [(('b', b'a'), int)]) dt = np.dtype([((b'a', 'b'), int)]) assert_raises(TypeError, dt.__getitem__, b'a') x = np.array([(1,), (2,), (3,)], dtype=dt) assert_raises(IndexError, x.__getitem__, b'a') y = x[0] assert_raises(IndexError, y.__getitem__, b'a') @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3") def test_multiple_field_name_unicode(self): def test_dtype_unicode(): np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")]) # Error raised when multiple fields have the same name(unicode included) assert_raises(ValueError, test_dtype_unicode) @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2") def test_unicode_field_titles(self): # Unicode field titles are added to field dict on Py2 title = u'b' dt = np.dtype([((title, 'a'), int)]) dt[title] dt['a'] x = np.array([(1,), (2,), (3,)], dtype=dt) x[title] x['a'] y = x[0] y[title] y['a'] @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2") def test_unicode_field_names(self): # Unicode field names are converted to ascii on Python 2: encodable_name = u'b' assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b') assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b') # But raises UnicodeEncodeError if it can't be encoded: nonencodable_name = u'\uc3bc' assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)]) assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)]) def test_fromarrays_unicode(self): # A single name string provided to fromarrays() is allowed to be unicode # on both Python 2 and 3: x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4') assert_equal(x['a'][0], 0) assert_equal(x['b'][0], 1) def test_unicode_order(self): # Test that we can sort with order as a unicode field name in both Python 2 and # 3: name = u'b' x = np.array([1, 3, 2], dtype=[(name, int)]) x.sort(order=name) assert_equal(x[u'b'], np.array([1, 2, 3])) def test_field_names(self): # Test unicode and 8-bit / byte strings can be used a = np.zeros((1,), dtype=[('f1', 'i4'), ('f2', 'i4'), ('f3', [('sf1', 'i4')])]) is_py3 = sys.version_info[0] >= 3 if is_py3: funcs = (str,) # byte string indexing fails gracefully assert_raises(IndexError, a.__setitem__, b'f1', 1) assert_raises(IndexError, a.__getitem__, b'f1') assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1) assert_raises(IndexError, a['f1'].__getitem__, b'sf1') else: funcs = (str, unicode) for func in funcs: b = a.copy() fn1 = func('f1') b[fn1] = 1 assert_equal(b[fn1], 1) fnn = func('not at all') assert_raises(ValueError, b.__setitem__, fnn, 1) assert_raises(ValueError, b.__getitem__, fnn) b[0][fn1] = 2 assert_equal(b[fn1], 2) # Subfield assert_raises(ValueError, b[0].__setitem__, fnn, 1) assert_raises(ValueError, b[0].__getitem__, fnn) # Subfield fn3 = func('f3') sfn1 = func('sf1') b[fn3][sfn1] = 1 assert_equal(b[fn3][sfn1], 1) assert_raises(ValueError, 
b[fn3].__setitem__, fnn, 1) assert_raises(ValueError, b[fn3].__getitem__, fnn) # multiple subfields fn2 = func('f2') b[fn2] = 3 assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) # non-ascii unicode field indexing is well behaved if not is_py3: pytest.skip('non ascii unicode field indexing skipped; ' 'raises segfault on python 2.x') else: assert_raises(ValueError, a.__setitem__, u'\u03e0', 1) assert_raises(ValueError, a.__getitem__, u'\u03e0') def test_record_hash(self): a = np.array([(1, 2), (1, 2)], dtype='i1,i2') a.flags.writeable = False b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')]) b.flags.writeable = False c = np.array([(1, 2), (3, 4)], dtype='i1,i2') c.flags.writeable = False assert_(hash(a[0]) == hash(a[1])) assert_(hash(a[0]) == hash(b[0])) assert_(hash(a[0]) != hash(b[1])) assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0]) def test_record_no_hash(self): a = np.array([(1, 2), (1, 2)], dtype='i1,i2') assert_raises(TypeError, hash, a[0]) def test_empty_structure_creation(self): # make sure these do not raise errors (gh-5631) np.array([()], dtype={'names': [], 'formats': [], 'offsets': [], 'itemsize': 12}) np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [], 'offsets': [], 'itemsize': 12}) def test_multifield_indexing_view(self): a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')]) v = a[['a', 'c']] assert_(v.base is a) assert_(v.dtype == np.dtype({'names': ['a', 'c'], 'formats': ['i4', 'u4'], 'offsets': [0, 8]})) v[:] = (4,5) assert_equal(a[0].item(), (4, 1, 5)) class TestView(object): def test_basic(self): x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype=[('r', np.int8), ('g', np.int8), ('b', np.int8), ('a', np.int8)]) # We must be specific about the endianness here: y = x.view(dtype=' 0) assert_(issubclass(w[0].category, RuntimeWarning)) def test_empty(self): A = np.zeros((0, 3)) for f in self.funcs: for axis in [0, None]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(f(A, axis=axis)).all()) assert_(len(w) > 0) assert_(issubclass(w[0].category, RuntimeWarning)) for axis in [1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_equal(f(A, axis=axis), np.zeros([])) def test_mean_values(self): for mat in [self.rmat, self.cmat, self.omat]: for axis in [0, 1]: tgt = mat.sum(axis=axis) res = _mean(mat, axis=axis) * mat.shape[axis] assert_almost_equal(res, tgt) for axis in [None]: tgt = mat.sum(axis=axis) res = _mean(mat, axis=axis) * np.prod(mat.shape) assert_almost_equal(res, tgt) def test_mean_float16(self): # This fail if the sum inside mean is done in float16 instead # of float32. 
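# (in float16 the running sum stops increasing once it reaches 2048, where the
# spacing between representable values becomes 2, so a float16 accumulator
# would give a mean far below 1)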
assert_(_mean(np.ones(100000, dtype='float16')) == 1) def test_var_values(self): for mat in [self.rmat, self.cmat, self.omat]: for axis in [0, 1, None]: msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) tgt = msqr - mean * mean.conjugate() res = _var(mat, axis=axis) assert_almost_equal(res, tgt) def test_std_values(self): for mat in [self.rmat, self.cmat, self.omat]: for axis in [0, 1, None]: tgt = np.sqrt(_var(mat, axis=axis)) res = _std(mat, axis=axis) assert_almost_equal(res, tgt) def test_subclass(self): class TestArray(np.ndarray): def __new__(cls, data, info): result = np.array(data) result = result.view(cls) result.info = info return result def __array_finalize__(self, obj): self.info = getattr(obj, "info", '') dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') res = dat.mean(1) assert_(res.info == dat.info) res = dat.std(1) assert_(res.info == dat.info) res = dat.var(1) assert_(res.info == dat.info) class TestVdot(object): def test_basic(self): dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger'] dt_complex = np.typecodes['Complex'] # test real a = np.eye(3) for dt in dt_numeric + 'O': b = a.astype(dt) res = np.vdot(b, b) assert_(np.isscalar(res)) assert_equal(np.vdot(b, b), 3) # test complex a = np.eye(3) * 1j for dt in dt_complex + 'O': b = a.astype(dt) res = np.vdot(b, b) assert_(np.isscalar(res)) assert_equal(np.vdot(b, b), 3) # test boolean b = np.eye(3, dtype=bool) res = np.vdot(b, b) assert_(np.isscalar(res)) assert_equal(np.vdot(b, b), True) def test_vdot_array_order(self): a = np.array([[1, 2], [3, 4]], order='C') b = np.array([[1, 2], [3, 4]], order='F') res = np.vdot(a, a) # integer arrays are exact assert_equal(np.vdot(a, b), res) assert_equal(np.vdot(b, a), res) assert_equal(np.vdot(b, b), res) def test_vdot_uncontiguous(self): for size in [2, 1000]: # Different sizes match different branches in vdot. 
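# the a[..., 0] and b[..., 0] slices below are non-contiguous, so vdot has to
# handle (or buffer) strided input and still agree with the flattened copies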
a = np.zeros((size, 2, 2)) b = np.zeros((size, 2, 2)) a[:, 0, 0] = np.arange(size) b[:, 0, 0] = np.arange(size) + 1 # Make a and b uncontiguous: a = a[..., 0] b = b[..., 0] assert_equal(np.vdot(a, b), np.vdot(a.flatten(), b.flatten())) assert_equal(np.vdot(a, b.copy()), np.vdot(a.flatten(), b.flatten())) assert_equal(np.vdot(a.copy(), b), np.vdot(a.flatten(), b.flatten())) assert_equal(np.vdot(a.copy('F'), b), np.vdot(a.flatten(), b.flatten())) assert_equal(np.vdot(a, b.copy('F')), np.vdot(a.flatten(), b.flatten())) class TestDot(object): def setup(self): np.random.seed(128) self.A = np.random.rand(4, 2) self.b1 = np.random.rand(2, 1) self.b2 = np.random.rand(2) self.b3 = np.random.rand(1, 2) self.b4 = np.random.rand(4) self.N = 7 def test_dotmatmat(self): A = self.A res = np.dot(A.transpose(), A) tgt = np.array([[1.45046013, 0.86323640], [0.86323640, 0.84934569]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec(self): A, b1 = self.A, self.b1 res = np.dot(A, b1) tgt = np.array([[0.32114320], [0.04889721], [0.15696029], [0.33612621]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec2(self): A, b2 = self.A, self.b2 res = np.dot(A, b2) tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat(self): A, b4 = self.A, self.b4 res = np.dot(b4, A) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat2(self): b3, A = self.b3, self.A res = np.dot(b3, A.transpose()) tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat3(self): A, b4 = self.A, self.b4 res = np.dot(A.transpose(), b4) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecvecouter(self): b1, b3 = self.b1, self.b3 res = np.dot(b1, b3) tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecvecinner(self): b1, b3 = self.b1, self.b3 res = np.dot(b3, b1) tgt = np.array([[ 0.23129668]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotcolumnvect1(self): b1 = np.ones((3, 1)) b2 = [5.3] res = np.dot(b1, b2) tgt = np.array([5.3, 5.3, 5.3]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotcolumnvect2(self): b1 = np.ones((3, 1)).transpose() b2 = [6.2] res = np.dot(b2, b1) tgt = np.array([6.2, 6.2, 6.2]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar(self): np.random.seed(100) b1 = np.random.rand(1, 1) b2 = np.random.rand(1, 4) res = np.dot(b1, b2) tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar2(self): np.random.seed(100) b1 = np.random.rand(4, 1) b2 = np.random.rand(1, 1) res = np.dot(b1, b2) tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]]) assert_almost_equal(res, tgt, decimal=self.N) def test_all(self): dims = [(), (1,), (1, 1)] dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)] for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)): b1 = np.zeros(dim1) b2 = np.zeros(dim2) res = np.dot(b1, b2) tgt = np.zeros(dim) assert_(res.shape == tgt.shape) assert_almost_equal(res, tgt, decimal=self.N) def test_vecobject(self): class Vec(object): def __init__(self, sequence=None): if sequence is None: sequence = [] self.array = np.array(sequence) def __add__(self, other): out = Vec() out.array = self.array + other.array return out def 
__sub__(self, other): out = Vec() out.array = self.array - other.array return out def __mul__(self, other): # with scalar out = Vec(self.array.copy()) out.array *= other return out def __rmul__(self, other): return self*other U_non_cont = np.transpose([[1., 1.], [1., 2.]]) U_cont = np.ascontiguousarray(U_non_cont) x = np.array([Vec([1., 0.]), Vec([0., 1.])]) zeros = np.array([Vec([0., 0.]), Vec([0., 0.])]) zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x) assert_equal(zeros[0].array, zeros_test[0].array) assert_equal(zeros[1].array, zeros_test[1].array) def test_dot_2args(self): from numpy.core.multiarray import dot a = np.array([[1, 2], [3, 4]], dtype=float) b = np.array([[1, 0], [1, 1]], dtype=float) c = np.array([[3, 2], [7, 4]], dtype=float) d = dot(a, b) assert_allclose(c, d) def test_dot_3args(self): from numpy.core.multiarray import dot np.random.seed(22) f = np.random.random_sample((1024, 16)) v = np.random.random_sample((16, 32)) r = np.empty((1024, 32)) for i in range(12): dot(f, v, r) if HAS_REFCOUNT: assert_equal(sys.getrefcount(r), 2) r2 = dot(f, v, out=None) assert_array_equal(r2, r) assert_(r is dot(f, v, out=r)) v = v[:, 0].copy() # v.shape == (16,) r = r[:, 0].copy() # r.shape == (1024,) r2 = dot(f, v) assert_(r is dot(f, v, r)) assert_array_equal(r2, r) def test_dot_3args_errors(self): from numpy.core.multiarray import dot np.random.seed(22) f = np.random.random_sample((1024, 16)) v = np.random.random_sample((16, 32)) r = np.empty((1024, 31)) assert_raises(ValueError, dot, f, v, r) r = np.empty((1024,)) assert_raises(ValueError, dot, f, v, r) r = np.empty((32,)) assert_raises(ValueError, dot, f, v, r) r = np.empty((32, 1024)) assert_raises(ValueError, dot, f, v, r) assert_raises(ValueError, dot, f, v, r.T) r = np.empty((1024, 64)) assert_raises(ValueError, dot, f, v, r[:, ::2]) assert_raises(ValueError, dot, f, v, r[:, :32]) r = np.empty((1024, 32), dtype=np.float32) assert_raises(ValueError, dot, f, v, r) r = np.empty((1024, 32), dtype=int) assert_raises(ValueError, dot, f, v, r) def test_dot_array_order(self): a = np.array([[1, 2], [3, 4]], order='C') b = np.array([[1, 2], [3, 4]], order='F') res = np.dot(a, a) # integer arrays are exact assert_equal(np.dot(a, b), res) assert_equal(np.dot(b, a), res) assert_equal(np.dot(b, b), res) def test_accelerate_framework_sgemv_fix(self): def aligned_array(shape, align, dtype, order='C'): d = dtype(0) N = np.prod(shape) tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) address = tmp.__array_interface__["data"][0] for offset in range(align): if (address + offset) % align == 0: break tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) return tmp.reshape(shape, order=order) def as_aligned(arr, align, dtype, order='C'): aligned = aligned_array(arr.shape, align, dtype, order) aligned[:] = arr[:] return aligned def assert_dot_close(A, X, desired): assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7) m = aligned_array(100, 15, np.float32) s = aligned_array((100, 100), 15, np.float32) np.dot(s, m) # this will always segfault if the bug is present testdata = itertools.product((15,32), (10000,), (200,89), ('C','F')) for align, m, n, a_order in testdata: # Calculation in double precision A_d = np.random.rand(m, n) X_d = np.random.rand(n) desired = np.dot(A_d, X_d) # Calculation with aligned single precision A_f = as_aligned(A_d, align, np.float32, order=a_order) X_f = as_aligned(X_d, align, np.float32) assert_dot_close(A_f, X_f, desired) # Strided A rows A_d_2 = A_d[::2] desired = np.dot(A_d_2, X_d) A_f_2 = A_f[::2] 
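# the strided single-precision rows must still match the double-precision
# reference computed just above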
assert_dot_close(A_f_2, X_f, desired) # Strided A columns, strided X vector A_d_22 = A_d_2[:, ::2] X_d_2 = X_d[::2] desired = np.dot(A_d_22, X_d_2) A_f_22 = A_f_2[:, ::2] X_f_2 = X_f[::2] assert_dot_close(A_f_22, X_f_2, desired) # Check the strides are as expected if a_order == 'F': assert_equal(A_f_22.strides, (8, 8 * m)) else: assert_equal(A_f_22.strides, (8 * n, 8)) assert_equal(X_f_2.strides, (8,)) # Strides in A rows + cols only X_f_2c = as_aligned(X_f_2, align, np.float32) assert_dot_close(A_f_22, X_f_2c, desired) # Strides just in A cols A_d_12 = A_d[:, ::2] desired = np.dot(A_d_12, X_d_2) A_f_12 = A_f[:, ::2] assert_dot_close(A_f_12, X_f_2c, desired) # Strides in A cols and X assert_dot_close(A_f_12, X_f_2, desired) class MatmulCommon(object): """Common tests for '@' operator and numpy.matmul. """ # Should work with these types. Will want to add # "O" at some point types = "?bhilqBHILQefdgFDG" def test_exceptions(self): dims = [ ((1,), (2,)), # mismatched vector vector ((2, 1,), (2,)), # mismatched matrix vector ((2,), (1, 2)), # mismatched vector matrix ((1, 2), (3, 1)), # mismatched matrix matrix ((1,), ()), # vector scalar ((), (1)), # scalar vector ((1, 1), ()), # matrix scalar ((), (1, 1)), # scalar matrix ((2, 2, 1), (3, 1, 2)), # cannot broadcast ] for dt, (dm1, dm2) in itertools.product(self.types, dims): a = np.ones(dm1, dtype=dt) b = np.ones(dm2, dtype=dt) assert_raises(ValueError, self.matmul, a, b) def test_shapes(self): dims = [ ((1, 1), (2, 1, 1)), # broadcast first argument ((2, 1, 1), (1, 1)), # broadcast second argument ((2, 1, 1), (2, 1, 1)), # matrix stack sizes match ] for dt, (dm1, dm2) in itertools.product(self.types, dims): a = np.ones(dm1, dtype=dt) b = np.ones(dm2, dtype=dt) res = self.matmul(a, b) assert_(res.shape == (2, 1, 1)) # vector vector returns scalars. 
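# (matmul of two 1-d operands contracts both axes, so the result is 0-d)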
for dt in self.types: a = np.ones((2,), dtype=dt) b = np.ones((2,), dtype=dt) c = self.matmul(a, b) assert_(np.array(c).shape == ()) def test_result_types(self): mat = np.ones((1,1)) vec = np.ones((1,)) for dt in self.types: m = mat.astype(dt) v = vec.astype(dt) for arg in [(m, v), (v, m), (m, m)]: res = self.matmul(*arg) assert_(res.dtype == dt) # vector vector returns scalars res = self.matmul(v, v) assert_(type(res) is np.dtype(dt).type) def test_scalar_output(self): vec1 = np.array([2]) vec2 = np.array([3, 4]).reshape(1, -1) tgt = np.array([6, 8]) for dt in self.types[1:]: v1 = vec1.astype(dt) v2 = vec2.astype(dt) res = self.matmul(v1, v2) assert_equal(res, tgt) res = self.matmul(v2.T, v1) assert_equal(res, tgt) # boolean type vec = np.array([True, True], dtype='?').reshape(1, -1) res = self.matmul(vec[:, 0], vec) assert_equal(res, True) def test_vector_vector_values(self): vec1 = np.array([1, 2]) vec2 = np.array([3, 4]).reshape(-1, 1) tgt1 = np.array([11]) tgt2 = np.array([[3, 6], [4, 8]]) for dt in self.types[1:]: v1 = vec1.astype(dt) v2 = vec2.astype(dt) res = self.matmul(v1, v2) assert_equal(res, tgt1) # no broadcast, we must make v1 into a 2d ndarray res = self.matmul(v2, v1.reshape(1, -1)) assert_equal(res, tgt2) # boolean type vec = np.array([True, True], dtype='?') res = self.matmul(vec, vec) assert_equal(res, True) def test_vector_matrix_values(self): vec = np.array([1, 2]) mat1 = np.array([[1, 2], [3, 4]]) mat2 = np.stack([mat1]*2, axis=0) tgt1 = np.array([7, 10]) tgt2 = np.stack([tgt1]*2, axis=0) for dt in self.types[1:]: v = vec.astype(dt) m1 = mat1.astype(dt) m2 = mat2.astype(dt) res = self.matmul(v, m1) assert_equal(res, tgt1) res = self.matmul(v, m2) assert_equal(res, tgt2) # boolean type vec = np.array([True, False]) mat1 = np.array([[True, False], [False, True]]) mat2 = np.stack([mat1]*2, axis=0) tgt1 = np.array([True, False]) tgt2 = np.stack([tgt1]*2, axis=0) res = self.matmul(vec, mat1) assert_equal(res, tgt1) res = self.matmul(vec, mat2) assert_equal(res, tgt2) def test_matrix_vector_values(self): vec = np.array([1, 2]) mat1 = np.array([[1, 2], [3, 4]]) mat2 = np.stack([mat1]*2, axis=0) tgt1 = np.array([5, 11]) tgt2 = np.stack([tgt1]*2, axis=0) for dt in self.types[1:]: v = vec.astype(dt) m1 = mat1.astype(dt) m2 = mat2.astype(dt) res = self.matmul(m1, v) assert_equal(res, tgt1) res = self.matmul(m2, v) assert_equal(res, tgt2) # boolean type vec = np.array([True, False]) mat1 = np.array([[True, False], [False, True]]) mat2 = np.stack([mat1]*2, axis=0) tgt1 = np.array([True, False]) tgt2 = np.stack([tgt1]*2, axis=0) res = self.matmul(vec, mat1) assert_equal(res, tgt1) res = self.matmul(vec, mat2) assert_equal(res, tgt2) def test_matrix_matrix_values(self): mat1 = np.array([[1, 2], [3, 4]]) mat2 = np.array([[1, 0], [1, 1]]) mat12 = np.stack([mat1, mat2], axis=0) mat21 = np.stack([mat2, mat1], axis=0) tgt11 = np.array([[7, 10], [15, 22]]) tgt12 = np.array([[3, 2], [7, 4]]) tgt21 = np.array([[1, 2], [4, 6]]) tgt12_21 = np.stack([tgt12, tgt21], axis=0) tgt11_12 = np.stack((tgt11, tgt12), axis=0) tgt11_21 = np.stack((tgt11, tgt21), axis=0) for dt in self.types[1:]: m1 = mat1.astype(dt) m2 = mat2.astype(dt) m12 = mat12.astype(dt) m21 = mat21.astype(dt) # matrix @ matrix res = self.matmul(m1, m2) assert_equal(res, tgt12) res = self.matmul(m2, m1) assert_equal(res, tgt21) # stacked @ matrix res = self.matmul(m12, m1) assert_equal(res, tgt11_21) # matrix @ stacked res = self.matmul(m1, m12) assert_equal(res, tgt11_12) # stacked @ stacked res = self.matmul(m12, m21) 
assert_equal(res, tgt12_21) # boolean type m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_) m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_) m12 = np.stack([m1, m2], axis=0) m21 = np.stack([m2, m1], axis=0) tgt11 = m1 tgt12 = m1 tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_) tgt12_21 = np.stack([tgt12, tgt21], axis=0) tgt11_12 = np.stack((tgt11, tgt12), axis=0) tgt11_21 = np.stack((tgt11, tgt21), axis=0) # matrix @ matrix res = self.matmul(m1, m2) assert_equal(res, tgt12) res = self.matmul(m2, m1) assert_equal(res, tgt21) # stacked @ matrix res = self.matmul(m12, m1) assert_equal(res, tgt11_21) # matrix @ stacked res = self.matmul(m1, m12) assert_equal(res, tgt11_12) # stacked @ stacked res = self.matmul(m12, m21) assert_equal(res, tgt12_21) class TestMatmul(MatmulCommon): matmul = np.matmul def test_out_arg(self): a = np.ones((5, 2), dtype=float) b = np.array([[1, 3], [5, 7]], dtype=float) tgt = np.dot(a, b) # test as positional argument msg = "out positional argument" out = np.zeros((5, 2), dtype=float) self.matmul(a, b, out) assert_array_equal(out, tgt, err_msg=msg) # test as keyword argument msg = "out keyword argument" out = np.zeros((5, 2), dtype=float) self.matmul(a, b, out=out) assert_array_equal(out, tgt, err_msg=msg) # test out with not allowed type cast (safe casting) msg = "Cannot cast ufunc matmul output" out = np.zeros((5, 2), dtype=np.int32) assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out) # test out with type upcast to complex out = np.zeros((5, 2), dtype=np.complex128) c = self.matmul(a, b, out=out) assert_(c is out) with suppress_warnings() as sup: sup.filter(np.ComplexWarning, '') c = c.astype(tgt.dtype) assert_array_equal(c, tgt) def test_out_contiguous(self): a = np.ones((5, 2), dtype=float) b = np.array([[1, 3], [5, 7]], dtype=float) v = np.array([1, 3], dtype=float) tgt = np.dot(a, b) tgt_mv = np.dot(a, v) # test out non-contiguous out = np.ones((5, 2, 2), dtype=float) c = self.matmul(a, b, out=out[..., 0]) assert c.base is out assert_array_equal(c, tgt) c = self.matmul(a, v, out=out[:, 0, 0]) assert_array_equal(c, tgt_mv) c = self.matmul(v, a.T, out=out[:, 0, 0]) assert_array_equal(c, tgt_mv) # test out contiguous in only last dim out = np.ones((10, 2), dtype=float) c = self.matmul(a, b, out=out[::2, :]) assert_array_equal(c, tgt) # test transposes of out, args out = np.ones((5, 2), dtype=float) c = self.matmul(b.T, a.T, out=out.T) assert_array_equal(out, tgt) m1 = np.arange(15.).reshape(5, 3) m2 = np.arange(21.).reshape(3, 7) m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous vc = np.arange(10.) vr = np.arange(6.) 
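# m0 below is an empty (3, 0) matrix used for the size == 0 cases in the
# dot-equivalence parametrization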
m0 = np.zeros((3, 0)) @pytest.mark.parametrize('args', ( # matrix-matrix (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()), # matrix-matrix-transpose, contiguous and non (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T), (m3, m3.T), (m3.T, m3), # matrix-matrix non-contiguous (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T), # vector-matrix, matrix-vector, contiguous (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T), # vector-matrix, matrix-vector, vector non-contiguous (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T), # vector-matrix, matrix-vector, matrix non-contiguous (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T), # vector-matrix, matrix-vector, both non-contiguous (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T), # size == 0 (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T), )) def test_dot_equivalent(self, args): r1 = np.matmul(*args) r2 = np.dot(*args) assert_equal(r1, r2) r3 = np.matmul(args[0].copy(), args[1].copy()) assert_equal(r1, r3) if sys.version_info[:2] >= (3, 5): class TestMatmulOperator(MatmulCommon): import operator matmul = operator.matmul def test_array_priority_override(self): class A(object): __array_priority__ = 1000 def __matmul__(self, other): return "A" def __rmatmul__(self, other): return "A" a = A() b = np.ones(2) assert_equal(self.matmul(a, b), "A") assert_equal(self.matmul(b, a), "A") def test_matmul_raises(self): assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5)) assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc')) assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc')) def test_matmul_inplace(): # It would be nice to support in-place matmul eventually, but for now # we don't have a working implementation, so better just to error out # and nudge people to writing "a = a @ b". a = np.eye(3) b = np.eye(3) assert_raises(TypeError, a.__imatmul__, b) import operator assert_raises(TypeError, operator.imatmul, a, b) # we avoid writing the token `exec` so as not to crash python 2's # parser exec_ = getattr(builtins, "exec") assert_raises(TypeError, exec_, "a @= b", globals(), locals()) def test_matmul_axes(): a = np.arange(3*4*5).reshape(3, 4, 5) c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)]) assert c.shape == (3, 4, 4) d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)]) assert d.shape == (4, 4, 3) e = np.swapaxes(d, 0, 2) assert_array_equal(e, c) f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)]) assert f.shape == (4, 5) class TestInner(object): def test_inner_type_mismatch(self): c = 1. A = np.array((1,1), dtype='i,i') assert_raises(TypeError, np.inner, c, A) assert_raises(TypeError, np.inner, A, c) def test_inner_scalar_and_vector(self): for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': sca = np.array(3, dtype=dt)[()] vec = np.array([1, 2], dtype=dt) desired = np.array([3, 6], dtype=dt) assert_equal(np.inner(vec, sca), desired) assert_equal(np.inner(sca, vec), desired) def test_vecself(self): # Ticket 844. 
# Inner product of a vector with itself segfaults or give # meaningless result a = np.zeros(shape=(1, 80), dtype=np.float64) p = np.inner(a, a) assert_almost_equal(p, 0, decimal=14) def test_inner_product_with_various_contiguities(self): # github issue 6532 for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': # check an inner product involving a matrix transpose A = np.array([[1, 2], [3, 4]], dtype=dt) B = np.array([[1, 3], [2, 4]], dtype=dt) C = np.array([1, 1], dtype=dt) desired = np.array([4, 6], dtype=dt) assert_equal(np.inner(A.T, C), desired) assert_equal(np.inner(C, A.T), desired) assert_equal(np.inner(B, C), desired) assert_equal(np.inner(C, B), desired) # check a matrix product desired = np.array([[7, 10], [15, 22]], dtype=dt) assert_equal(np.inner(A, B), desired) # check the syrk vs. gemm paths desired = np.array([[5, 11], [11, 25]], dtype=dt) assert_equal(np.inner(A, A), desired) assert_equal(np.inner(A, A.copy()), desired) # check an inner product involving an aliased and reversed view a = np.arange(5).astype(dt) b = a[::-1] desired = np.array(10, dtype=dt).item() assert_equal(np.inner(b, a), desired) def test_3d_tensor(self): for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': a = np.arange(24).reshape(2,3,4).astype(dt) b = np.arange(24, 48).reshape(2,3,4).astype(dt) desired = np.array( [[[[ 158, 182, 206], [ 230, 254, 278]], [[ 566, 654, 742], [ 830, 918, 1006]], [[ 974, 1126, 1278], [1430, 1582, 1734]]], [[[1382, 1598, 1814], [2030, 2246, 2462]], [[1790, 2070, 2350], [2630, 2910, 3190]], [[2198, 2542, 2886], [3230, 3574, 3918]]]], dtype=dt ) assert_equal(np.inner(a, b), desired) assert_equal(np.inner(b, a).transpose(2,3,0,1), desired) class TestAlen(object): def test_basic(self): m = np.array([1, 2, 3]) assert_equal(np.alen(m), 3) m = np.array([[1, 2, 3], [4, 5, 7]]) assert_equal(np.alen(m), 2) m = [1, 2, 3] assert_equal(np.alen(m), 3) m = [[1, 2, 3], [4, 5, 7]] assert_equal(np.alen(m), 2) def test_singleton(self): assert_equal(np.alen(5), 1) class TestChoose(object): def setup(self): self.x = 2*np.ones((3,), dtype=int) self.y = 3*np.ones((3,), dtype=int) self.x2 = 2*np.ones((2, 3), dtype=int) self.y2 = 3*np.ones((2, 3), dtype=int) self.ind = [0, 0, 1] def test_basic(self): A = np.choose(self.ind, (self.x, self.y)) assert_equal(A, [2, 2, 3]) def test_broadcast1(self): A = np.choose(self.ind, (self.x2, self.y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) def test_broadcast2(self): A = np.choose(self.ind, (self.x, self.y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) class TestRepeat(object): def setup(self): self.m = np.array([1, 2, 3, 4, 5, 6]) self.m_rect = self.m.reshape((2, 3)) def test_basic(self): A = np.repeat(self.m, [1, 3, 2, 1, 1, 2]) assert_equal(A, [1, 2, 2, 2, 3, 3, 4, 5, 6, 6]) def test_broadcast1(self): A = np.repeat(self.m, 2) assert_equal(A, [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]) def test_axis_spec(self): A = np.repeat(self.m_rect, [2, 1], axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6]]) A = np.repeat(self.m_rect, [1, 3, 2], axis=1) assert_equal(A, [[1, 2, 2, 2, 3, 3], [4, 5, 5, 5, 6, 6]]) def test_broadcast2(self): A = np.repeat(self.m_rect, 2, axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]]) A = np.repeat(self.m_rect, 2, axis=1) assert_equal(A, [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]]) # TODO: test for multidimensional NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} @pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object']) class 
TestNeighborhoodIter(object): # Simple, 2d tests def test_simple2d(self, dt): # Test zero and one padding for simple data type x = np.array([[0, 1], [2, 3]], dtype=dt) r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), np.array([[0, 0, 0], [0, 1, 0]], dtype=dt), np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator( x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero']) assert_array_equal(l, r) r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt), np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator( x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one']) assert_array_equal(l, r) r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt), np.array([[4, 4, 4], [0, 1, 4]], dtype=dt), np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator( x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant']) assert_array_equal(l, r) def test_mirror2d(self, dt): x = np.array([[0, 1], [2, 3]], dtype=dt) r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), np.array([[0, 1, 1], [0, 1, 1]], dtype=dt), np.array([[0, 0, 1], [2, 2, 3]], dtype=dt), np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator( x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror']) assert_array_equal(l, r) # Simple, 1d tests def test_simple(self, dt): # Test padding with constant values x = np.linspace(1, 5, 5).astype(dt) r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]] l = _multiarray_tests.test_neighborhood_iterator( x, [-1, 1], x[0], NEIGH_MODE['zero']) assert_array_equal(l, r) r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]] l = _multiarray_tests.test_neighborhood_iterator( x, [-1, 1], x[0], NEIGH_MODE['one']) assert_array_equal(l, r) r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]] l = _multiarray_tests.test_neighborhood_iterator( x, [-1, 1], x[4], NEIGH_MODE['constant']) assert_array_equal(l, r) # Test mirror modes def test_mirror(self, dt): x = np.linspace(1, 5, 5).astype(dt) r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) l = _multiarray_tests.test_neighborhood_iterator( x, [-2, 2], x[1], NEIGH_MODE['mirror']) assert_([i.dtype == dt for i in l]) assert_array_equal(l, r) # Circular mode def test_circular(self, dt): x = np.linspace(1, 5, 5).astype(dt) r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt) l = _multiarray_tests.test_neighborhood_iterator( x, [-2, 2], x[0], NEIGH_MODE['circular']) assert_array_equal(l, r) # Test stacking neighborhood iterators class TestStackedNeighborhoodIter(object): # Simple, 1d test: stacking 2 constant-padded neigh iterators def test_simple_const(self): dt = np.float64 # Test zero and one padding for simple data type x = np.array([1, 2, 3], dtype=dt) r = [np.array([0], dtype=dt), np.array([0], dtype=dt), np.array([1], dtype=dt), np.array([2], dtype=dt), np.array([3], dtype=dt), np.array([0], dtype=dt), np.array([0], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator_oob( x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero']) assert_array_equal(l, r) r = [np.array([1, 0, 1], dtype=dt), np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt), np.array([3, 0, 1], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator_oob( x, [-1, 
3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one']) assert_array_equal(l, r) # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and # mirror padding def test_simple_mirror(self): dt = np.float64 # Stacking zero on top of mirror x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 1, 1], dtype=dt), np.array([1, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 3], dtype=dt), np.array([3, 3, 0], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator_oob( x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero']) assert_array_equal(l, r) # Stacking mirror on top of zero x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 0, 0], dtype=dt), np.array([0, 0, 1], dtype=dt), np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator_oob( x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror']) assert_array_equal(l, r) # Stacking mirror on top of zero: 2nd x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt), np.array([3, 0, 0], dtype=dt), np.array([0, 0, 3], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator_oob( x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror']) assert_array_equal(l, r) # Stacking mirror on top of zero: 3rd x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 0, 0, 1, 2], dtype=dt), np.array([0, 0, 1, 2, 3], dtype=dt), np.array([0, 1, 2, 3, 0], dtype=dt), np.array([1, 2, 3, 0, 0], dtype=dt), np.array([2, 3, 0, 0, 3], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator_oob( x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror']) assert_array_equal(l, r) # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and # circular padding def test_simple_circular(self): dt = np.float64 # Stacking zero on top of mirror x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 3, 1], dtype=dt), np.array([3, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 1], dtype=dt), np.array([3, 1, 0], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator_oob( x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero']) assert_array_equal(l, r) # Stacking mirror on top of zero x = np.array([1, 2, 3], dtype=dt) r = [np.array([3, 0, 0], dtype=dt), np.array([0, 0, 1], dtype=dt), np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator_oob( x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular']) assert_array_equal(l, r) # Stacking mirror on top of zero: 2nd x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt), np.array([3, 0, 0], dtype=dt), np.array([0, 0, 1], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator_oob( x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular']) assert_array_equal(l, r) # Stacking mirror on top of zero: 3rd x = np.array([1, 2, 3], dtype=dt) r = [np.array([3, 0, 0, 1, 2], dtype=dt), np.array([0, 0, 1, 2, 3], dtype=dt), np.array([0, 1, 2, 3, 0], dtype=dt), np.array([1, 2, 3, 0, 0], dtype=dt), np.array([2, 3, 0, 0, 1], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator_oob( x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular']) assert_array_equal(l, r) # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator # being strictly within the array def test_simple_strict_within(self): dt = 
np.float64 # Stacking zero on top of zero, first neighborhood strictly inside the # array x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 2, 3, 0], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator_oob( x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero']) assert_array_equal(l, r) # Stacking mirror on top of zero, first neighborhood strictly inside the # array x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 2, 3, 3], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator_oob( x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror']) assert_array_equal(l, r) # Stacking mirror on top of zero, first neighborhood strictly inside the # array x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 2, 3, 1], dtype=dt)] l = _multiarray_tests.test_neighborhood_iterator_oob( x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular']) assert_array_equal(l, r) class TestWarnings(object): def test_complex_warning(self): x = np.array([1, 2]) y = np.array([1-2j, 1+2j]) with warnings.catch_warnings(): warnings.simplefilter("error", np.ComplexWarning) assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y) assert_equal(x, [1, 2]) class TestMinScalarType(object): def test_usigned_shortshort(self): dt = np.min_scalar_type(2**8-1) wanted = np.dtype('uint8') assert_equal(wanted, dt) def test_usigned_short(self): dt = np.min_scalar_type(2**16-1) wanted = np.dtype('uint16') assert_equal(wanted, dt) def test_usigned_int(self): dt = np.min_scalar_type(2**32-1) wanted = np.dtype('uint32') assert_equal(wanted, dt) def test_usigned_longlong(self): dt = np.min_scalar_type(2**63-1) wanted = np.dtype('uint64') assert_equal(wanted, dt) def test_object(self): dt = np.min_scalar_type(2**64) wanted = np.dtype('O') assert_equal(wanted, dt) from numpy.core._internal import _dtype_from_pep3118 class TestPEP3118Dtype(object): def _check(self, spec, wanted): dt = np.dtype(wanted) actual = _dtype_from_pep3118(spec) assert_equal(actual, dt, err_msg="spec %r != dtype %r" % (spec, wanted)) def test_native_padding(self): align = np.dtype('i').alignment for j in range(8): if j == 0: s = 'bi' else: s = 'b%dxi' % j self._check('@'+s, {'f0': ('i1', 0), 'f1': ('i', align*(1 + j//align))}) self._check('='+s, {'f0': ('i1', 0), 'f1': ('i', 1+j)}) def test_native_padding_2(self): # Native padding should work also for structs and sub-arrays self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)}) self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) def test_trailing_padding(self): # Trailing padding should be included, *and*, the item size # should match the alignment if in aligned mode align = np.dtype('i').alignment size = np.dtype('i').itemsize def aligned(n): return align*(1 + (n-1)//align) base = dict(formats=['i'], names=['f0']) self._check('ix', dict(itemsize=aligned(size + 1), **base)) self._check('ixx', dict(itemsize=aligned(size + 2), **base)) self._check('ixxx', dict(itemsize=aligned(size + 3), **base)) self._check('ixxxx', dict(itemsize=aligned(size + 4), **base)) self._check('i7x', dict(itemsize=aligned(size + 7), **base)) self._check('^ix', dict(itemsize=size + 1, **base)) self._check('^ixx', dict(itemsize=size + 2, **base)) self._check('^ixxx', dict(itemsize=size + 3, **base)) self._check('^ixxxx', dict(itemsize=size + 4, **base)) self._check('^i7x', dict(itemsize=size + 7, **base)) def test_native_padding_3(self): dt = np.dtype( [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')], align=True) self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt) dt = 
np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt) def test_padding_with_array_inside_struct(self): dt = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')], align=True) self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt) def test_byteorder_inside_struct(self): # The byte order after @T{=i} should be '=', not '@'. # Check this by noting the absence of native alignment. self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0), 'f1': ('i', 5)}) def test_intra_padding(self): # Natively aligned sub-arrays may require some internal padding align = np.dtype('i').alignment size = np.dtype('i').itemsize def aligned(n): return (align*(1 + (n-1)//align)) self._check('(3)T{ix}', (dict( names=['f0'], formats=['i'], offsets=[0], itemsize=aligned(size + 1) ), (3,))) def test_char_vs_string(self): dt = np.dtype('c') self._check('c', dt) dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')]) self._check('4c4s', dt) def test_field_order(self): # gh-9053 - previously, we relied on dictionary key order self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')]) self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')]) def test_unnamed_fields(self): self._check('ii', [('f0', 'i'), ('f1', 'i')]) self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')]) self._check('i', 'i') self._check('i:f0:', [('f0', 'i')]) class TestNewBufferProtocol(object): """ Test PEP3118 buffers """ def _check_roundtrip(self, obj): obj = np.asarray(obj) x = memoryview(obj) y = np.asarray(x) y2 = np.array(x) assert_(not y.flags.owndata) assert_(y2.flags.owndata) assert_equal(y.dtype, obj.dtype) assert_equal(y.shape, obj.shape) assert_array_equal(obj, y) assert_equal(y2.dtype, obj.dtype) assert_equal(y2.shape, obj.shape) assert_array_equal(obj, y2) def test_roundtrip(self): x = np.array([1, 2, 3, 4, 5], dtype='i4') self._check_roundtrip(x) x = np.array([[1, 2], [3, 4]], dtype=np.float64) self._check_roundtrip(x) x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] self._check_roundtrip(x) dt = [('a', 'b'), ('b', 'h'), ('c', 'i'), ('d', 'l'), ('dx', 'q'), ('e', 'B'), ('f', 'H'), ('g', 'I'), ('h', 'L'), ('hx', 'Q'), ('i', np.single), ('j', np.double), ('k', np.longdouble), ('ix', np.csingle), ('jx', np.cdouble), ('kx', np.clongdouble), ('l', 'S4'), ('m', 'U4'), ('n', 'V3'), ('o', '?'), ('p', np.half), ] x = np.array( [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, b'aaaa', 'bbbb', b'xxx', True, 1.0)], dtype=dt) self._check_roundtrip(x) x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))]) self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='>i2') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='') x = np.zeros(4, dtype=dt) self._check_roundtrip(x) def test_roundtrip_scalar(self): # Issue #4015. 
self._check_roundtrip(0) def test_invalid_buffer_format(self): # datetime64 cannot be used fully in a buffer yet # Should be fixed in the next Numpy major release dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')]) a = np.empty(3, dt) assert_raises((ValueError, BufferError), memoryview, a) assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]')) def test_export_simple_1d(self): x = np.array([1, 2, 3, 4, 5], dtype='i') y = memoryview(x) assert_equal(y.format, 'i') assert_equal(y.shape, (5,)) assert_equal(y.ndim, 1) assert_equal(y.strides, (4,)) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 4) def test_export_simple_nd(self): x = np.array([[1, 2], [3, 4]], dtype=np.float64) y = memoryview(x) assert_equal(y.format, 'd') assert_equal(y.shape, (2, 2)) assert_equal(y.ndim, 2) assert_equal(y.strides, (16, 8)) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 8) def test_export_discontiguous(self): x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] y = memoryview(x) assert_equal(y.format, 'f') assert_equal(y.shape, (3, 3)) assert_equal(y.ndim, 2) assert_equal(y.strides, (36, 4)) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 4) def test_export_record(self): dt = [('a', 'b'), ('b', 'h'), ('c', 'i'), ('d', 'l'), ('dx', 'q'), ('e', 'B'), ('f', 'H'), ('g', 'I'), ('h', 'L'), ('hx', 'Q'), ('i', np.single), ('j', np.double), ('k', np.longdouble), ('ix', np.csingle), ('jx', np.cdouble), ('kx', np.clongdouble), ('l', 'S4'), ('m', 'U4'), ('n', 'V3'), ('o', '?'), ('p', np.half), ] x = np.array( [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, b'aaaa', 'bbbb', b' ', True, 1.0)], dtype=dt) y = memoryview(x) assert_equal(y.shape, (1,)) assert_equal(y.ndim, 1) assert_equal(y.suboffsets, EMPTY) sz = sum([np.dtype(b).itemsize for a, b in dt]) if np.dtype('l').itemsize == 4: assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') else: assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides if not (np.ones(1).strides[0] == np.iinfo(np.intp).max): assert_equal(y.strides, (sz,)) assert_equal(y.itemsize, sz) def test_export_subarray(self): x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))]) y = memoryview(x) assert_equal(y.format, 'T{(2,2)i:a:}') assert_equal(y.shape, EMPTY) assert_equal(y.ndim, 0) assert_equal(y.strides, EMPTY) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 16) def test_export_endian(self): x = np.array([1, 2, 3], dtype='>i') y = memoryview(x) if sys.byteorder == 'little': assert_equal(y.format, '>i') else: assert_equal(y.format, 'i') x = np.array([1, 2, 3], dtype=' 2: with assert_raises_regex( NotImplementedError, r"Unrepresentable .* 'u' \(UCS-2 strings\)" ): raise exc.__cause__ def test_ctypes_integer_via_memoryview(self): # gh-11150, due to bpo-10746 for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}: value = c_integer(42) with warnings.catch_warnings(record=True): warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning) np.asarray(value) def test_ctypes_struct_via_memoryview(self): # gh-10528 class foo(ctypes.Structure): _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)] f = foo(a=1, b=2) with warnings.catch_warnings(record=True): warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning) arr = np.asarray(f) assert_equal(arr['a'], 1) assert_equal(arr['b'], 2) f.a = 
3 assert_equal(arr['a'], 3) class TestArrayAttributeDeletion(object): def test_multiarray_writable_attributes_deletion(self): # ticket #2046, should not segfault, raise AttributeError a = np.ones(2) attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat'] with suppress_warnings() as sup: sup.filter(DeprecationWarning, "Assigning the 'data' attribute") for s in attr: assert_raises(AttributeError, delattr, a, s) def test_multiarray_not_writable_attributes_deletion(self): a = np.ones(2) attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base", "ctypes", "T", "__array_interface__", "__array_struct__", "__array_priority__", "__array_finalize__"] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_multiarray_flags_writable_attribute_deletion(self): a = np.ones(2).flags attr = ['writebackifcopy', 'updateifcopy', 'aligned', 'writeable'] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_multiarray_flags_not_writable_attribute_deletion(self): a = np.ones(2).flags attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran", "owndata", "fnc", "forc", "behaved", "carray", "farray", "num"] for s in attr: assert_raises(AttributeError, delattr, a, s) class TestArrayInterface(): class Foo(object): def __init__(self, value): self.value = value self.iface = {'typestr': 'f8'} def __float__(self): return float(self.value) @property def __array_interface__(self): return self.iface f = Foo(0.5) @pytest.mark.parametrize('val, iface, expected', [ (f, {}, 0.5), ([f], {}, [0.5]), ([f, f], {}, [0.5, 0.5]), (f, {'shape': ()}, 0.5), (f, {'shape': None}, TypeError), (f, {'shape': (1, 1)}, [[0.5]]), (f, {'shape': (2,)}, ValueError), (f, {'strides': ()}, 0.5), (f, {'strides': (2,)}, ValueError), (f, {'strides': 16}, TypeError), ]) def test_scalar_interface(self, val, iface, expected): # Test scalar coercion within the array interface self.f.iface = {'typestr': 'f8'} self.f.iface.update(iface) if HAS_REFCOUNT: pre_cnt = sys.getrefcount(np.dtype('f8')) if isinstance(expected, type): assert_raises(expected, np.array, val) else: result = np.array(val) assert_equal(np.array(val), expected) assert result.dtype == 'f8' del result if HAS_REFCOUNT: post_cnt = sys.getrefcount(np.dtype('f8')) assert_equal(pre_cnt, post_cnt) def test_interface_no_shape(): class ArrayLike(object): array = np.array(1) __array_interface__ = array.__array_interface__ assert_equal(np.array(ArrayLike()), 1) def test_array_interface_itemsize(): # See gh-6361 my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'], 'offsets': [0, 8], 'itemsize': 16}) a = np.ones(10, dtype=my_dtype) descr_t = np.dtype(a.__array_interface__['descr']) typestr_t = np.dtype(a.__array_interface__['typestr']) assert_equal(descr_t.itemsize, typestr_t.itemsize) def test_array_interface_empty_shape(): # See gh-7994 arr = np.array([1, 2, 3]) interface1 = dict(arr.__array_interface__) interface1['shape'] = () class DummyArray1(object): __array_interface__ = interface1 # NOTE: Because Py2 str/Py3 bytes support the buffer interface, setting # the interface data to bytes would invoke the bug this tests for, that # __array_interface__ with shape=() is not allowed if the data is an object # exposing the buffer interface interface2 = dict(interface1) interface2['data'] = arr[0].tobytes() class DummyArray2(object): __array_interface__ = interface2 arr1 = np.asarray(DummyArray1()) arr2 = np.asarray(DummyArray2()) arr3 = arr[:1].reshape(()) assert_equal(arr1, arr2) assert_equal(arr1, arr3) def test_flat_element_deletion(): it =
np.ones(3).flat try: del it[1] del it[1:2] except TypeError: pass except Exception: raise AssertionError def test_scalar_element_deletion(): a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')]) assert_raises(ValueError, a[0].__delitem__, 'x') class TestMemEventHook(object): def test_mem_seteventhook(self): # The actual tests are within the C code in # multiarray/_multiarray_tests.c.src _multiarray_tests.test_pydatamem_seteventhook_start() # force an allocation and free of a numpy array # needs to be larger than the limit of the small memory cache in ctors.c a = np.zeros(1000) del a gc.collect() _multiarray_tests.test_pydatamem_seteventhook_end() class TestMapIter(object): def test_mapiter(self): # The actual tests are within the C code in # multiarray/_multiarray_tests.c.src a = np.arange(12).reshape((3, 4)).astype(float) index = ([1, 1, 2, 0], [0, 0, 2, 3]) vals = [50, 50, 30, 16] _multiarray_tests.test_inplace_increment(a, index, vals) assert_equal(a, [[0.00, 1., 2.0, 19.], [104., 5., 6.0, 7.0], [8.00, 9., 40., 11.]]) b = np.arange(6).astype(float) index = (np.array([1, 2, 0]),) vals = [50, 4, 100.1] _multiarray_tests.test_inplace_increment(b, index, vals) assert_equal(b, [100.1, 51., 6., 3., 4., 5.]) class TestAsCArray(object): def test_1darray(self): array = np.arange(24, dtype=np.double) from_c = _multiarray_tests.test_as_c_array(array, 3) assert_equal(array[3], from_c) def test_2darray(self): array = np.arange(24, dtype=np.double).reshape(3, 8) from_c = _multiarray_tests.test_as_c_array(array, 2, 4) assert_equal(array[2, 4], from_c) def test_3darray(self): array = np.arange(24, dtype=np.double).reshape(2, 3, 4) from_c = _multiarray_tests.test_as_c_array(array, 1, 2, 3) assert_equal(array[1, 2, 3], from_c) class TestConversion(object): def test_array_scalar_relational_operation(self): # All integer for dt1 in np.typecodes['AllInteger']: assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,)) assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,)) for dt2 in np.typecodes['AllInteger']: assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) # Unsigned integers for dt1 in 'BHILQP': assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,)) assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,)) assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,)) # Unsigned vs signed for dt2 in 'bhilqp': assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) # Signed integers and floats for dt1 in 'bhlqp' + np.typecodes['Float']: assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) for dt2 in 'bhlqp' + np.typecodes['Float']: assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) def test_to_bool_scalar(self): assert_equal(bool(np.array([False])), False) assert_equal(bool(np.array([True])), True)
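# Illustrative sketch (a hypothetical, uncollected helper added for exposition,
# relying only on the module-level numpy import) of the conversion rules that
# TestConversion pins down above and below: mixed-kind comparisons are done by
# value, so a negative Python int never wraps around against an unsigned array,
# and truth-testing an ndarray is only defined when it holds exactly one element.
def _demo_conversion_rules():
    assert -1 < np.array(1, dtype=np.uint8)      # compared by value, no wrap-around
    assert bool(np.array([[42]])) is True        # one element, any shape, is fine
    try:
        bool(np.array([1, 2]))                   # more than one element
    except ValueError:
        pass                                     # ambiguous truth value raises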
assert_equal(bool(np.array([[42]])), True) assert_raises(ValueError, bool, np.array([1, 2])) class NotConvertible(object): def __bool__(self): raise NotImplementedError __nonzero__ = __bool__ # python 2 assert_raises(NotImplementedError, bool, np.array(NotConvertible())) assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) self_containing = np.array([None]) self_containing[0] = self_containing try: Error = RecursionError except NameError: Error = RuntimeError # python < 3.5 assert_raises(Error, bool, self_containing) # previously stack overflow self_containing[0] = None # resolve circular reference def test_to_int_scalar(self): # gh-9972 means that these aren't always the same int_funcs = (int, lambda x: x.__int__()) for int_func in int_funcs: assert_equal(int_func(np.array([1])), 1) assert_equal(int_func(np.array([0])), 0) assert_equal(int_func(np.array([[42]])), 42) assert_raises(TypeError, int_func, np.array([1, 2])) # gh-9972 assert_equal(4, int_func(np.array('4'))) assert_equal(5, int_func(np.bytes_(b'5'))) assert_equal(6, int_func(np.unicode_(u'6'))) class HasTrunc: def __trunc__(self): return 3 assert_equal(3, int_func(np.array(HasTrunc()))) assert_equal(3, int_func(np.array([HasTrunc()]))) class NotConvertible(object): def __int__(self): raise NotImplementedError assert_raises(NotImplementedError, int_func, np.array(NotConvertible())) assert_raises(NotImplementedError, int_func, np.array([NotConvertible()])) class TestWhere(object): def test_basic(self): dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128, np.longdouble, np.clongdouble] for dt in dts: c = np.ones(53, dtype=bool) assert_equal(np.where( c, dt(0), dt(1)), dt(0)) assert_equal(np.where(~c, dt(0), dt(1)), dt(1)) assert_equal(np.where(True, dt(0), dt(1)), dt(0)) assert_equal(np.where(False, dt(0), dt(1)), dt(1)) d = np.ones_like(c).astype(dt) e = np.zeros_like(d) r = d.astype(dt) c[7] = False r[7] = e[7] assert_equal(np.where(c, e, e), e) assert_equal(np.where(c, d, e), r) assert_equal(np.where(c, d, e[0]), r) assert_equal(np.where(c, d[0], e), r) assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2]) assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2]) assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3]) assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3]) assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2]) assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3]) assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) def test_exotic(self): # object assert_array_equal(np.where(True, None, None), np.array(None)) # zero sized m = np.array([], dtype=bool).reshape(0, 3) b = np.array([], dtype=np.float64).reshape(0, 3) assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3)) # object cast d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313, 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013, 1.267, 0.229, -1.39, 0.487]) nan = float('NaN') e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan, 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'], dtype=object) m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool) r = e[:] r[np.where(m)] = d[np.where(m)] assert_array_equal(np.where(m, d, e), r) r = e[:] r[np.where(~m)] = d[np.where(~m)] assert_array_equal(np.where(m, e, d), r) assert_array_equal(np.where(m, e, e), e) # minimal dtype result with NaN scalar (e.g required by pandas) d = np.array([1., 2.], dtype=np.float32) e = float('NaN') assert_equal(np.where(True, d, e).dtype, 
np.float32) e = float('Infinity') assert_equal(np.where(True, d, e).dtype, np.float32) e = float('-Infinity') assert_equal(np.where(True, d, e).dtype, np.float32) # also check upcast e = float(1e150) assert_equal(np.where(True, d, e).dtype, np.float64) def test_ndim(self): c = [True, False] a = np.zeros((2, 25)) b = np.ones((2, 25)) r = np.where(np.array(c)[:,np.newaxis], a, b) assert_array_equal(r[0], a[0]) assert_array_equal(r[1], b[0]) a = a.T b = b.T r = np.where(c, a, b) assert_array_equal(r[:,0], a[:,0]) assert_array_equal(r[:,1], b[:,0]) def test_dtype_mix(self): c = np.array([False, True, False, False, False, False, True, False, False, False, True, False]) a = np.uint32(1) b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], dtype=np.float64) r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], dtype=np.float64) assert_equal(np.where(c, a, b), r) a = a.astype(np.float32) b = b.astype(np.int64) assert_equal(np.where(c, a, b), r) # non bool mask c = c.astype(int) c[c != 0] = 34242324 assert_equal(np.where(c, a, b), r) # invert tmpmask = c != 0 c[c == 0] = 41247212 c[tmpmask] = 0 assert_equal(np.where(c, b, a), r) def test_foreign(self): c = np.array([False, True, False, False, False, False, True, False, False, False, True, False]) r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], dtype=np.float64) a = np.ones(1, dtype='>i4') b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], dtype=np.float64) assert_equal(np.where(c, a, b), r) b = b.astype('>f8') assert_equal(np.where(c, a, b), r) a = a.astype('i4') assert_equal(np.where(c, a, b), r) def test_error(self): c = [True, True] a = np.ones((4, 5)) b = np.ones((5, 5)) assert_raises(ValueError, np.where, c, a, a) assert_raises(ValueError, np.where, c[0], a, b) def test_string(self): # gh-4778 check strings are properly filled with nulls a = np.array("abc") b = np.array("x" * 753) assert_equal(np.where(True, a, b), "abc") assert_equal(np.where(False, b, a), "abc") # check native datatype sized strings a = np.array("abcd") b = np.array("x" * 8) assert_equal(np.where(True, a, b), "abcd") assert_equal(np.where(False, b, a), "abcd") def test_empty_result(self): # pass empty where result through an assignment which reads the data of # empty arrays, error detectable with valgrind, see gh-8922 x = np.zeros((1, 1)) ibad = np.vstack(np.where(x == 99.)) assert_array_equal(ibad, np.atleast_2d(np.array([[],[]], dtype=np.intp))) def test_largedim(self): # invalid read regression gh-9304 shape = [10, 2, 3, 4, 5, 6] np.random.seed(2) array = np.random.rand(*shape) for i in range(10): benchmark = array.nonzero() result = array.nonzero() assert_array_equal(benchmark, result) if not IS_PYPY: # sys.getsizeof() is not valid on PyPy class TestSizeOf(object): def test_empty_array(self): x = np.array([]) assert_(sys.getsizeof(x) > 0) def check_array(self, dtype): elem_size = dtype(0).itemsize for length in [10, 50, 100, 500]: x = np.arange(length, dtype=dtype) assert_(sys.getsizeof(x) > length * elem_size) def test_array_int32(self): self.check_array(np.int32) def test_array_int64(self): self.check_array(np.int64) def test_array_float32(self): self.check_array(np.float32) def test_array_float64(self): self.check_array(np.float64) def test_view(self): d = np.ones(100) assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) def test_reshape(self): d = np.ones(100) assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) def test_resize(self): d = np.ones(100) old = sys.getsizeof(d) d.resize(50) 
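# Illustrative sketch (a hypothetical, uncollected helper added for exposition)
# of the np.where dtype behaviour asserted in TestWhere above: a Python float
# NaN/inf scalar does not upcast a float32 array, while a value that cannot be
# represented in float32 promotes the result to float64.
def _demo_where_dtype():
    d = np.array([1., 2.], dtype=np.float32)
    assert np.where(True, d, float('nan')).dtype == np.float32   # stays float32
    assert np.where(True, d, 1e150).dtype == np.float64          # forced upcast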
assert_(old > sys.getsizeof(d)) d.resize(150) assert_(old < sys.getsizeof(d)) def test_error(self): d = np.ones(100) assert_raises(TypeError, d.__sizeof__, "a") class TestHashing(object): def test_arrays_not_hashable(self): x = np.ones(3) assert_raises(TypeError, hash, x) def test_collections_hashable(self): x = np.array([]) assert_(not isinstance(x, collections_abc.Hashable)) class TestArrayPriority(object): # This will go away when __array_priority__ is settled, meanwhile # it serves to check unintended changes. op = operator binary_ops = [ op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod, op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt, op.ge, op.lt, op.le, op.ne, op.eq ] # See #7949. Don't use "/" operator With -3 switch, since python reports it # as a DeprecationWarning if sys.version_info[0] < 3 and not sys.py3kwarning: binary_ops.append(op.div) class Foo(np.ndarray): __array_priority__ = 100. def __new__(cls, *args, **kwargs): return np.array(*args, **kwargs).view(cls) class Bar(np.ndarray): __array_priority__ = 101. def __new__(cls, *args, **kwargs): return np.array(*args, **kwargs).view(cls) class Other(object): __array_priority__ = 1000. def _all(self, other): return self.__class__() __add__ = __radd__ = _all __sub__ = __rsub__ = _all __mul__ = __rmul__ = _all __pow__ = __rpow__ = _all __div__ = __rdiv__ = _all __mod__ = __rmod__ = _all __truediv__ = __rtruediv__ = _all __floordiv__ = __rfloordiv__ = _all __and__ = __rand__ = _all __xor__ = __rxor__ = _all __or__ = __ror__ = _all __lshift__ = __rlshift__ = _all __rshift__ = __rrshift__ = _all __eq__ = _all __ne__ = _all __gt__ = _all __ge__ = _all __lt__ = _all __le__ = _all def test_ndarray_subclass(self): a = np.array([1, 2]) b = self.Bar([1, 2]) for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Bar), msg) assert_(isinstance(f(b, a), self.Bar), msg) def test_ndarray_other(self): a = np.array([1, 2]) b = self.Other() for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Other), msg) assert_(isinstance(f(b, a), self.Other), msg) def test_subclass_subclass(self): a = self.Foo([1, 2]) b = self.Bar([1, 2]) for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Bar), msg) assert_(isinstance(f(b, a), self.Bar), msg) def test_subclass_other(self): a = self.Foo([1, 2]) b = self.Other() for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Other), msg) assert_(isinstance(f(b, a), self.Other), msg) class TestBytestringArrayNonzero(object): def test_empty_bstring_array_is_falsey(self): assert_(not np.array([''], dtype=str)) def test_whitespace_bstring_array_is_falsey(self): a = np.array(['spam'], dtype=str) a[0] = ' \0\0' assert_(not a) def test_all_null_bstring_array_is_falsey(self): a = np.array(['spam'], dtype=str) a[0] = '\0\0\0\0' assert_(not a) def test_null_inside_bstring_array_is_truthy(self): a = np.array(['spam'], dtype=str) a[0] = ' \0 \0' assert_(a) class TestUnicodeArrayNonzero(object): def test_empty_ustring_array_is_falsey(self): assert_(not np.array([''], dtype=np.unicode)) def test_whitespace_ustring_array_is_falsey(self): a = np.array(['eggs'], dtype=np.unicode) a[0] = ' \0\0' assert_(not a) def test_all_null_ustring_array_is_falsey(self): a = np.array(['eggs'], dtype=np.unicode) a[0] = '\0\0\0\0' assert_(not a) def test_null_inside_ustring_array_is_truthy(self): a = np.array(['eggs'], dtype=np.unicode) a[0] = ' \0 \0' assert_(a) class TestFormat(object): def test_0d(self): a = np.array(np.pi) 
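# Illustrative sketch (a hypothetical, uncollected helper added for exposition)
# of the legacy dispatch rule that TestArrayPriority above relies on: ndarray
# binary ops defer to a non-ndarray operand that has a higher __array_priority__
# and implements the reflected method, so that operand decides the result.
def _demo_array_priority():
    class _HighPriority(object):
        __array_priority__ = 1000.

        def __radd__(self, other):
            return "deferred to _HighPriority"

    assert np.ones(2) + _HighPriority() == "deferred to _HighPriority"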
assert_equal('{:0.3g}'.format(a), '3.14') assert_equal('{:0.3g}'.format(a[()]), '3.14') def test_1d_no_format(self): a = np.array([np.pi]) assert_equal('{}'.format(a), str(a)) def test_1d_format(self): # until gh-5543, ensure that the behaviour matches what it used to be a = np.array([np.pi]) if sys.version_info[:2] >= (3, 4): assert_raises(TypeError, '{:30}'.format, a) else: with suppress_warnings() as sup: sup.filter(PendingDeprecationWarning) res = '{:30}'.format(a) dst = object.__format__(a, '30') assert_equal(res, dst) class TestCTypes(object): def test_ctypes_is_available(self): test_arr = np.array([[1, 2, 3], [4, 5, 6]]) assert_equal(ctypes, test_arr.ctypes._ctypes) assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) def test_ctypes_is_not_available(self): from numpy.core import _internal _internal.ctypes = None try: test_arr = np.array([[1, 2, 3], [4, 5, 6]]) assert_(isinstance(test_arr.ctypes._ctypes, _internal._missing_ctypes)) assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) finally: _internal.ctypes = ctypes def _make_readonly(x): x.flags.writeable = False return x @pytest.mark.parametrize('arr', [ np.array([1, 2, 3]), np.array([['one', 'two'], ['three', 'four']]), np.array((1, 2), dtype='i4,i4'), np.zeros((2,), dtype= np.dtype(dict( formats=['2, [44, 55]) assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]])) # hit one of the failing paths assert_raises(ValueError, np.place, a, a>20, []) def test_put_noncontiguous(self): a = np.arange(6).reshape(2,3).T # force non-c-contiguous np.put(a, [0, 2], [44, 55]) assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]])) def test_putmask_noncontiguous(self): a = np.arange(6).reshape(2,3).T # force non-c-contiguous # uses arr_putmask np.putmask(a, a>2, a**2) assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]])) def test_take_mode_raise(self): a = np.arange(6, dtype='int') out = np.empty(2, dtype='int') np.take(a, [0, 2], out=out, mode='raise') assert_equal(out, np.array([0, 2])) def test_choose_mod_raise(self): a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]) out = np.empty((3,3), dtype='int') choices = [-10, 10] np.choose(a, choices, out=out, mode='raise') assert_equal(out, np.array([[ 10, -10, 10], [-10, 10, -10], [ 10, -10, 10]])) def test_flatiter__array__(self): a = np.arange(9).reshape(3,3) b = a.T.flat c = b.__array__() # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics del c def test_dot_out(self): # if HAVE_CBLAS, will use WRITEBACKIFCOPY a = np.arange(9, dtype=float).reshape(3,3) b = np.dot(a, a, out=a) assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]])) def test_view_assign(self): from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve arr = np.arange(9).reshape(3, 3).T arr_wb = npy_create_writebackifcopy(arr) assert_(arr_wb.flags.writebackifcopy) assert_(arr_wb.base is arr) arr_wb[...] = -100 npy_resolve(arr_wb) # arr changes after resolve, even though we assigned to arr_wb assert_equal(arr, -100) # after resolve, the two arrays no longer reference each other assert_(arr_wb.ctypes.data != 0) assert_equal(arr_wb.base, None) # assigning to arr_wb does not get transferred to arr arr_wb[...] 
= 100 assert_equal(arr, -100) def test_dealloc_warning(self): with suppress_warnings() as sup: sup.record(RuntimeWarning) arr = np.arange(9).reshape(3, 3) v = arr.T _multiarray_tests.npy_abuse_writebackifcopy(v) assert len(sup.log) == 1 def test_view_discard_refcount(self): from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard arr = np.arange(9).reshape(3, 3).T orig = arr.copy() if HAS_REFCOUNT: arr_cnt = sys.getrefcount(arr) arr_wb = npy_create_writebackifcopy(arr) assert_(arr_wb.flags.writebackifcopy) assert_(arr_wb.base is arr) arr_wb[...] = -100 npy_discard(arr_wb) # arr remains unchanged after discard assert_equal(arr, orig) # after discard, the two arrays no longer reference each other assert_(arr_wb.ctypes.data != 0) assert_equal(arr_wb.base, None) if HAS_REFCOUNT: assert_equal(arr_cnt, sys.getrefcount(arr)) # assigning to arr_wb does not get transferred to arr arr_wb[...] = 100 assert_equal(arr, orig) class TestArange(object): def test_infinite(self): assert_raises_regex( ValueError, "size exceeded", np.arange, 0, np.inf ) def test_nan_step(self): assert_raises_regex( ValueError, "cannot compute length", np.arange, 0, 1, np.nan ) def test_zero_step(self): assert_raises(ZeroDivisionError, np.arange, 0, 10, 0) assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0) # empty range assert_raises(ZeroDivisionError, np.arange, 0, 0, 0) assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0) class TestArrayFinalize(object): """ Tests __array_finalize__ """ def test_receives_base(self): # gh-11237 class SavesBase(np.ndarray): def __array_finalize__(self, obj): self.saved_base = self.base a = np.array(1).view(SavesBase) assert_(a.saved_base is a.base) def test_lifetime_on_error(self): # gh-11237 class RaisesInFinalize(np.ndarray): def __array_finalize__(self, obj): # crash, but keep this object alive raise Exception(self) # a plain object can't be weakref'd class Dummy(object): pass # get a weak reference to an object within an array obj_arr = np.array(Dummy()) obj_ref = weakref.ref(obj_arr[()]) # get an array that crashed in __array_finalize__ with assert_raises(Exception) as e: obj_arr.view(RaisesInFinalize) if sys.version_info.major == 2: # prevent an extra reference being kept sys.exc_clear() obj_subarray = e.exception.args[0] del e assert_(isinstance(obj_subarray, RaisesInFinalize)) # reference should still be held by obj_arr gc.collect() assert_(obj_ref() is not None, "object should not already be dead") del obj_arr gc.collect() assert_(obj_ref() is not None, "obj_arr should not hold the last reference") del obj_subarray gc.collect() assert_(obj_ref() is None, "no references should remain") def test_orderconverter_with_nonASCII_unicode_ordering(): # gh-7475 a = np.arange(5) assert_raises(ValueError, a.flatten, order=u'\xe2') def test_equal_override(): # gh-9153: ndarray.__eq__ uses special logic for structured arrays, which # did not respect overrides with __array_priority__ or __array_ufunc__. # The PR fixed this for __array_priority__ and __array_ufunc__ = None. 
class MyAlwaysEqual(object): def __eq__(self, other): return "eq" def __ne__(self, other): return "ne" class MyAlwaysEqualOld(MyAlwaysEqual): __array_priority__ = 10000 class MyAlwaysEqualNew(MyAlwaysEqual): __array_ufunc__ = None array = np.array([(0, 1), (2, 3)], dtype='i4,i4') for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew: my_always_equal = my_always_equal_cls() assert_equal(my_always_equal == array, 'eq') assert_equal(array == my_always_equal, 'eq') assert_equal(my_always_equal != array, 'ne') assert_equal(array != my_always_equal, 'ne') def test_npymath_complex(): # Smoketest npymath functions from numpy.core._multiarray_tests import ( npy_cabs, npy_carg) funcs = {npy_cabs: np.absolute, npy_carg: np.angle} vals = (1, np.inf, -np.inf, np.nan) types = (np.complex64, np.complex128, np.clongdouble) for fun, npfun in funcs.items(): for x, y in itertools.product(vals, vals): for t in types: z = t(complex(x, y)) got = fun(z) expected = npfun(z) assert_allclose(got, expected) def test_npymath_real(): # Smoketest npymath functions from numpy.core._multiarray_tests import ( npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh) funcs = {npy_log10: np.log10, npy_cosh: np.cosh, npy_sinh: np.sinh, npy_tan: np.tan, npy_tanh: np.tanh} vals = (1, np.inf, -np.inf, np.nan) types = (np.float32, np.float64, np.longdouble) with np.errstate(all='ignore'): for fun, npfun in funcs.items(): for x, t in itertools.product(vals, types): z = t(x) got = fun(z) expected = npfun(z) assert_allclose(got, expected) def test_uintalignment_and_alignment(): # alignment code needs to satisfy these requirements: # 1. numpy structs match C struct layout # 2. ufuncs/casting is safe wrt aligned access # 3. copy code is safe wrt "uint aligned" access # # Complex types are the main problem, whose alignment may not be the same # as their "uint alignment". # # This test might only fail on certain platforms, where uint64 alignment is # not equal to complex64 alignment. The last two checks will only fail # for DEBUG=1. d1 = np.dtype('u1,c8', align=True) d2 = np.dtype('u4,c8', align=True) d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True) assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True) assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True) assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False) # check that C struct matches numpy struct size s = _multiarray_tests.get_struct_alignments() for d, (alignment, size) in zip([d1,d2,d3], s): assert_equal(d.alignment, alignment) assert_equal(d.itemsize, size) # check that ufuncs don't complain in debug mode # (this is probably OK if the aligned flag is true above) src = np.zeros((2,2), dtype=d1)['f1'] # 4-byte aligned, often np.exp(src) # assert fails? # check that copy code doesn't complain in debug mode dst = np.zeros((2,2), dtype='c8') dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails? class TestAlignment(object): # adapted from scipy._lib.tests.test__util.test__aligned_zeros # Checks that unusual memory alignments don't trip up numpy.
# In particular, check RELAXED_STRIDES don't trip alignment assertions in # NDEBUG mode for size-0 arrays (gh-12503) def check(self, shape, dtype, order, align): err_msg = repr((shape, dtype, order, align)) x = _aligned_zeros(shape, dtype, order, align=align) if align is None: align = np.dtype(dtype).alignment assert_equal(x.__array_interface__['data'][0] % align, 0) if hasattr(shape, '__len__'): assert_equal(x.shape, shape, err_msg) else: assert_equal(x.shape, (shape,), err_msg) assert_equal(x.dtype, dtype) if order == "C": assert_(x.flags.c_contiguous, err_msg) elif order == "F": if x.size > 0: assert_(x.flags.f_contiguous, err_msg) elif order is None: assert_(x.flags.c_contiguous, err_msg) else: raise ValueError() def test_various_alignments(self): for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]: for n in [0, 1, 3, 11]: for order in ["C", "F", None]: for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']: if dtype == 'O': # object dtype can't be misaligned continue for shape in [n, (1, 2, 3, n)]: self.check(shape, np.dtype(dtype), order, align) def test_strided_loop_alignments(self): # particularly test that complex64 and float128 use right alignment # code-paths, since these are particularly problematic. It is useful to # turn on USE_DEBUG for this test, so lowlevel-loop asserts are run. for align in [1, 2, 4, 8, 12, 16, None]: xf64 = _aligned_zeros(3, np.float64) xc64 = _aligned_zeros(3, np.complex64, align=align) xf128 = _aligned_zeros(3, np.longdouble, align=align) # test casting, both to and from misaligned with suppress_warnings() as sup: sup.filter(np.ComplexWarning, "Casting complex values") xc64.astype('f8') xf64.astype(np.complex64) test = xc64 + xf64 xf128.astype('f8') xf64.astype(np.longdouble) test = xf128 + xf64 test = xf128 + xc64 # test copy, both to and from misaligned # contig copy xf64[:] = xf64.copy() xc64[:] = xc64.copy() xf128[:] = xf128.copy() # strided copy xf64[::2] = xf64[::2].copy() xc64[::2] = xc64[::2].copy() xf128[::2] = xf128[::2].copy() def test_getfield(): a = np.arange(32, dtype='uint16') if sys.byteorder == 'little': i = 0 j = 1 else: i = 1 j = 0 b = a.getfield('int8', i) assert_equal(b, a) b = a.getfield('int8', j) assert_equal(b, 0) pytest.raises(ValueError, a.getfield, 'uint8', -1) pytest.raises(ValueError, a.getfield, 'uint8', 16) pytest.raises(ValueError, a.getfield, 'uint64', 0)
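# Illustrative sketch (a hypothetical, uncollected helper added for exposition)
# of the ndarray.getfield behaviour exercised by test_getfield above: getfield
# reinterprets the bytes starting `offset` bytes into each item as the requested
# dtype, so for explicitly little-endian uint16 data the low-order byte sits at
# offset 0 and the high-order byte at offset 1, whatever the machine byte order.
def _demo_getfield():
    a = np.arange(4, dtype='<u2')                            # values 0..3, low byte only
    assert a.getfield(np.int8, 0).tolist() == [0, 1, 2, 3]   # low bytes
    assert a.getfield(np.int8, 1).tolist() == [0, 0, 0, 0]   # high bytes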