    #! /usr/bin/env python
    
    __copyright__ = "Copyright (C) 2009 Andreas Kloeckner"
    
    __license__ = """
    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:
    
    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.
    
    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    THE SOFTWARE.
    """
    
    
    # avoid spurious: pytest.mark.parametrize is not callable
    # pylint: disable=not-callable
    
    
    import numpy as np
    
    import numpy.linalg as la
    import sys
    
    import pytest
    
    import pyopencl as cl
    import pyopencl.array as cl_array
    
    import pyopencl.cltypes as cltypes
    
    import pyopencl.tools as cl_tools
    
    from pyopencl.tools import (  # noqa
            pytest_generate_tests_for_pyopencl as pytest_generate_tests)
    
    from pyopencl.characterize import has_double_support, has_struct_arg_count_bug
    
    from pyopencl.clrandom import RanluxGenerator, PhiloxGenerator, ThreefryGenerator
    
    
    _PYPY = cl._PYPY
    
    
    # {{{ helpers
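    # TO_REAL maps a complex dtype to its matching real dtype; the helpers below
    # use it to build complex test data from pairs of real-valued random arrays.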
    
    TO_REAL = {
            np.dtype(np.complex64): np.float32,
            np.dtype(np.complex128): np.float64
            }
    
    
    def general_clrand(queue, shape, dtype):
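        # Like clrand, but complex dtypes are handled by drawing the real and
        # imaginary parts separately and combining them.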
        from pyopencl.clrandom import rand as clrand
    
        dtype = np.dtype(dtype)
        if dtype.kind == "c":
            real_dtype = dtype.type(0).real.dtype
            return clrand(queue, shape, real_dtype) + 1j*clrand(queue, shape, real_dtype)
        else:
            return clrand(queue, shape, dtype)
    
    
    
    def make_random_array(queue, dtype, size):
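        # Random device array of the requested dtype; complex values are built
        # from two real-valued random arrays (see TO_REAL above).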
        from pyopencl.clrandom import rand
    
        dtype = np.dtype(dtype)
        if dtype.kind == "c":
            real_dtype = TO_REAL[dtype]
            return (rand(queue, shape=(size,), dtype=real_dtype).astype(dtype)
                    + rand(queue, shape=(size,), dtype=real_dtype).astype(dtype)
                    * dtype.type(1j))
        else:
            return rand(queue, shape=(size,), dtype=dtype)
    
    
    # }}}

    # {{{ dtype-related
    
    
    def test_basic_complex(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
        from pyopencl.clrandom import rand
    
        size = 500
    
    
        ary = (rand(queue, shape=(size,), dtype=np.float32).astype(np.complex64)
                + rand(queue, shape=(size,), dtype=np.float32).astype(np.complex64)
                * 1j)
    
        c = np.complex64(5+7j)
    
    
        host_ary = ary.get()
    
        assert la.norm((ary*c).get() - c*host_ary) < 1e-5 * la.norm(host_ary)
    
    def test_mix_complex(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
        size = 10
    
        dtypes = [
                (np.float32, np.complex64),
                #(np.int32, np.complex64),
                ]
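        # Combinations involving double precision are only added if the device
        # supports it; devices with the Apple struct-argument-count bug get a
        # reduced set without complex128.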
    
    
        dev = context.devices[0]
        if has_double_support(dev) and has_struct_arg_count_bug(dev) == "apple":
            dtypes.extend([
                (np.float32, np.float64),
                ])
        elif has_double_support(dev):
    
            dtypes.extend([
                (np.float32, np.float64),
                (np.float32, np.complex128),
                (np.float64, np.complex64),
                (np.float64, np.complex128),
                ])
    
    
        from operator import add, mul, sub, truediv
        for op in [add, sub, mul, truediv, pow]:
    
            for dtype_a0, dtype_b0 in dtypes:
                for dtype_a, dtype_b in [
                        (dtype_a0, dtype_b0),
                        (dtype_b0, dtype_a0),
                        ]:
                    for is_scalar_a, is_scalar_b in [
                            (False, False),
                            (False, True),
                            (True, False),
                            ]:
                        if is_scalar_a:
                            ary_a = make_random_array(queue, dtype_a, 1).get()[0]
                            host_ary_a = ary_a
                        else:
                            ary_a = make_random_array(queue, dtype_a, size)
                            host_ary_a = ary_a.get()
    
                        if is_scalar_b:
                            ary_b = make_random_array(queue, dtype_b, 1).get()[0]
                            host_ary_b = ary_b
                        else:
                            ary_b = make_random_array(queue, dtype_b, size)
                            host_ary_b = ary_b.get()
    
    
                        print(op, dtype_a, dtype_b, is_scalar_a, is_scalar_b)
    
                        dev_result = op(ary_a, ary_b).get()
                        host_result = op(host_ary_a, host_ary_b)
    
                        if host_result.dtype != dev_result.dtype:
                            # This appears to be a numpy bug, where we get
                            # served a Python complex that is really a
                            # smaller numpy complex.
    
    
                            print("HOST_DTYPE: {} DEV_DTYPE: {}".format(
    
                                    host_result.dtype, dev_result.dtype))
    
    
                            dev_result = dev_result.astype(host_result.dtype)
    
    
                        err = la.norm(host_result-dev_result)/la.norm(host_result)
    
                        correct = err < 1e-4
                        if not correct:
                            print(host_result)
                            print(dev_result)
                            print(host_result - dev_result)

                        assert correct

    def test_pow_neg1_vs_inv(ctx_factory):
        ctx = ctx_factory()
        queue = cl.CommandQueue(ctx)
    
        device = ctx.devices[0]
        if not has_double_support(device):
    
            from pytest import skip
    
            skip("double precision not supported on %s" % device)
    
        if has_struct_arg_count_bug(device) == "apple":
            from pytest import xfail
            xfail("apple struct arg counting broken")
    
    
        a_dev = make_random_array(queue, np.complex128, 20000)
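        # Both a**(-1) and 1/a should match the host-side reciprocal to near
        # double-precision accuracy.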
    
        res1 = (a_dev ** (-1)).get()
        res2 = (1/a_dev).get()
        ref = 1/a_dev.get()
    
        assert la.norm(res1-ref, np.inf) / la.norm(ref) < 1e-13
        assert la.norm(res2-ref, np.inf) / la.norm(ref) < 1e-13
    
    
    Andreas Klöckner's avatar
    Andreas Klöckner committed
    def test_vector_fill(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
    
        a_gpu = cl_array.Array(queue, 100, dtype=cltypes.float4)
        a_gpu.fill(cltypes.make_float4(0.0, 0.0, 1.0, 0.0))
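        # Filling with a float4 literal should broadcast that 4-vector into every
        # element of the array.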
    
        a = a_gpu.get()
    
        assert a.dtype == cltypes.float4
    
        a_gpu = cl_array.zeros(queue, 100, dtype=cltypes.float4)
    
    def test_zeros_large_array(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
        dev = queue.device
    
        size = 2**28 + 1
        if dev.address_bits == 64 and dev.max_mem_alloc_size >= 8 * size:
    
            # this shouldn't hang/cause errors
            # see https://github.com/inducer/pyopencl/issues/395
    
            a_gpu = cl_array.zeros(queue, (size,), dtype="float64")
    
            # run a couple kernels to ensure no propagated runtime errors
            a_gpu[...] = 1.
            a_gpu = 2 * a_gpu - 3
        else:
            pass
    
    
    
    def test_absrealimag(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
    
        def real(x):
            return x.real
    
        def imag(x):
            return x.imag
    
        def conj(x):
            return x.conj()
    
    
        n = 111
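        # abs/real/imag/conj on the device array should match numpy elementwise,
        # including for integer and complex inputs.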
        for func in [abs, real, imag, conj]:
            for dtype in [np.int32, np.float32, np.complex64]:
                print(func, dtype)
                a = -make_random_array(queue, dtype, n)
    
                host_res = func(a.get())
                dev_res = func(a).get()
    
                correct = np.allclose(dev_res, host_res)
                if not correct:
                    print(dev_res)
                    print(host_res)
                    print(dev_res-host_res)
                assert correct
    
    
    
    def test_custom_type_zeros(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
        if not (
                queue._get_cl_version() >= (1, 2)
                and cl.get_cl_header_version() >= (1, 2)):
            pytest.skip("CL1.2 not available")
    
        dtype = np.dtype([
            ("cur_min", np.int32),
            ("cur_max", np.int32),
            ("pad", np.int32),
            ])
    
        from pyopencl.tools import get_or_register_dtype, match_dtype_to_c_struct
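        # match_dtype_to_c_struct adjusts the numpy struct dtype (padding/field
        # layout) to match the device-side C struct before it is registered.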
    
        name = "mmc_type"
        dtype, c_decl = match_dtype_to_c_struct(queue.device, name, dtype)
        dtype = get_or_register_dtype(name, dtype)
    
        n = 1000
        z_dev = cl.array.zeros(queue, n, dtype=dtype)
    
        z = z_dev.get()
    
        assert np.array_equal(np.zeros(n, dtype), z)
    
    
    
    def test_custom_type_fill(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
        from pyopencl.characterize import has_struct_arg_count_bug
        if has_struct_arg_count_bug(queue.device):
            pytest.skip("device has LLVM arg counting bug")
    
        dtype = np.dtype([
            ("cur_min", np.int32),
            ("cur_max", np.int32),
            ("pad", np.int32),
            ])
    
        from pyopencl.tools import get_or_register_dtype, match_dtype_to_c_struct
    
        name = "mmc_type"
        dtype, c_decl = match_dtype_to_c_struct(queue.device, name, dtype)
        dtype = get_or_register_dtype(name, dtype)
    
        n = 1000
        z_dev = cl.array.empty(queue, n, dtype=dtype)
        z_dev.fill(np.zeros((), dtype))
    
        z = z_dev.get()
    
        assert np.array_equal(np.zeros(n, dtype), z)
    
    
    
    def test_custom_type_take_put(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
        dtype = np.dtype([
            ("cur_min", np.int32),
            ("cur_max", np.int32),
            ])
    
        from pyopencl.tools import get_or_register_dtype, match_dtype_to_c_struct
    
        name = "tp_type"
        dtype, c_decl = match_dtype_to_c_struct(queue.device, name, dtype)
        dtype = get_or_register_dtype(name, dtype)
    
        n = 100
        z = np.empty(100, dtype)
        z["cur_min"] = np.arange(n)
        z["cur_max"] = np.arange(n)**2
    
        z_dev = cl.array.to_device(queue, z)
        ind = cl.array.arange(queue, n, step=3, dtype=np.int32)
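        # Indexing a device array with a device array of indices ("take") should
        # match numpy fancy indexing on the host.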
    
        z_ind_ref = z[ind.get()]
        z_ind = z_dev[ind]
    
        assert np.array_equal(z_ind.get(), z_ind_ref)
    
    
    def test_rmul_yields_right_type(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
        a = np.array([1, 2, 3, 4, 5]).astype(np.float32)
        a_gpu = cl_array.to_device(queue, a)
    
        two_a = 2*a_gpu
        assert isinstance(two_a, cl_array.Array)
    
        two_a = np.float32(2)*a_gpu
        assert isinstance(two_a, cl_array.Array)
    
    
    
    def test_pow_array(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
        a = np.array([1, 2, 3, 4, 5]).astype(np.float32)
        a_gpu = cl_array.to_device(queue, a)
    
        result = pow(a_gpu, a_gpu).get()
    
        assert (np.abs(a ** a - result) < 3e-3).all()
    
    
        result = (a_gpu ** a_gpu).get()
    
        assert (np.abs(pow(a, a) - result) < 3e-3).all()
    
    
    
    def test_pow_number(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
    
        a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
    
        a_gpu = cl_array.to_device(queue, a)
    
        result = pow(a_gpu, 2).get()
        assert (np.abs(a ** 2 - result) < 1e-3).all()
    
        """Test the muliplication of an array with a scalar. """
    
    
        queue = cl.CommandQueue(context)
    
        for sz in [10, 50000]:
            for dtype, scalars in [
    
                    (np.float32, [2]),
                    (np.complex64, [2j]),
                    ]:
    
                for scalar in scalars:
    
                    a_gpu = make_random_array(queue, dtype, sz)
                    a = a_gpu.get()
                    a_mult = (scalar * a_gpu).get()
    
                    assert (a * scalar == a_mult).all()
    
    
    
    def test_multiply_array(ctx_factory):
    
        """Test the multiplication of two arrays."""
    
    
        queue = cl.CommandQueue(context)
    
    
        a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
    
        a_gpu = cl_array.to_device(queue, a)
        b_gpu = cl_array.to_device(queue, a)
    
        a_squared = (b_gpu * a_gpu).get()
    
        assert (a * a == a_squared).all()
    
    def test_addition_array(ctx_factory):
    
        """Test the addition of two arrays."""
    
    
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
    
        a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
    
        a_gpu = cl_array.to_device(queue, a)
    
        a_added = (a_gpu + a_gpu).get()
    
        assert (a + a == a_added).all()
    
    def test_addition_scalar(ctx_factory):
    
        """Test the addition of an array and a scalar."""
    
    
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
    
        a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
    
        a_gpu = cl_array.to_device(queue, a)
    
        a_added = (7 + a_gpu).get()
    
        assert (7 + a == a_added).all()
    
    @pytest.mark.parametrize(("dtype_a", "dtype_b"),
            [
                (np.float32, np.float32),
                (np.float32, np.int32),
                (np.int32, np.int32),
                (np.int64, np.int32),
                (np.int64, np.uint32),
                ])
    def test_subtract_array(ctx_factory, dtype_a, dtype_b):
    
        """Test the substraction of two arrays."""
        #test data
    
        a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(dtype_a)
    
        b = np.array([10, 20, 30, 40, 50,
    
                      60, 70, 80, 90, 100]).astype(dtype_b)
    
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
    
        a_gpu = cl_array.to_device(queue, a)
        b_gpu = cl_array.to_device(queue, b)
    
        result = (a_gpu - b_gpu).get()
        assert (a - b == result).all()
    
        result = (b_gpu - a_gpu).get()
        assert (b - a == result).all()
    
    def test_substract_scalar(ctx_factory):
    
        """Test the substraction of an array and a scalar."""
    
    
        context = ctx_factory()
        queue = cl.CommandQueue(context)

        # test data
    
        a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
    
    
        # convert a to a gpu object
    
        a_gpu = cl_array.to_device(queue, a)
    
        result = (a_gpu - 7).get()
        assert (a - 7 == result).all()
    
        result = (7 - a_gpu).get()
        assert (7 - a == result).all()
    
    def test_divide_scalar(ctx_factory):
    
        """Test the division of an array and a scalar."""
    
    
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
    
        if queue.device.platform.name == "Apple":
            pytest.xfail("Apple CL compiler crashes on this.")
    
    
        dtypes = (np.uint8, np.uint16, np.uint32,
                      np.int8, np.int16, np.int32,
                      np.float32, np.complex64)
        from pyopencl.characterize import has_double_support
        if has_double_support(queue.device):
            dtypes = dtypes + (np.float64, np.complex128)
    
        from itertools import product
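        # Exercise both array/scalar and scalar/array division for every dtype
        # pair, checking values and that result dtypes follow numpy's promotion.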
    
        for dtype_a, dtype_s in product(dtypes, repeat=2):
            a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]).astype(dtype_a)
            s = dtype_s(40)
            a_gpu = cl_array.to_device(queue, a)
    
            b = a / s
            b_gpu = a_gpu / s
            assert (np.abs(b_gpu.get() - b) < 1e-3).all()
            assert b_gpu.dtype is b.dtype
    
            c = s / a
            c_gpu = s / a_gpu
            assert (np.abs(c_gpu.get() - c) < 1e-3).all()
            assert c_gpu.dtype is c.dtype
    
    def test_divide_array(ctx_factory):
    
        """Test the division of an array and a scalar. """
    
    
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
    
        dtypes = (np.float32, np.complex64)
        from pyopencl.characterize import has_double_support
        if has_double_support(queue.device):
            dtypes = dtypes + (np.float64, np.complex128)
    
        from itertools import product
    
        for dtype_a, dtype_b in product(dtypes, repeat=2):
    
            a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]).astype(dtype_a)
            b = np.array([10, 10, 10, 10, 10, 10, 10, 10, 10, 10]).astype(dtype_b)
    
            a_gpu = cl_array.to_device(queue, a)
            b_gpu = cl_array.to_device(queue, b)
            c = a / b
            c_gpu = (a_gpu / b_gpu)
            assert (np.abs(c_gpu.get() - c) < 1e-3).all()
            assert c_gpu.dtype is c.dtype
    
            d = b / a
            d_gpu = (b_gpu / a_gpu)
            assert (np.abs(d_gpu.get() - d) < 1e-3).all()
            assert d_gpu.dtype is d.dtype
    
    
    
    def test_divide_inplace_scalar(ctx_factory):
        """Test inplace division of arrays and a scalar."""
    
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
    
        if queue.device.platform.name == "Apple":
            pytest.xfail("Apple CL compiler crashes on this.")
    
    
        dtypes = (np.uint8, np.uint16, np.uint32,
                      np.int8, np.int16, np.int32,
                      np.float32, np.complex64)
        from pyopencl.characterize import has_double_support
        if has_double_support(queue.device):
            dtypes = dtypes + (np.float64, np.complex128)
    
        from itertools import product
    
        for dtype_a, dtype_s in product(dtypes, repeat=2):
    
            a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]).astype(dtype_a)
            s = dtype_s(40)
    
            a_gpu = cl_array.to_device(queue, a)
    
    
            # ensure the same behavior as inplace numpy.ndarray division
            try:
                a /= s
            except TypeError:
                with np.testing.assert_raises(TypeError):
                    a_gpu /= s
            else:
                a_gpu /= s
                assert (np.abs(a_gpu.get() - a) < 1e-3).all()
                assert a_gpu.dtype is a.dtype
    
    
    
    def test_divide_inplace_array(ctx_factory):
    
        """Test inplace division of arrays."""
    
    
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
    
        dtypes = (np.uint8, np.uint16, np.uint32,
                      np.int8, np.int16, np.int32,
                      np.float32, np.complex64)
        from pyopencl.characterize import has_double_support
        if has_double_support(queue.device):
            dtypes = dtypes + (np.float64, np.complex128)
    
        from itertools import product
    
        for dtype_a, dtype_b in product(dtypes, repeat=2):
            print(dtype_a, dtype_b)
            a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]).astype(dtype_a)
            b = np.array([10, 10, 10, 10, 10, 10, 10, 10, 10, 10]).astype(dtype_b)
    
    
            a_gpu = cl_array.to_device(queue, a)
            b_gpu = cl_array.to_device(queue, b)
    
    
            # ensure the same behavior as inplace numpy.ndarray division
            try:
                a_gpu /= b_gpu
            except TypeError:
                # pass for now, as numpy casts differently for in-place and out-place
                # true_divide
                pass
                # with np.testing.assert_raises(TypeError):
                #     a /= b
            else:
                a /= b
                assert (np.abs(a_gpu.get() - a) < 1e-3).all()
                assert a_gpu.dtype is a.dtype
    
    
    
    def test_bitwise(ctx_factory):
    
        if _PYPY:
            pytest.xfail("numpypy: missing bitwise ops")
    
    
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
        from itertools import product
    
        dtypes = [np.dtype(t) for t in (np.int64, np.int32, np.int16, np.int8)]
    
    
        from pyopencl.clrandom import rand as clrand
    
    
        for a_dtype, b_dtype in product(dtypes, dtypes):
    
            ary_len = 16
    
            int32_min = np.iinfo(np.int32).min
            int32_max = np.iinfo(np.int32).max
    
            a_dev = clrand(
    
                queue, (ary_len,), a=int32_min, b=1+int32_max, dtype=np.int64
                ).astype(a_dtype)
    
            b_dev = clrand(
    
                queue, (ary_len,), a=int32_min, b=1+int32_max, dtype=np.int64
                ).astype(b_dtype)
    
    
            a = a_dev.get()
            b = b_dev.get()
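            # also draw a random scalar operand so the mixed array/scalar forms
            # of the bitwise operators are exercised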
    
            s = int(clrand(queue, (), a=int32_min, b=1+int32_max, dtype=np.int64)
                     .astype(b_dtype).get())
    
    
            import operator as o
    
            for op in [o.and_, o.or_, o.xor]:
                res_dev = op(a_dev, b_dev)
                res = op(a, b)
    
                assert (res_dev.get() == res).all()
    
                res_dev = op(a_dev, s)
                res = op(a, s)
    
                assert (res_dev.get() == res).all()
    
                res_dev = op(s, b_dev)
                res = op(s, b)
    
                assert (res_dev.get() == res).all()
    
            for op in [o.iand, o.ior, o.ixor]:
                res_dev = a_dev.copy()
    
                op_res = op(res_dev, b_dev)
                assert op_res is res_dev
    
    
                res = a.copy()
                op(res, b)
    
                assert (res_dev.get() == res).all()
    
                res_dev = a_dev.copy()
    
                op_res = op(res_dev, s)
                assert op_res is res_dev
    
                res = a.copy()
                op(res, s)
    
                assert (res_dev.get() == res).all()
    
    
            # Test unary ~
            res_dev = ~a_dev
    
            res = ~a  # pylint:disable=invalid-unary-operand-type
    
            assert (res_dev.get() == res).all()
    
    
    # {{{ RNG
    
    @pytest.mark.parametrize("rng_class",
            [RanluxGenerator, PhiloxGenerator, ThreefryGenerator])
    
    @pytest.mark.parametrize("ary_size", [300, 301, 302, 303, 10007, 1000000])
    
    def test_random_float_in_range(ctx_factory, rng_class, ary_size, plot_hist=False):
    
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
    
        device = queue.device
        if device.platform.vendor == "The pocl project" \
                and device.type & cl.device_type.GPU \
                and rng_class is RanluxGenerator:
            pytest.xfail("ranlux test fails on POCL + Nvidia,"
                    "at least the Titan V, as of pocl 1.6, 2021-01-20")
    
    
        if has_double_support(context.devices[0]):
            dtypes = [np.float32, np.float64]
        else:
            dtypes = [np.float32]
    
        if rng_class is RanluxGenerator:
            gen = rng_class(queue, 5120)
        else:
            gen = rng_class(context)
    
        for dtype in dtypes:
            print(dtype)
            ran = cl_array.zeros(queue, ary_size, dtype)
            gen.fill_uniform(ran)
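            # with no bounds given, the samples should land in [0, 1]; with
            # a=4, b=7 below, in [4, 7]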
    
            if plot_hist:
                import matplotlib.pyplot as pt
                pt.hist(ran.get(), 30)
                pt.show()
    
            assert (0 <= ran.get()).all()
            assert (ran.get() <= 1).all()
    
            if rng_class is RanluxGenerator:
    
                gen.synchronize(queue)
    
            ran = cl_array.zeros(queue, ary_size, dtype)
            gen.fill_uniform(ran, a=4, b=7)
    
            ran_host = ran.get()
    
            for cond in [4 <= ran_host,  ran_host <= 7]:
                good = cond.all()
                if not good:
                    print(np.where(~cond))
                    print(ran_host[~cond])
                assert good
    
            ran = gen.normal(queue, ary_size, dtype, mu=10, sigma=3)
    
            if plot_hist:
                import matplotlib.pyplot as pt
                pt.hist(ran.get(), 30)
                pt.show()
    
    
    @pytest.mark.parametrize("dtype", [np.int32, np.int64])
    
    @pytest.mark.parametrize("rng_class",
            [RanluxGenerator, PhiloxGenerator, ThreefryGenerator])
    def test_random_int_in_range(ctx_factory, rng_class, dtype, plot_hist=False):
    
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
    
        if queue.device.platform.vendor == "The pocl project" \
                and queue.device.type & cl.device_type.GPU \
                and rng_class is RanluxGenerator:
            pytest.xfail("ranlux test fails on POCL + Nvidia,"
                    "at least the Titan V, as of pocl 1.6, 2021-01-20")
    
    
        if rng_class is RanluxGenerator:
            gen = rng_class(queue, 5120)
        else:
            gen = rng_class(context)
    
        # if (dtype == np.int64
        #         and context.devices[0].platform.vendor.startswith("Advanced Micro")):
        #     pytest.xfail("AMD miscompiles 64-bit RNG math")
    
        ran = gen.uniform(queue, (10000007,), dtype, a=200, b=300).get()
        assert (200 <= ran).all()
        assert (ran < 300).all()
    
        print(np.min(ran), np.max(ran))
        assert np.max(ran) > 295
    
        if plot_hist:
            from matplotlib import pyplot as pt
            pt.hist(ran)
            pt.show()
    
    # }}}
    
    # {{{ misc
    
    
    def test_numpy_integer_shape(ctx_factory):
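        # Array creation should accept numpy integer scalars (and tuples of them)
        # as shapes.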
    
        try:
            list(np.int32(17))
    
        except Exception:
    
            pass
        else:
            from pytest import skip
            skip("numpy implementation does not handle scalar correctly.")
    
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
        cl_array.empty(queue, np.int32(17), np.float32)
        cl_array.empty(queue, (np.int32(17), np.int32(17)), np.float32)
    
    
    def test_len(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
        a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
        a_cpu = cl_array.to_device(queue, a)
        assert len(a_cpu) == 10
    
    
    def test_stride_preservation(ctx_factory):
    
        if _PYPY:
            pytest.xfail("numpypy: no array creation from __array_interface__")
    
    
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
    
        a = np.random.rand(3, 3)
        at = a.T
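        # A transposed (Fortran-contiguous) host array should round-trip through
        # the device with its contents intact.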
        print(at.flags.f_contiguous, at.flags.c_contiguous)
        at_gpu = cl_array.to_device(queue, at)
        print(at_gpu.flags.f_contiguous, at_gpu.flags.c_contiguous)
        assert np.allclose(at_gpu.get(), at)
    
    def test_nan_arithmetic(ctx_factory):
    
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
    
        def make_nan_contaminated_vector(size):
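            # random vector with NaNs written into roughly 10% of its entries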
            shape = (size,)
            a = np.random.randn(*shape).astype(np.float32)
            from random import randrange
            for i in range(size // 10):
    
                a[randrange(0, size)] = float("nan")
    
            return a
    
        size = 1 << 20
    
        a = make_nan_contaminated_vector(size)
        a_gpu = cl_array.to_device(queue, a)
        b = make_nan_contaminated_vector(size)
        b_gpu = cl_array.to_device(queue, b)
    
        ab = a * b
        ab_gpu = (a_gpu * b_gpu).get()
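        # NaNs should propagate through the device-side product exactly where
        # numpy produces them on the host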
    
        assert (np.isnan(ab) == np.isnan(ab_gpu)).all()
    
    def test_mem_pool_with_arrays(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
        mem_pool = cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue))
    
        a_dev = cl_array.arange(queue, 2000, dtype=np.float32, allocator=mem_pool)
    
        b_dev = cl_array.to_device(queue, np.arange(2000), allocator=mem_pool) + 4000
    
        assert a_dev.allocator is mem_pool
        assert b_dev.allocator is mem_pool
    
    
    def test_view(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
        a = np.arange(128).reshape(8, 16).astype(np.float32)
        a_dev = cl_array.to_device(queue, a)
    
        # same dtype
        view = a_dev.view()
        assert view.shape == a_dev.shape and view.dtype == a_dev.dtype
    
        # larger dtype
        view = a_dev.view(np.complex64)
        assert view.shape == (8, 8) and view.dtype == np.complex64
    
        # smaller dtype
        view = a_dev.view(np.int16)
        assert view.shape == (8, 32) and view.dtype == np.int16
    
    
    
    def test_diff(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
        from pyopencl.clrandom import rand as clrand
    
    
        ary_len = 20000
        a_dev = clrand(queue, (ary_len,), dtype=np.float32)
    
        a = a_dev.get()
    
        err = la.norm(
                cl.array.diff(a_dev).get() - np.diff(a))
    
        assert err < 1e-4
    
    
    
    def test_copy(ctx_factory):
        context = ctx_factory()
        queue1 = cl.CommandQueue(context)
        queue2 = cl.CommandQueue(context)
    
        # Test copy
    
        arr = cl.array.zeros(queue1, 100, np.int32)
        arr_copy = arr.copy()
    
        assert (arr == arr_copy).all().get()
        assert arr.data != arr_copy.data
    
    
        # Test queue association
    
        arr_copy = arr.copy(queue=queue2)
        assert arr_copy.queue is queue2

        arr_copy = arr.copy(queue=None)
        assert arr_copy.queue is None

        arr_copy = arr.with_queue(None).copy(queue=queue1)
        assert arr_copy.queue is queue1
    
    # }}}
    
    # {{{ slices, concatenation
    
    
    def test_slice(ctx_factory):
        if _PYPY:
            pytest.xfail("numpypy: spurious as_strided failure")
    
    
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
    
        from pyopencl.clrandom import rand as clrand

        tp = np.float32  # dtype exercised by the slicing tests below
    
        ary_len = 20000
        a_gpu = clrand(queue, (ary_len,), dtype=tp)
        b_gpu = clrand(queue, (ary_len,), dtype=tp)
    
        a = a_gpu.get()
    
        b = b_gpu.get()
    
        from random import randrange
    
        for i in range(20):
            start = randrange(ary_len)
            end = randrange(start, ary_len)
    
            a_gpu_slice = tp(2)*a_gpu[start:end]
            a_slice = tp(2)*a[start:end]
    
    
            assert la.norm(a_gpu_slice.get() - a_slice) == 0
    
        for i in range(20):
    
            start = randrange(ary_len)
            end = randrange(start, ary_len)
    
            a_gpu[start:end] = tp(2)*b[start:end]
            a[start:end] = tp(2)*b[start:end]
    
    
            assert la.norm(a_gpu.get() - a) == 0
    
        for i in range(20):
    
            start = randrange(ary_len)
            end = randrange(start, ary_len)
    
            a_gpu[start:end] = tp(2)*b_gpu[start:end]
            a[start:end] = tp(2)*b[start:end]
    
    
            assert la.norm(a_gpu.get() - a) == 0
    
    
    def test_concatenate(ctx_factory):
        context = ctx_factory()
        queue = cl.CommandQueue(context)
    
        from pyopencl.clrandom import rand as clrand