#! /usr/bin/env python __copyright__ = "Copyright (C) 2009 Andreas Kloeckner" __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import numpy as np import numpy.linalg as la import sys import pytools.test from pytools import memoize def have_cl(): try: import pyopencl return True except: return False if have_cl(): import pyopencl as cl import pyopencl.array as cl_array import pyopencl.tools as cl_tools from pyopencl.tools import pytest_generate_tests_for_pyopencl \ as pytest_generate_tests from pyopencl.characterize import has_double_support # {{{ helpers TO_REAL = { np.dtype(np.complex64): np.float32, np.dtype(np.complex128): np.float64 } def general_clrand(queue, shape, dtype): from pyopencl.clrandom import rand as clrand dtype = np.dtype(dtype) if dtype.kind == "c": real_dtype = dtype.type(0).real.dtype return clrand(queue, shape, real_dtype) + 1j*clrand(queue, shape, real_dtype) else: return clrand(queue, shape, dtype) def make_random_array(queue, dtype, size): from pyopencl.clrandom import rand dtype = np.dtype(dtype) if dtype.kind == "c": real_dtype = TO_REAL[dtype] return (rand(queue, shape=(size,), dtype=real_dtype).astype(dtype) + dtype.type(1j) * rand(queue, shape=(size,), dtype=real_dtype).astype(dtype)) else: return rand(queue, shape=(size,), dtype=dtype) # }}} # {{{ dtype-related @pytools.test.mark_test.opencl def test_basic_complex(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) from pyopencl.clrandom import rand size = 500 ary = (rand(queue, shape=(size,), dtype=np.float32).astype(np.complex64) + 1j* rand(queue, shape=(size,), dtype=np.float32).astype(np.complex64)) c = np.complex64(5+7j) host_ary = ary.get() assert la.norm((c*ary).get() - c*host_ary) < 1e-5 * la.norm(host_ary) @pytools.test.mark_test.opencl def test_mix_complex(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) size = 10 dtypes = [ (np.float32, np.complex64), #(np.int32, np.complex64), ] if has_double_support(context.devices[0]): dtypes.extend([ (np.float32, np.float64), (np.float32, np.complex128), (np.float64, np.complex64), (np.float64, np.complex128), ]) from operator import add, mul, sub, truediv for op in [add, sub, mul, truediv, pow]: for dtype_a0, dtype_b0 in dtypes: for dtype_a, dtype_b in [ (dtype_a0, dtype_b0), (dtype_b0, dtype_a0), ]: for is_scalar_a, is_scalar_b in [ (False, False), (False, True), (True, False), ]: if is_scalar_a: ary_a = make_random_array(queue, dtype_a, 1).get()[0] host_ary_a = ary_a else: ary_a = make_random_array(queue, dtype_a, size) host_ary_a = ary_a.get() if is_scalar_b: ary_b = make_random_array(queue, dtype_b, 1).get()[0] host_ary_b = ary_b else: ary_b = make_random_array(queue, dtype_b, size) host_ary_b = ary_b.get() print(op, dtype_a, dtype_b, is_scalar_a, is_scalar_b) dev_result = op(ary_a, ary_b).get() host_result = op(host_ary_a, host_ary_b) if host_result.dtype != dev_result.dtype: # This appears to be a numpy bug, where we get # served a Python complex that is really a # smaller numpy complex. print("HOST_DTYPE: %s DEV_DTYPE: %s" % ( host_result.dtype, dev_result.dtype)) dev_result = dev_result.astype(host_result.dtype) err = la.norm(host_result-dev_result)/la.norm(host_result) print(err) correct = err < 1e-5 if not correct: print(host_result) print(dev_result) print(host_result - dev_result) assert correct @pytools.test.mark_test.opencl def test_pow_neg1_vs_inv(ctx_factory): ctx = ctx_factory() queue = cl.CommandQueue(ctx) device = ctx.devices[0] if not has_double_support(device): from py.test import skip skip("double precision not supported on %s" % device) a_dev = make_random_array(queue, np.complex128, 20000) res1 = (a_dev ** (-1)).get() res2 = (1/a_dev).get() ref = 1/a_dev.get() assert la.norm(res1-ref, np.inf) / la.norm(ref) < 1e-13 assert la.norm(res2-ref, np.inf) / la.norm(ref) < 1e-13 @pytools.test.mark_test.opencl def test_vector_fill(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) a_gpu = cl_array.Array(queue, 100, dtype=cl_array.vec.float4) a_gpu.fill(cl_array.vec.make_float4(0.0, 0.0, 1.0, 0.0)) a = a_gpu.get() assert a.dtype is cl_array.vec.float4 a_gpu = cl_array.zeros(queue, 100, dtype=cl_array.vec.float4) @pytools.test.mark_test.opencl def test_absrealimag(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) def real(x): return x.real def imag(x): return x.imag def conj(x): return x.conj() n = 111 for func in [abs, real, imag, conj]: for dtype in [np.int32, np.float32, np.complex64]: print(func, dtype) a = -make_random_array(queue, dtype, n) host_res = func(a.get()) dev_res = func(a).get() correct = np.allclose(dev_res, host_res) if not correct: print(dev_res) print(host_res) print(dev_res-host_res) assert correct # }}} # {{{ operands @pytools.test.mark_test.opencl def test_pow_array(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) a = np.array([1, 2, 3, 4, 5]).astype(np.float32) a_gpu = cl_array.to_device(queue, a) result = pow(a_gpu, a_gpu).get() assert (np.abs(a ** a - result) < 1e-3).all() result = (a_gpu ** a_gpu).get() assert (np.abs(pow(a, a) - result) < 1e-3).all() @pytools.test.mark_test.opencl def test_pow_number(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32) a_gpu = cl_array.to_device(queue, a) result = pow(a_gpu, 2).get() assert (np.abs(a ** 2 - result) < 1e-3).all() @pytools.test.mark_test.opencl def test_multiply(ctx_factory): """Test the muliplication of an array with a scalar. """ context = ctx_factory() queue = cl.CommandQueue(context) for sz in [10, 50000]: for dtype, scalars in [ (np.float32, [2]), (np.complex64, [2j]), ]: for scalar in scalars: a_gpu = make_random_array(queue, dtype, sz) a = a_gpu.get() a_mult = (scalar * a_gpu).get() assert (a * scalar == a_mult).all() @pytools.test.mark_test.opencl def test_multiply_array(ctx_factory): """Test the multiplication of two arrays.""" context = ctx_factory() queue = cl.CommandQueue(context) a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32) a_gpu = cl_array.to_device(queue, a) b_gpu = cl_array.to_device(queue, a) a_squared = (b_gpu * a_gpu).get() assert (a * a == a_squared).all() @pytools.test.mark_test.opencl def test_addition_array(ctx_factory): """Test the addition of two arrays.""" context = ctx_factory() queue = cl.CommandQueue(context) a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32) a_gpu = cl_array.to_device(queue, a) a_added = (a_gpu + a_gpu).get() assert (a + a == a_added).all() @pytools.test.mark_test.opencl def test_addition_scalar(ctx_factory): """Test the addition of an array and a scalar.""" context = ctx_factory() queue = cl.CommandQueue(context) a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32) a_gpu = cl_array.to_device(queue, a) a_added = (7 + a_gpu).get() assert (7 + a == a_added).all() @pytools.test.mark_test.opencl def test_substract_array(ctx_factory): """Test the substraction of two arrays.""" #test data a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32) b = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]).astype(np.float32) context = ctx_factory() queue = cl.CommandQueue(context) a_gpu = cl_array.to_device(queue, a) b_gpu = cl_array.to_device(queue, b) result = (a_gpu - b_gpu).get() assert (a - b == result).all() result = (b_gpu - a_gpu).get() assert (b - a == result).all() @pytools.test.mark_test.opencl def test_substract_scalar(ctx_factory): """Test the substraction of an array and a scalar.""" context = ctx_factory() queue = cl.CommandQueue(context) #test data a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32) #convert a to a gpu object a_gpu = cl_array.to_device(queue, a) result = (a_gpu - 7).get() assert (a - 7 == result).all() result = (7 - a_gpu).get() assert (7 - a == result).all() @pytools.test.mark_test.opencl def test_divide_scalar(ctx_factory): """Test the division of an array and a scalar.""" context = ctx_factory() queue = cl.CommandQueue(context) a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32) a_gpu = cl_array.to_device(queue, a) result = (a_gpu / 2).get() assert (a / 2 == result).all() result = (2 / a_gpu).get() assert (np.abs(2 / a - result) < 1e-5).all() @pytools.test.mark_test.opencl def test_divide_array(ctx_factory): """Test the division of an array and a scalar. """ context = ctx_factory() queue = cl.CommandQueue(context) #test data a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]).astype(np.float32) b = np.array([10, 10, 10, 10, 10, 10, 10, 10, 10, 10]).astype(np.float32) a_gpu = cl_array.to_device(queue, a) b_gpu = cl_array.to_device(queue, b) a_divide = (a_gpu / b_gpu).get() assert (np.abs(a / b - a_divide) < 1e-3).all() a_divide = (b_gpu / a_gpu).get() assert (np.abs(b / a - a_divide) < 1e-3).all() # }}} # {{{ RNG @pytools.test.mark_test.opencl def test_random(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) from pyopencl.clrandom import RanluxGenerator if has_double_support(context.devices[0]): dtypes = [np.float32, np.float64] else: dtypes = [np.float32] gen = RanluxGenerator(queue, 5120) for ary_size in [300, 301, 302, 303, 10007]: for dtype in dtypes: ran = cl_array.zeros(queue, ary_size, dtype) gen.fill_uniform(ran) assert (0 < ran.get()).all() assert (ran.get() < 1).all() gen.synchronize(queue) ran = cl_array.zeros(queue, ary_size, dtype) gen.fill_uniform(ran, a=4, b=7) assert (4 < ran.get()).all() assert (ran.get() < 7).all() ran = gen.normal(queue, (10007,), dtype, mu=4, sigma=3) dtypes = [np.int32] for dtype in dtypes: ran = gen.uniform(queue, (10000007,), dtype, a=200, b=300) assert (200 <= ran.get()).all() assert (ran.get() < 300).all() #from matplotlib import pyplot as pt #pt.hist(ran.get()) #pt.show() # }}} # {{{ misc @pytools.test.mark_test.opencl def test_numpy_integer_shape(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) cl_array.empty(queue, np.int32(17), np.float32) cl_array.empty(queue, (np.int32(17), np.int32(17)), np.float32) @pytools.test.mark_test.opencl def test_len(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32) a_cpu = cl_array.to_device(queue, a) assert len(a_cpu) == 10 @pytools.test.mark_test.opencl def test_stride_preservation(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) A = np.random.rand(3, 3) AT = A.T print(AT.flags.f_contiguous, AT.flags.c_contiguous) AT_GPU = cl_array.to_device(queue, AT) print(AT_GPU.flags.f_contiguous, AT_GPU.flags.c_contiguous) assert np.allclose(AT_GPU.get(), AT) @pytools.test.mark_test.opencl def test_nan_arithmetic(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) def make_nan_contaminated_vector(size): shape = (size,) a = np.random.randn(*shape).astype(np.float32) from random import randrange for i in range(size // 10): a[randrange(0, size)] = float('nan') return a size = 1 << 20 a = make_nan_contaminated_vector(size) a_gpu = cl_array.to_device(queue, a) b = make_nan_contaminated_vector(size) b_gpu = cl_array.to_device(queue, b) ab = a * b ab_gpu = (a_gpu * b_gpu).get() assert (np.isnan(ab) == np.isnan(ab_gpu)).all() @pytools.test.mark_test.opencl def test_mem_pool_with_arrays(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) mem_pool = cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)) a_dev = cl_array.arange(queue, 2000, dtype=np.float32, allocator=mem_pool) b_dev = cl_array.to_device(queue, np.arange(2000), allocator=mem_pool) + 4000 result = cl_array.dot(a_dev, b_dev) assert a_dev.allocator is mem_pool assert b_dev.allocator is mem_pool assert result.allocator is mem_pool @pytools.test.mark_test.opencl def test_view(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) a = np.arange(128).reshape(8, 16).astype(np.float32) a_dev = cl_array.to_device(queue, a) # same dtype view = a_dev.view() assert view.shape == a_dev.shape and view.dtype == a_dev.dtype # larger dtype view = a_dev.view(np.complex64) assert view.shape == (8, 8) and view.dtype == np.complex64 # smaller dtype view = a_dev.view(np.int16) assert view.shape == (8, 32) and view.dtype == np.int16 # }}} @pytools.test.mark_test.opencl def no_test_slice(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) from pyopencl.clrandom import rand as clrand l = 20000 a_gpu = clrand(queue, (l,)) a = a_gpu.get() from random import randrange for i in range(200): start = randrange(l) end = randrange(start, l) a_gpu_slice = a_gpu[start:end] a_slice = a[start:end] assert la.norm(a_gpu_slice.get() - a_slice) == 0 if __name__ == "__main__": # make sure that import failures get reported, instead of skipping the # tests. import pyopencl as cl import sys if len(sys.argv) > 1: exec(sys.argv[1]) else: from py.test.cmdline import main main([__file__]) # vim: filetype=pyopencl:fdm=marker