From c7718d2023a47cf79b571b260099bf4742320944 Mon Sep 17 00:00:00 2001 From: Alexandru Fikl <alexfikl@gmail.com> Date: Sat, 6 Aug 2022 09:20:28 +0300 Subject: [PATCH] use np.random.default_rng with a seed consistently --- doc/algorithm.rst | 3 +- examples/ipython-demo.ipynb | 397 ++++++++++++++++++------------------ test/test_algorithm.py | 6 +- test/test_array.py | 60 +++--- test/test_clmath.py | 12 +- test/test_enqueue_copy.py | 22 +- test/test_wrapper.py | 32 ++- 7 files changed, 275 insertions(+), 257 deletions(-) diff --git a/doc/algorithm.rst b/doc/algorithm.rst index a3cad802..aa21fbeb 100644 --- a/doc/algorithm.rst +++ b/doc/algorithm.rst @@ -241,7 +241,8 @@ Here's a usage example:: knl = InclusiveScanKernel(context, np.int32, "a+b") n = 2**20-2**18+5 - host_data = np.random.randint(0, 10, n).astype(np.int32) + rng = np.random.default_rng(seed=42) + host_data = rng.integers(0, 10, size=n, dtype=np.int32) dev_data = cl_array.to_device(queue, host_data) knl(dev_data) diff --git a/examples/ipython-demo.ipynb b/examples/ipython-demo.ipynb index bf646511..b27617ed 100644 --- a/examples/ipython-demo.ipynb +++ b/examples/ipython-demo.ipynb @@ -1,202 +1,203 @@ { - "metadata": { - "name": "", - "signature": "sha256:81f3deed7cdc26b0fc756b3ee1eb6e8f9b1be96304ddfc6ff484d223c2b8a942" - }, - "nbformat": 3, - "nbformat_minor": 0, - "worksheets": [ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "cc7d0709", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "from __future__ import division\n", + "import numpy as np\n", + "import pyopencl as cl\n", + "import pyopencl.array" + ] + }, + { + "cell_type": "markdown", + "id": "8ac8d7bb", + "metadata": {}, + "source": [ + "Load the PyOpenCL IPython extension:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7023ca2f", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "%load_ext pyopencl.ipython_ext" + ] + }, + { + "cell_type": "markdown", + "id": "9544b53c", + "metadata": {}, + "source": [ + "Create an OpenCL context and a command queue:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fac17999", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "ctx = cl.create_some_context(interactive=True)\n", + "queue = cl.CommandQueue(ctx)" + ] + }, + { + "cell_type": "markdown", + "id": "a29daf04", + "metadata": {}, + "source": [ + "-----\n", + "\n", + "Define an OpenCL kernel using the `%%cl_kernel` magic:" + ] + }, { - "cells": [ - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from __future__ import division\n", - "import numpy as np\n", - "import pyopencl as cl\n", - "import pyopencl.array" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "output_type": "stream", - "stream": "stderr", - "text": [ - "/usr/lib/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/dist-packages/enthought': missing __init__.py\n", - " file, filename, etc = imp.find_module(subname, path)\n" - ] - } - ], - "prompt_number": 1 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Load the PyOpenCL IPython extension:" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "%load_ext pyopencl.ipython_ext" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 3 - }, - { - 
"cell_type": "markdown", - "metadata": {}, - "source": [ - "Create an OpenCL context and a command queue:" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "ctx = cl.create_some_context(interactive=True)\n", - "queue = cl.CommandQueue(ctx)" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "Choose platform:\n", - "[0] <pyopencl.Platform 'AMD Accelerated Parallel Processing' at 0x7fc14f1b0080>\n", - "[1] <pyopencl.Platform 'Intel(R) OpenCL' at 0x32aed00>\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "stream": "stdout", - "text": [ - "Choice [0]:0\n" - ] - }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "Set the environment variable PYOPENCL_CTX='0' to avoid being asked again.\n" - ] - } - ], - "prompt_number": 5 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "-----\n", - "\n", - "Define an OpenCL kernel using the `%%cl_kernel` magic:" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "%%cl_kernel -o \"-cl-fast-relaxed-math\"\n", - "\n", - "__kernel void sum_vector(__global const float *a,\n", - "__global const float *b, __global float *c)\n", - "{\n", - " int gid = get_global_id(0);\n", - " c[gid] = a[gid] + b[gid];\n", - "}" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 6 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This looks for `cl_ctx` or `ctx` in the user namespace to find a PyOpenCL context.\n", - "\n", - "Kernel names are automatically injected into the user namespace, so we can just use `sum_vector` from Python below.\n", - "\n", - "Now create some data to work on:" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "n = 10000\n", - "\n", - "a = cl.array.empty(queue, n, dtype=np.float32)\n", - "a.fill(15)\n", - "\n", - "b_host = np.random.randn(n).astype(np.float32)\n", - "b = cl.array.to_device(queue, b_host)\n", - "\n", - "c = cl.array.empty_like(a)" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 7 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Run the kernel:" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "sum_vector(queue, (n,), None, a.data, b.data, c.data)" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "metadata": {}, - "output_type": "pyout", - "prompt_number": 8, - "text": [ - "<pyopencl._cl.Event at 0x7fc14f3fdf30>" - ] - } - ], - "prompt_number": 8 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Check the result using `numpy` operations:" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "assert (c.get() == b_host + 15).all()" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 9 + "cell_type": "code", + "execution_count": null, + "id": "65c7e6c9", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false } - ], - "metadata": {} + }, + "outputs": [], + "source": [ + "%%cl_kernel -o \"-cl-fast-relaxed-math\"\n", + "\n", + "__kernel void sum_vector(__global const float *a,\n", + "__global const float *b, __global float *c)\n", + "{\n", + " int gid = get_global_id(0);\n", + " c[gid] = a[gid] + b[gid];\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "cfb57357", + "metadata": {}, + "source": [ + "This looks for `cl_ctx` or `ctx` in the user namespace to find a PyOpenCL context.\n", + "\n", + 
"Kernel names are automatically injected into the user namespace, so we can just use `sum_vector` from Python below.\n", + "\n", + "Now create some data to work on:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d80ff38", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "n = 10000\n", + "\n", + "a = cl.array.empty(queue, n, dtype=np.float32)\n", + "a.fill(15)\n", + "\n", + "b_host = np.random.randn(n).astype(np.float32)\n", + "b = cl.array.to_device(queue, b_host)\n", + "\n", + "c = cl.array.empty_like(a)" + ] + }, + { + "cell_type": "markdown", + "id": "61fccb61", + "metadata": {}, + "source": [ + "Run the kernel:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2ba991b3", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "sum_vector(queue, (n,), None, a.data, b.data, c.data)" + ] + }, + { + "cell_type": "markdown", + "id": "11a55b38", + "metadata": {}, + "source": [ + "Check the result using `numpy` operations:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ee3560c1", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "assert (c.get() == b_host + 15).all()" + ] } - ] -} \ No newline at end of file + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/test/test_algorithm.py b/test/test_algorithm.py index baee3c66..40fe854b 100644 --- a/test/test_algorithm.py +++ b/test/test_algorithm.py @@ -571,8 +571,9 @@ def test_scan(ctx_factory, dtype, scan_cls): knl = scan_cls(context, dtype, "a+b", "0") + rng = np.random.default_rng(seed=42) for n in scan_test_counts: - host_data = np.random.randint(0, 10, n).astype(dtype) + host_data = rng.integers(0, 10, n, dtype=dtype) dev_data = cl_array.to_device(queue, host_data) # /!\ fails on Nv GT2?? 
for some drivers @@ -617,7 +618,8 @@ def test_scan_with_vectorargs_with_offsets(ctx_factory, scan_cls): n = 20 - host_data = np.random.randint(0, 10, n).astype(float) + rng = np.random.default_rng(seed=42) + host_data = rng.integers(0, 10, n).astype(np.float64) dev_data = cl.array.to_device(queue, host_data) segment_data = np.zeros(n, dtype=int) dev_segment_data = cl.array.to_device(queue, segment_data) diff --git a/test/test_array.py b/test/test_array.py index db59902f..ffb0714c 100644 --- a/test/test_array.py +++ b/test/test_array.py @@ -757,8 +757,6 @@ def test_bitwise(ctx_factory): for a_dtype, b_dtype in product(dtypes, dtypes): ary_len = 16 - np.random.seed(10) - int32_min = np.iinfo(np.int32).min int32_max = np.iinfo(np.int32).max @@ -974,7 +972,9 @@ def test_stride_preservation(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) - a = np.random.rand(3, 3) + rng = np.random.default_rng(seed=42) + a = rng.random(size=(3, 3)) + at = a.T print(at.flags.f_contiguous, at.flags.c_contiguous) at_gpu = cl_array.to_device(queue, at) @@ -989,10 +989,11 @@ def test_stride_preservation(ctx_factory): def test_nan_arithmetic(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) + rng = np.random.default_rng(seed=42) def make_nan_contaminated_vector(size): - shape = (size,) - a = np.random.randn(*shape).astype(np.float32) + a = rng.standard_normal(size=(size,), dtype=np.float32) + from random import randrange for _i in range(size // 10): a[randrange(0, size)] = float("nan") @@ -1468,9 +1469,10 @@ def test_newaxis(ctx_factory): def test_squeeze(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) + rng = np.random.default_rng(seed=42) shape = (40, 2, 5, 100) - a_cpu = np.random.random(size=shape) + a_cpu = rng.random(size=shape) a_gpu = cl_array.to_device(queue, a_cpu) # Slice with length 1 on dimensions 0 and 1 @@ -1532,11 +1534,12 @@ def test_fancy_indexing(ctx_factory): pytest.xfail("numpypy: multi value setting is not supported") context = ctx_factory() queue = cl.CommandQueue(context) + rng = np.random.default_rng(seed=42) n = 2 ** 20 + 2**18 + 22 numpy_dest = np.zeros(n, dtype=np.int32) numpy_idx = np.arange(n, dtype=np.int32) - np.random.shuffle(numpy_idx) + rng.shuffle(numpy_idx) numpy_src = 20000+np.arange(n, dtype=np.int32) cl_dest = cl_array.to_device(queue, numpy_dest) @@ -1598,7 +1601,8 @@ def test_get_async(ctx_factory): pytest.xfail("the async get test fails on POCL + Nvidia," "at least the K40, as of pocl 1.6, 2021-01-20") - a = np.random.rand(10**6).astype(np.dtype("float32")) + rng = np.random.default_rng(seed=42) + a = rng.random(10**6, dtype=np.float32) a_gpu = cl_array.to_device(queue, a) b = a + a**5 + 1 b_gpu = a_gpu + a_gpu**5 + 1 @@ -1631,7 +1635,9 @@ def test_outoforderqueue_get(ctx_factory): properties=cl.command_queue_properties.OUT_OF_ORDER_EXEC_MODE_ENABLE) except Exception: pytest.skip("out-of-order queue not available") - a = np.random.rand(10**6).astype(np.dtype("float32")) + + rng = np.random.default_rng(seed=42) + a = rng.random(10**6, dtype=np.float32) a_gpu = cl_array.to_device(queue, a) b_gpu = a_gpu + a_gpu**5 + 1 b1 = b_gpu.get() # testing that this waits for events @@ -1650,7 +1656,9 @@ def test_outoforderqueue_copy(ctx_factory): properties=cl.command_queue_properties.OUT_OF_ORDER_EXEC_MODE_ENABLE) except Exception: pytest.skip("out-of-order queue not available") - a = np.random.rand(10**6).astype(np.dtype("float32")) + + rng = np.random.default_rng(seed=42) + a = rng.random(10**6, dtype=np.float32) a_gpu 
= cl_array.to_device(queue, a) c_gpu = a_gpu**2 - 7 b_gpu = c_gpu.copy() # testing that this waits for and creates events @@ -1673,8 +1681,11 @@ def test_outoforderqueue_indexing(ctx_factory): properties=cl.command_queue_properties.OUT_OF_ORDER_EXEC_MODE_ENABLE) except Exception: pytest.skip("out-of-order queue not available") - a = np.random.rand(10**6).astype(np.dtype("float32")) - i = (8e5 + 1e5 * np.random.rand(10**5)).astype(np.dtype("int32")) + + rng = np.random.default_rng(seed=42) + a = rng.random(10**6, dtype=np.float32) + i = (8e5 + 1e5 * rng.random(10**5)).astype(np.int32) + a_gpu = cl_array.to_device(queue, a) i_gpu = cl_array.to_device(queue, i) c_gpu = (a_gpu**2)[i_gpu - 10000] @@ -1701,7 +1712,9 @@ def test_outoforderqueue_reductions(ctx_factory): except Exception: pytest.skip("out-of-order queue not available") # 0/1 values to avoid accumulated rounding error - a = (np.random.rand(10**6) > 0.5).astype(np.dtype("float32")) + rng = np.random.default_rng(seed=42) + a = (rng.random(10**6) > 0.5).astype(np.float32) + a[800000] = 10 # all<5 looks true until near the end a_gpu = cl_array.to_device(queue, a) b1 = cl_array.sum(a_gpu).get() @@ -1792,8 +1805,7 @@ def test_stack(ctx_factory, input_dims, order): shape = (2, 2, 2)[:input_dims] axis = -1 if order == "F" else 0 - from numpy.random import default_rng - rng = default_rng() + rng = np.random.default_rng(seed=42) x_in = rng.random(size=shape) y_in = rng.random(size=shape) x_in = x_in if order == "C" else np.asfortranarray(x_in) @@ -1827,9 +1839,10 @@ def test_assign_different_strides(ctx_factory): # {{{ test_branch_operations_on_pure_scalars def test_branch_operations_on_pure_scalars(): - x = np.random.rand() - y = np.random.rand() - cond = np.random.choice([False, True]) + rng = np.random.default_rng(seed=42) + x = rng.random() + y = rng.random() + cond = rng.choice([False, True]) np.testing.assert_allclose(np.maximum(x, y), cl_array.maximum(x, y)) @@ -1892,7 +1905,8 @@ def test_slice_copy(ctx_factory): cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) - x = cl.array.to_device(queue, np.random.rand(96, 27)) + rng = np.random.default_rng(seed=42) + x = cl.array.to_device(queue, rng.random(size=(96, 27))) y = x[::8, ::3] with pytest.raises(RuntimeError): y.copy() @@ -1907,7 +1921,8 @@ def test_ravel(ctx_factory, order): ctx = ctx_factory() cq = cl.CommandQueue(ctx) - x = np.random.randn(10, 4) + rng = np.random.default_rng(seed=42) + x = rng.standard_normal(size=(10, 4)) if order == "F": x = np.asfortranarray(x) @@ -1951,12 +1966,11 @@ def test_arithmetic_on_non_scalars(ctx_factory): @pytest.mark.parametrize("which", ("add", "sub", "mul", "truediv")) def test_arithmetic_with_device_scalars(ctx_factory, which): import operator - from numpy.random import default_rng ctx = ctx_factory() cq = cl.CommandQueue(ctx) - rng = default_rng() + rng = np.random.default_rng(seed=42) ndim = rng.integers(1, 5) shape = tuple(rng.integers(2, 7) for i in range(ndim)) @@ -1982,7 +1996,7 @@ def test_if_positive_with_scalars(ctx_factory, then_type, else_type): ctx = ctx_factory() cq = cl.CommandQueue(ctx) - rng = np.random.default_rng() + rng = np.random.default_rng(seed=42) shape = (512,) criterion_np = rng.random(shape) diff --git a/test/test_clmath.py b/test/test_clmath.py index 409875f8..e5032dc1 100644 --- a/test/test_clmath.py +++ b/test/test_clmath.py @@ -334,10 +334,10 @@ def test_complex_bessel(ctx_factory, ref_src): v = 40 n = 10**5 - np.random.seed(13) + rng = np.random.default_rng(seed=13) z = ( np.logspace(-5, 2, n) - * np.exp(1j * 
2 * np.pi * np.random.rand(n))) + * np.exp(1j * 2 * np.pi * rng.random(n))) if ref_src == "pyfmmlib": pyfmmlib = pytest.importorskip("pyfmmlib") @@ -404,11 +404,11 @@ def test_hankel_01_complex(ctx_factory, ref_src): from pytest import skip skip("no double precision support--cannot test complex bessel function") + rng = np.random.default_rng(seed=11) n = 10**6 - np.random.seed(11) z = ( np.logspace(-5, 2, n) - * np.exp(1j * 2 * np.pi * np.random.rand(n))) + * np.exp(1j * 2 * np.pi * rng.random(n))) def get_err(check, ref): return np.max(np.abs(check-ref)) / np.max(np.abs(ref)) @@ -454,7 +454,9 @@ def test_outoforderqueue_clmath(ctx_factory): properties=cl.command_queue_properties.OUT_OF_ORDER_EXEC_MODE_ENABLE) except Exception: pytest.skip("out-of-order queue not available") - a = np.random.rand(10**6).astype(np.dtype("float32")) + + rng = np.random.default_rng(seed=42) + a = rng.random(10**6, dtype=np.float32) a_gpu = cl_array.to_device(queue, a) # testing that clmath functions wait for and create events b_gpu = clmath.fabs(clmath.sin(a_gpu * 5)) diff --git a/test/test_enqueue_copy.py b/test/test_enqueue_copy.py index 402bc8b9..9e2ca278 100644 --- a/test/test_enqueue_copy.py +++ b/test/test_enqueue_copy.py @@ -73,15 +73,8 @@ def test_enqueue_copy_rect_2d(ctx_factory, honor_skip=True): buf_out_shp = 300, 400 # shape of 2nd device buffer # Create host array of random values. - h_ary_in = \ - np.array( - np.random.randint( - 0, - 256, - np.product(ary_in_shp) - ), - dtype=np.uint8 - ).reshape(ary_in_shp) + rng = np.random.default_rng(seed=42) + h_ary_in = rng.integers(0, 256, ary_in_shp, dtype=np.uint8) # Create device buffers d_in_buf = cl.Buffer(ctx, cl.mem_flags.READ_ONLY, size=np.product(buf_in_shp)) @@ -169,15 +162,8 @@ def test_enqueue_copy_rect_3d(ctx_factory, honor_skip=True): buf_out_shp = 300, 400, 40 # shape of 2nd device buffer # Create host array of random values. 
- h_ary_in = \ - np.array( - np.random.randint( - 0, - 256, - np.product(ary_in_shp) - ), - dtype=np.uint8 - ).reshape(ary_in_shp) + rng = np.random.default_rng(seed=42) + h_ary_in = rng.integers(0, 256, ary_in_shp, dtype=np.uint8) # Create device buffers d_in_buf = cl.Buffer(ctx, cl.mem_flags.READ_ONLY, size=np.product(buf_in_shp)) diff --git a/test/test_wrapper.py b/test/test_wrapper.py index e7d87b29..22f27f74 100644 --- a/test/test_wrapper.py +++ b/test/test_wrapper.py @@ -353,7 +353,8 @@ def test_that_python_args_fail(ctx_factory): { a[get_global_id(0)] *= (b+c); } """).build() - a = np.random.rand(50000) + rng = np.random.default_rng(seed=42) + a = rng.random(50000) queue = cl.CommandQueue(context) mf = cl.mem_flags a_buf = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a) @@ -413,7 +414,9 @@ def test_image_2d(ctx_factory): """).build() num_channels = 1 - a = np.random.rand(1024, 512, num_channels).astype(np.float32) + + rng = np.random.default_rng(seed=42) + a = rng.random((1024, 512, num_channels), dtype=np.float32) if num_channels == 1: a = a[:, :, 0] @@ -489,7 +492,9 @@ def test_image_3d(ctx_factory): num_channels = 2 shape = (3, 4, 2) - a = np.random.random(shape + (num_channels,)).astype(np.float32) + + rng = np.random.default_rng(seed=42) + a = rng.random(size=shape + (num_channels,), dtype=np.float32) queue = cl.CommandQueue(context) try: @@ -532,7 +537,8 @@ def test_copy_buffer(ctx_factory): queue = cl.CommandQueue(context) mf = cl.mem_flags - a = np.random.rand(50000).astype(np.float32) + rng = np.random.default_rng(seed=42) + a = rng.random(50000, dtype=np.float32) b = np.empty_like(a) buf1 = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a) @@ -811,7 +817,8 @@ def test_buffer_get_host_array(ctx_factory): ctx = ctx_factory() mf = cl.mem_flags - host_buf = np.random.rand(25).astype(np.float32) + rng = np.random.default_rng(seed=42) + host_buf = rng.random(25, dtype=np.float32) buf = cl.Buffer(ctx, mf.READ_WRITE | mf.USE_HOST_PTR, hostbuf=host_buf) host_buf2 = buf.get_host_array(25, np.float32) assert (host_buf == host_buf2).all() @@ -827,7 +834,7 @@ def test_buffer_get_host_array(ctx_factory): except cl.LogicError: pass - host_buf = np.random.rand(25).astype(np.float32) + host_buf = rng.random(25, dtype=np.float32) buf = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=host_buf) try: host_buf2 = buf.get_host_array(25, np.float32) @@ -867,8 +874,9 @@ def test_event_set_callback(ctx_factory): if ctx._get_cl_version() < (1, 1): pytest.skip("OpenCL 1.1 or newer required for set_callback") - a_np = np.random.rand(50000).astype(np.float32) - b_np = np.random.rand(50000).astype(np.float32) + rng = np.random.default_rng(seed=42) + a_np = rng.random(50000, dtype=np.float32) + b_np = rng.random(50000, dtype=np.float32) got_called = [] @@ -926,7 +934,9 @@ def test_global_offset(ctx_factory): """).build() n = 50 - a = np.random.rand(n).astype(np.float32) + + rng = np.random.default_rng(seed=42) + a = rng.random(n, dtype=np.float32) queue = cl.CommandQueue(context) mf = cl.mem_flags @@ -954,7 +964,9 @@ def test_sub_buffers(ctx_factory): queue = cl.CommandQueue(ctx) n = 30000 - a = (np.random.rand(n) * 100).astype(np.uint8) + + rng = np.random.default_rng(seed=42) + a = (rng.random(n) * 100).astype(np.uint8) mf = cl.mem_flags a_buf = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a) -- GitLab