from __future__ import division

__copyright__ = "Copyright (C) 2012 Andreas Kloeckner"

__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""

import sys
import numpy as np
import loopy as lp
import pyopencl as cl
import pyopencl.clrandom  # noqa
import pytest

import logging
logger = logging.getLogger(__name__)

try:
    import faulthandler
except ImportError:
    pass
else:
    faulthandler.enable()

from pyopencl.tools import pytest_generate_tests_for_pyopencl \
        as pytest_generate_tests

__all__ = [
        "pytest_generate_tests",
        "cl"  # 'cl.create_some_context'
        ]


def test_type_inference_no_artificial_doubles(ctx_factory):
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{[i]: 0<=i<n}",
            """
                <> bb = a[i] - b[i]
                c[i] = bb
                """,
            [
                lp.GlobalArg("a", np.float32, shape=("n",)),
                lp.GlobalArg("b", np.float32, shape=("n",)),
                lp.GlobalArg("c", np.float32, shape=("n",)),
                lp.ValueArg("n", np.int32),
                ],
            assumptions="n>=1")

    for k in lp.generate_loop_schedules(knl):
        code = lp.generate_code(k)
        assert "double" not in code


def test_sized_and_complex_literals(ctx_factory):
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{[i]: 0<=i<n}",
            """
                <> aa = 5jf
                <> bb = 5j
                a[i] = imag(aa)
                b[i] = imag(bb)
                c[i] = 5f
                """,
            [
                lp.GlobalArg("a", np.float32, shape=("n",)),
                lp.GlobalArg("b", np.float32, shape=("n",)),
                lp.GlobalArg("c", np.float32, shape=("n",)),
                lp.ValueArg("n", np.int32),
                ],
            assumptions="n>=1")

    lp.auto_test_vs_ref(knl, ctx, knl, parameters=dict(n=5))


def test_simple_side_effect(ctx_factory):
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{[i,j]: 0<=i,j<100}",
            """
                a[i] = a[i] + 1
                """,
            [lp.GlobalArg("a", np.float32, shape=(100,))]
            )

    kernel_gen = lp.generate_loop_schedules(knl)

    for gen_knl in kernel_gen:
        print gen_knl
        compiled = lp.CompiledKernel(ctx, gen_knl)
        print compiled.code


def test_nonsense_reduction(ctx_factory):
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{[i]: 0<=i<100}",
            """
                a[i] = sum(i, 2)
                """,
            [lp.GlobalArg("a", np.float32, shape=(100,))]
            )

    import pytest
    with pytest.raises(RuntimeError):
        list(lp.generate_loop_schedules(knl))


def test_owed_barriers(ctx_factory):
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{[i]: 0<=i<100}",
            [
                "<float32> z[i] = a[i]"
                ],
            [lp.GlobalArg("a", np.float32, shape=(100,))]
            )

    knl = lp.tag_inames(knl, dict(i="l.0"))

    kernel_gen = lp.generate_loop_schedules(knl)

    for gen_knl in kernel_gen:
        compiled = lp.CompiledKernel(ctx, gen_knl)
        print compiled.code


def test_wg_too_small(ctx_factory):
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{[i]: 0<=i<100}",
            [
                "<float32> z[i] = a[i] {id=copy}"
                ],
            [lp.GlobalArg("a", np.float32,
                shape=(100,))],
            local_sizes={0: 16})

    knl = lp.tag_inames(knl, dict(i="l.0"))

    kernel_gen = lp.generate_loop_schedules(knl)

    import pytest
    for gen_knl in kernel_gen:
        with pytest.raises(RuntimeError):
            lp.CompiledKernel(ctx, gen_knl).get_code()


def test_join_inames(ctx_factory):
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{[i,j]: 0<=i,j<16}",
            [
                "b[i,j] = 2*a[i,j]"
                ],
            [
                lp.GlobalArg("a", np.float32, shape=(16, 16,)),
                lp.GlobalArg("b", np.float32, shape=(16, 16,))
                ],
            )

    ref_knl = knl

    knl = lp.add_prefetch(knl, "a", sweep_inames=["i", "j"])
    knl = lp.join_inames(knl, ["a_dim_0", "a_dim_1"])

    lp.auto_test_vs_ref(ref_knl, ctx, knl)


def test_multi_cse(ctx_factory):
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{[i]: 0<=i<100}",
            [
                "<> z[i] = a[i] + a[i]**2"
                ],
            [lp.GlobalArg("a", np.float32, shape=(100,))],
            local_sizes={0: 16})

    knl = lp.split_iname(knl, "i", 16, inner_tag="l.0")
    knl = lp.add_prefetch(knl, "a", [])

    kernel_gen = lp.generate_loop_schedules(knl)

    for gen_knl in kernel_gen:
        compiled = lp.CompiledKernel(ctx, gen_knl)
        print compiled.code


def test_stencil(ctx_factory):
    ctx = ctx_factory()

    # n=32 causes corner case behavior in size calculations for temporary (a
    # non-unifiable, two-constant-segments PwAff as the base index)

    n = 256
    knl = lp.make_kernel(ctx.devices[0],
            "{[i,j]: 0<= i,j < %d}" % n,
            [
                "a_offset(ii, jj) := a[ii+1, jj+1]",
                "z[i,j] = -2*a_offset(i,j)"
                " + a_offset(i,j-1)"
                " + a_offset(i,j+1)"
                " + a_offset(i-1,j)"
                " + a_offset(i+1,j)"
                ],
            [
                lp.GlobalArg("a", np.float32, shape=(n+2, n+2,)),
                lp.GlobalArg("z", np.float32, shape=(n+2, n+2,))
                ])

    ref_knl = knl

    def variant_1(knl):
        knl = lp.split_iname(knl, "i", 16, outer_tag="g.1", inner_tag="l.1")
        knl = lp.split_iname(knl, "j", 16, outer_tag="g.0", inner_tag="l.0")
        knl = lp.add_prefetch(knl, "a", ["i_inner", "j_inner"])
        knl = lp.set_loop_priority(knl, ["a_dim_0_outer", "a_dim_1_outer"])
        return knl

    def variant_2(knl):
        knl = lp.split_iname(knl, "i", 16, outer_tag="g.1", inner_tag="l.1")
        knl = lp.split_iname(knl, "j", 16, outer_tag="g.0", inner_tag="l.0")
        knl = lp.add_prefetch(knl, "a", ["i_inner", "j_inner"],
                fetch_bounding_box=True)
        knl = lp.set_loop_priority(knl, ["a_dim_0_outer", "a_dim_1_outer"])
        return knl

    for variant in [variant_1, variant_2]:
        lp.auto_test_vs_ref(ref_knl, ctx, variant(knl),
                fills_entire_output=False, print_ref_code=False,
                op_count=[n*n], op_label=["cells"])


def test_stencil_with_overfetch(ctx_factory):
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{[i,j]: 0<= i,j < n}",
            [
                "a_offset(ii, jj) := a[ii+2, jj+2]",
                "z[i,j] = -2*a_offset(i,j)"
                " + a_offset(i,j-1)"
                " + a_offset(i,j+1)"
                " + a_offset(i-1,j)"
                " + a_offset(i+1,j)"
                " + a_offset(i,j-2)"
                " + a_offset(i,j+2)"
                " + a_offset(i-2,j)"
                " + a_offset(i+2,j)"
                ],
            assumptions="n>=1")

    knl = lp.add_and_infer_dtypes(knl, dict(a=np.float32))

    ref_knl = knl

    def variant_overfetch(knl):
        knl = lp.split_iname(knl, "i", 16, outer_tag="g.1", inner_tag="l.1",
                slabs=(1, 1))
        knl = lp.split_iname(knl, "j", 16, outer_tag="g.0", inner_tag="l.0",
                slabs=(1, 1))
        knl = lp.add_prefetch(knl, "a", ["i_inner", "j_inner"],
                fetch_bounding_box=True)
        knl = lp.set_loop_priority(knl, ["a_dim_0_outer", "a_dim_1_outer"])
        return knl

    for variant in [variant_overfetch]:
        n = 200
        lp.auto_test_vs_ref(ref_knl, ctx, variant(knl),
                fills_entire_output=False, print_ref_code=False,
                op_count=[n*n], parameters=dict(n=n), op_label=["cells"])


def test_eq_constraint(ctx_factory):
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{[i,j]: 0<= i,j < 32}",
            [
                "a[i] = b[i]"
                ],
            [
lp.GlobalArg("a", np.float32, shape=(1000,)), lp.GlobalArg("b", np.float32, shape=(1000,)) ]) knl = lp.split_iname(knl, "i", 16, outer_tag="g.0") knl = lp.split_iname(knl, "i_inner", 16, outer_tag=None, inner_tag="l.0") kernel_gen = lp.generate_loop_schedules(knl) for knl in kernel_gen: print lp.generate_code(knl) def test_argmax(ctx_factory): dtype = np.dtype(np.float32) ctx = ctx_factory() queue = cl.CommandQueue(ctx) order = "C" n = 10000 knl = lp.make_kernel(ctx.devices[0], "{[i]: 0<=i<%d}" % n, [ "<> result = argmax(i, fabs(a[i]))", "max_idx = result.index", "max_val = result.value", ], [ lp.GlobalArg("a", dtype, shape=(n,), order=order), lp.GlobalArg("max_idx", np.int32, shape=(), order=order), lp.GlobalArg("max_val", dtype, shape=(), order=order), ]) a = np.random.randn(10000).astype(dtype) cknl = lp.CompiledKernel(ctx, knl) evt, (max_idx, max_val) = cknl(queue, a=a, out_host=True) assert max_val == np.max(np.abs(a)) assert max_idx == np.where(np.abs(a) == max_val)[-1] # {{{ code generator fuzzing def make_random_value(): from random import randrange, uniform v = randrange(3) if v == 0: while True: z = randrange(-1000, 1000) if z: return z elif v == 1: return uniform(-10, 10) else: cval = uniform(-10, 10) + 1j*uniform(-10, 10) if randrange(0, 2) == 0: return np.complex128(cval) else: return np.complex128(cval) def make_random_expression(var_values, size): from random import randrange import pymbolic.primitives as p v = randrange(1500) size[0] += 1 if v < 500 and size[0] < 40: term_count = randrange(2, 5) if randrange(2) < 1: cls = p.Sum else: cls = p.Product return cls(tuple( make_random_expression(var_values, size) for i in range(term_count))) elif v < 750: return make_random_value() elif v < 1000: var_name = "var_%d" % len(var_values) assert var_name not in var_values var_values[var_name] = make_random_value() return p.Variable(var_name) elif v < 1250: # Cannot use '-' because that destroys numpy constants. return p.Sum(( make_random_expression(var_values, size), - make_random_expression(var_values, size))) elif v < 1500: # Cannot use '/' because that destroys numpy constants. 
        return p.Quotient(
                make_random_expression(var_values, size),
                make_random_expression(var_values, size))


def generate_random_fuzz_examples(count):
    for i in xrange(count):
        size = [0]
        var_values = {}
        expr = make_random_expression(var_values, size)
        yield expr, var_values


def test_fuzz_code_generator(ctx_factory):
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    #from expr_fuzz import get_fuzz_examples
    for expr, var_values in generate_random_fuzz_examples(50):
    #for expr, var_values in get_fuzz_examples():
        from pymbolic import evaluate
        try:
            true_value = evaluate(expr, var_values)
        except ZeroDivisionError:
            continue

        def get_dtype(x):
            if isinstance(x, (complex, np.complexfloating)):
                return np.complex128
            else:
                return np.float64

        knl = lp.make_kernel(ctx.devices[0], "{ : }",
                [lp.ExpressionInstruction("value", expr)],
                [lp.GlobalArg("value", np.complex128, shape=())]
                + [
                    lp.ValueArg(name, get_dtype(val))
                    for name, val in var_values.iteritems()
                    ])

        ck = lp.CompiledKernel(ctx, knl)
        evt, (lp_value,) = ck(queue, out_host=True, **var_values)
        err = abs(true_value-lp_value)/abs(true_value)
        if abs(err) > 1e-10:
            print 80*"-"
            print "WRONG: rel error=%g" % err
            print "true=%r" % true_value
            print "loopy=%r" % lp_value
            print 80*"-"
            print ck.code
            print 80*"-"
            print var_values
            print 80*"-"
            print repr(expr)
            print 80*"-"
            print expr
            print 80*"-"
            1/0

# }}}


def test_empty_reduction(ctx_factory):
    dtype = np.dtype(np.float32)
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    knl = lp.make_kernel(ctx.devices[0],
            [
                "{[i]: 0<=i<20}",
                "[i] -> {[j]: 0<=j<0}"
                ],
            [
                "a[i] = sum(j, j)",
                ],
            [
                lp.GlobalArg("a", dtype, (20,)),
                ])

    cknl = lp.CompiledKernel(ctx, knl)

    evt, (a,) = cknl(queue)
    assert (a.get() == 0).all()


def test_nested_dependent_reduction(ctx_factory):
    dtype = np.dtype(np.int32)
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    knl = lp.make_kernel(ctx.devices[0],
            [
                "{[i]: 0<=i<n}",
                "{[j]: 0<=j<i+sumlen}"
                ],
            [
                "<> sumlen = l[i]",
                "a[i] = sum(j, j)",
                ],
            [
                lp.ValueArg("n", np.int32),
                lp.GlobalArg("a", dtype, ("n",)),
                lp.GlobalArg("l", np.int32, ("n",)),
                ])

    cknl = lp.CompiledKernel(ctx, knl)

    n = 330
    l = np.arange(n, dtype=np.int32)
    evt, (a,) = cknl(queue, l=l, n=n, out_host=True)

    tgt_result = (2*l-1)*2*l/2
    assert (a == tgt_result).all()


def test_multi_nested_dependent_reduction(ctx_factory):
    dtype = np.dtype(np.int32)
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            [
                "{[itgt]: 0 <= itgt < ntgts}",
                "{[isrc_box]: 0 <= isrc_box < nboxes}",
                "{[isrc]: 0 <= isrc < npart}"
                ],
            [
                "<> npart = nparticles_per_box[isrc_box]",
                "a[itgt] = sum((isrc_box, isrc), 1)",
                ],
            [
                lp.ValueArg("n", np.int32),
                lp.GlobalArg("a", dtype, ("n",)),
                lp.GlobalArg("nparticles_per_box", np.int32, ("nboxes",)),
                lp.ValueArg("ntgts", np.int32),
                lp.ValueArg("nboxes", np.int32),
                ],
            assumptions="ntgts>=1")

    cknl = lp.CompiledKernel(ctx, knl)
    print cknl.code

    # FIXME: Actually test functionality.


def test_recursive_nested_dependent_reduction(ctx_factory):
    dtype = np.dtype(np.int32)
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            [
                "{[itgt]: 0 <= itgt < ntgts}",
                "{[isrc_box]: 0 <= isrc_box < nboxes}",
                "{[isrc]: 0 <= isrc < npart}"
                ],
            [
                "<> npart = nparticles_per_box[isrc_box]",
                "<> boxsum = sum(isrc, isrc+isrc_box+itgt)",
                "a[itgt] = sum(isrc_box, boxsum)",
                ],
            [
                lp.ValueArg("n", np.int32),
                lp.GlobalArg("a", dtype, ("n",)),
                lp.GlobalArg("nparticles_per_box", np.int32, ("nboxes",)),
                lp.ValueArg("ntgts", np.int32),
                lp.ValueArg("nboxes", np.int32),
                ],
            assumptions="ntgts>=1")

    cknl = lp.CompiledKernel(ctx, knl)
    print cknl.get_code()

    # FIXME: Actually test functionality.
def test_dependent_loop_bounds(ctx_factory):
    dtype = np.dtype(np.float32)
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            [
                "{[i]: 0<=i<n}",
                "{[jj]: 0<=jj<row_len}"
                ],
            [
                "<> row_len = a_rowstarts[i+1] - a_rowstarts[i]",
                "a_sum[i] = sum(jj, a_values[[a_rowstarts[i]+jj]])",
                ],
            [
                lp.GlobalArg("a_rowstarts", np.int32, shape=lp.auto),
                lp.GlobalArg("a_indices", np.int32, shape=lp.auto),
                lp.GlobalArg("a_values", dtype),
                lp.GlobalArg("a_sum", dtype, shape=lp.auto),
                lp.ValueArg("n", np.int32),
                ],
            assumptions="n>=1 and row_len>=1")

    cknl = lp.CompiledKernel(ctx, knl)
    print "---------------------------------------------------"
    print cknl.get_highlighted_code()
    print "---------------------------------------------------"


def test_dependent_loop_bounds_2(ctx_factory):
    dtype = np.dtype(np.float32)
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            [
                "{[i]: 0<=i<n}",
                "{[jj]: 0<=jj<row_len}"
                ],
            [
                "<> row_start = a_rowstarts[i]",
                "<> row_len = a_rowstarts[i+1] - row_start",
                "ax[i] = sum(jj, a_values[[row_start+jj]])",
                ],
            [
                lp.GlobalArg("a_rowstarts", np.int32, shape=lp.auto),
                lp.GlobalArg("a_indices", np.int32, shape=lp.auto),
                lp.GlobalArg("a_values", dtype, strides=(1,)),
                lp.GlobalArg("ax", dtype, shape=lp.auto),
                lp.ValueArg("n", np.int32),
                ],
            assumptions="n>=1 and row_len>=1")

    knl = lp.split_iname(knl, "i", 128, outer_tag="g.0",
            inner_tag="l.0")
    cknl = lp.CompiledKernel(ctx, knl)
    print "---------------------------------------------------"
    print cknl.get_highlighted_code()
    print "---------------------------------------------------"


def test_dependent_loop_bounds_3(ctx_factory):
    # The point of this test is that it shows a dependency between
    # domains that is exclusively mediated by the row_len temporary.
    # It also makes sure that row_len gets read before any
    # conditionals use it.
    dtype = np.dtype(np.float32)
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            [
                "{[i]: 0<=i<n}",
                "{[jj]: 0<=jj<row_len}"
                ],
            [
                "<> row_len = a_row_lengths[i]",
                "a[i,jj] = 1",
                ],
            [
                lp.GlobalArg("a_row_lengths", np.int32, shape=lp.auto),
                lp.GlobalArg("a", dtype, shape=("n,n"), order="C"),
                lp.ValueArg("n", np.int32),
                ])

    assert knl.parents_per_domain()[1] == 0

    knl = lp.split_iname(knl, "i", 128, outer_tag="g.0",
            inner_tag="l.0")

    cknl = lp.CompiledKernel(ctx, knl)
    print "---------------------------------------------------"
    print cknl.get_highlighted_code()
    print "---------------------------------------------------"

    knl_bad = lp.split_iname(knl, "jj", 128, outer_tag="g.1",
            inner_tag="l.1")

    import pytest
    with pytest.raises(RuntimeError):
        list(lp.generate_loop_schedules(knl_bad))


def test_bare_data_dependency(ctx_factory):
    dtype = np.dtype(np.float32)
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    knl = lp.make_kernel(ctx.devices[0],
            [
                "[znirp] -> {[i]: 0<=i<znirp}",
                ],
            [
                "<> znirp = n",
                "a[i] = 1",
                ],
            [
                lp.GlobalArg("a", dtype, shape=("n"), order="C"),
                lp.ValueArg("n", np.int32),
                ])

    cknl = lp.CompiledKernel(ctx, knl)
    n = 20000
    evt, (a,) = cknl(queue, n=n, out_host=True)

    assert a.shape == (n,)
    assert (a == 1).all()


# {{{ ILP write race tests

def test_ilp_write_race_avoidance_local(ctx_factory):
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{[i,j]: 0<=i<16 and 0<=j<17}",
            [
                "<> a[i] = 5+i+j",
                ],
            [])

    knl = lp.tag_inames(knl, dict(i="l.0", j="ilp"))

    for k in lp.generate_loop_schedules(knl):
        assert k.temporary_variables["a"].shape == (16, 17)


def test_ilp_write_race_avoidance_private(ctx_factory):
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{[j]: 0<=j<16 }",
            [
                "<> a = 5+j",
                ],
            [])

    knl = lp.tag_inames(knl, dict(j="ilp"))

    for k in lp.generate_loop_schedules(knl):
        assert k.temporary_variables["a"].shape == (16,)

# }}}
def test_write_parameter(ctx_factory):
    dtype = np.float32
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            [
                "{[isrc_box]: 0<=isrc_box<nsrc_boxes}",
                "{[isrc]: isrc_start<=isrc<isrc_end}",
                ],
            """
                <> src_ibox = source_boxes[isrc_box]
                <> isrc_start = box_source_starts[src_ibox]
                <> isrc_end = isrc_start+box_source_counts_nonchild[src_ibox]
                <> strength = strengths[isrc] {id=set_strength}
                """,
            [
                lp.GlobalArg("box_source_starts,box_source_counts_nonchild",
                    None, shape=None),
                lp.GlobalArg("strengths",
                    None, shape="nsources"),
                "..."])

    print knl

    assert "isrc_box" in knl.insn_inames("set_strength")

    print lp.CompiledKernel(ctx, knl).get_highlighted_code(
            dict(
                source_boxes=np.int32,
                box_source_starts=np.int32,
                box_source_counts_nonchild=np.int32,
                strengths=np.float64,
                nsources=np.int32,
                nsrc_boxes=np.int32,
                ))


def test_inames_deps_from_write_subscript(ctx_factory):
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{[i,j]: 0<=i,j<n}",
            """
                <> src_ibox = source_boxes[i]
                <> something = 5
                a[src_ibox] = sum(j, something) {id=myred}
                """,
            [
                lp.GlobalArg("box_source_starts,box_source_counts_nonchild,a",
                    None, shape=None),
                "..."])

    print knl

    assert "i" in knl.insn_inames("myred")


def test_rob_stroud_bernstein(ctx_factory):
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{[el, i2, alpha1,alpha2]: \
                    0 <= el < nels and \
                    0 <= i2 < nqp1d and \
                    0 <= alpha1 <= deg and 0 <= alpha2 <= deg-alpha1 }",
            """
                <> xi = qpts[1, i2] {inames=+el}
                <> s = 1-xi
                <> r = xi/s
                <> aind = 0 {id=aind_init,inames=+i2:el}

                <> w = s**(deg-alpha1) {id=init_w}

                tmp[el,alpha1,i2] = tmp[el,alpha1,i2] + w * coeffs[aind] \
                        {id=write_tmp,inames=+alpha2}
                w = w * r * ( deg - alpha1 - alpha2 ) / (1 + alpha2) \
                        {id=update_w,dep=init_w:write_tmp}
                aind = aind + 1 \
                        {id=aind_incr,\
                        dep=aind_init:write_tmp:update_w, \
                        inames=+el:i2:alpha1:alpha2}
                """,
            [
                # Must declare coeffs to have "no" shape, to keep loopy
                # from trying to figure out the shape automatically.

                lp.GlobalArg("coeffs", None, shape=None),
                "..."
                ],
            assumptions="deg>=0 and nels>=1"
            )

    knl = lp.fix_parameters(knl, nqp1d=7, deg=4)
    knl = lp.split_iname(knl, "el", 16, inner_tag="l.0")
    knl = lp.split_iname(knl, "el_outer", 2, outer_tag="g.0",
            inner_tag="ilp", slabs=(0, 1))
    knl = lp.tag_inames(knl, dict(i2="l.1", alpha1="unr", alpha2="unr"))

    print lp.CompiledKernel(ctx, knl).get_highlighted_code(
            dict(
                qpts=np.float32,
                coeffs=np.float32,
                tmp=np.float32,
                ))


def test_rob_stroud_bernstein_full(ctx_factory):
    #logging.basicConfig(level=logging.DEBUG)
    ctx = ctx_factory()

    # NOTE: result would have to be zero-filled beforehand

    knl = lp.make_kernel(ctx.devices[0],
            "{[el, i2, alpha1,alpha2, i1_2, alpha1_2, i2_2]: \
                    0 <= el < nels and \
                    0 <= i2 < nqp1d and \
                    0 <= alpha1 <= deg and 0 <= alpha2 <= deg-alpha1 and\
                    \
                    0 <= i1_2 < nqp1d and \
                    0 <= alpha1_2 <= deg and \
                    0 <= i2_2 < nqp1d \
                    }",
            """
                <> xi = qpts[1, i2] {inames=+el}
                <> s = 1-xi
                <> r = xi/s
                <> aind = 0 {id=aind_init,inames=+i2:el}

                <> w = s**(deg-alpha1) {id=init_w}

                <> tmp[alpha1,i2] = tmp[alpha1,i2] + w * coeffs[aind] \
                        {id=write_tmp,inames=+alpha2}
                w = w * r * ( deg - alpha1 - alpha2 ) / (1 + alpha2) \
                        {id=update_w,dep=init_w:write_tmp}
                aind = aind + 1 \
                        {id=aind_incr,\
                        dep=aind_init:write_tmp:update_w, \
                        inames=+el:i2:alpha1:alpha2}

                <> xi2 = qpts[0, i1_2] {dep=aind_incr,inames=+el}
                <> s2 = 1-xi2
                <> r2 = xi2/s2
                <> w2 = s2**deg

                result[el, i1_2, i2_2] = result[el, i1_2, i2_2] + \
                        w2 * tmp[alpha1_2, i2_2] \
                        {inames=el:alpha1_2:i1_2:i2_2}

                w2 = w2 * r2 * (deg-alpha1_2) / (1+alpha1_2)
                """,
            [
                # Must declare coeffs to have "no" shape, to keep loopy
                # from trying to figure out the shape automatically.

                lp.GlobalArg("coeffs", None, shape=None),
                "..."
                ],
            assumptions="deg>=0 and nels>=1"
            )

    knl = lp.fix_parameters(knl, nqp1d=7, deg=4)

    if 0:
        knl = lp.split_iname(knl, "el", 16, inner_tag="l.0")
        knl = lp.split_iname(knl, "el_outer", 2, outer_tag="g.0",
                inner_tag="ilp", slabs=(0, 1))
        knl = lp.tag_inames(knl, dict(i2="l.1", alpha1="unr", alpha2="unr"))

    print lp.CompiledKernel(ctx, knl).get_highlighted_code(
            dict(
                qpts=np.float32,
                tmp=np.float32,
                coeffs=np.float32,
                result=np.float32,
                ))


def test_conditional(ctx_factory):
    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{ [i,j]: 0<=i,j<n }",
            """
                <> my_a = a[i,j] {id=read_a}
                <> a_less_than_zero = my_a < 0 {dep=read_a,inames=i:j}
                my_a = 2*my_a {id=twice_a,dep=read_a,if=a_less_than_zero}
                my_a = my_a+1 {id=aplus,dep=twice_a,if=a_less_than_zero}
                out[i,j] = 2*my_a {dep=aplus}
                """,
            [
                lp.GlobalArg("a", np.float32, shape=lp.auto),
                lp.GlobalArg("out", np.float32, shape=lp.auto),
                "..."
                ])

    ref_knl = knl

    lp.auto_test_vs_ref(ref_knl, ctx, knl,
            parameters=dict(
                n=200
                ))


def test_ilp_loop_bound(ctx_factory):
    # The salient bit of this test is that a joint bound on (outer, inner)
    # from a split occurs in a setting where the inner loop has been ilp'ed.
    # In 'normal' parallel loops, the inner index is available for conditionals
    # throughout. In ILP'd loops, not so much.

    ctx = ctx_factory()

    knl = lp.make_kernel(ctx.devices[0],
            "{ [i,j,k]: 0<=i,j,k<n }",
            """
                out[i,k] = sum(j, a[i,j]*b[j,k])
                """,
            [
                lp.GlobalArg("a", np.float32, shape=lp.auto),
                lp.GlobalArg("b", np.float32, shape=lp.auto),
                lp.GlobalArg("out", np.float32, shape=lp.auto),
                "..."
                ],
            assumptions="n>=1")

    ref_knl = knl

    knl = lp.split_iname(knl, "j", 4, inner_tag="ilp")

    lp.auto_test_vs_ref(ref_knl, ctx, knl,
            parameters=dict(
                n=200
                ))


if __name__ == "__main__":
    if len(sys.argv) > 1:
        exec(sys.argv[1])
    else:
        from py.test.cmdline import main
        main([__file__])

# vim: foldmethod=marker