From f591dac06746a5c49e3174c5ab9eeb227c1f99a0 Mon Sep 17 00:00:00 2001 From: jdsteve2 <jdsteve2@illinois.edu> Date: Wed, 24 Jan 2018 16:46:56 -0600 Subject: [PATCH] fixed flake8 issues --- loopy/statistics.py | 4 +- test/test_statistics.py | 86 ++++++++++++++++++++--------------------- 2 files changed, 45 insertions(+), 45 deletions(-) diff --git a/loopy/statistics.py b/loopy/statistics.py index 19fa3d71a..92762156b 100755 --- a/loopy/statistics.py +++ b/loopy/statistics.py @@ -1375,8 +1375,8 @@ def get_mem_access_map(knl, numpy_types=True, count_redundant_work=False, for size in local_size: s = aff_to_expr(size) if not isinstance(s, int): - raise LoopyError("Cannot count insn with subgroup granularity, " - "group size is not integer: %s" + raise LoopyError("Cannot count insn with subgroup " + "granularity, group size is not integer: %s" % (local_size)) group_size *= s diff --git a/test/test_statistics.py b/test/test_statistics.py index c2fb4ffe2..7d1b6df0d 100644 --- a/test/test_statistics.py +++ b/test/test_statistics.py @@ -281,9 +281,9 @@ def test_mem_access_counter_basic(): count_granularity='subgroup') ].eval_with_dict(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert f32l == (3*n*m*ell)*n_groups*subgroups_per_group # these are uniform - assert f64l == (2*n*m)*n_groups*subgroups_per_group # these are uniform + # uniform: (count-per-sub-group)*n_groups*subgroups_per_group + assert f32l == (3*n*m*ell)*n_groups*subgroups_per_group + assert f64l == (2*n*m)*n_groups*subgroups_per_group f32s = mem_map[lp.MemAccess('global', np.dtype(np.float32), stride=0, direction='store', variable='c', @@ -294,9 +294,9 @@ def test_mem_access_counter_basic(): count_granularity='subgroup') ].eval_with_dict(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert f32s == (n*m*ell)*n_groups*subgroups_per_group # these are uniform - assert f64s == (n*m)*n_groups*subgroups_per_group # these are uniform + # uniform: 
(count-per-sub-group)*n_groups*subgroups_per_group + assert f32s == (n*m*ell)*n_groups*subgroups_per_group + assert f64s == (n*m)*n_groups*subgroups_per_group def test_mem_access_counter_reduction(): @@ -332,16 +332,16 @@ def test_mem_access_counter_reduction(): count_granularity='subgroup') ].eval_with_dict(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert f32l == (2*n*m*ell)*n_groups*subgroups_per_group # these are uniform + # uniform: (count-per-sub-group)*n_groups*subgroups_per_group + assert f32l == (2*n*m*ell)*n_groups*subgroups_per_group f32s = mem_map[lp.MemAccess('global', np.dtype(np.float32), stride=0, direction='store', variable='c', count_granularity='subgroup') ].eval_with_dict(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert f32s == (n*ell)*n_groups*subgroups_per_group # these are uniform + # uniform: (count-per-sub-group)*n_groups*subgroups_per_group + assert f32s == (n*ell)*n_groups*subgroups_per_group ld_bytes = mem_map.filter_by(mtype=['global'], direction=['load'] ).to_bytes().eval_and_sum(params) @@ -391,10 +391,10 @@ def test_mem_access_counter_logic(): direction='store') ].eval_with_dict(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert f32_g_l == (2*n*m)*n_groups*subgroups_per_group # these are uniform - assert f64_g_l == (n*m)*n_groups*subgroups_per_group # these are uniform - assert f64_g_s == (n*m)*n_groups*subgroups_per_group # these are uniform + # uniform: (count-per-sub-group)*n_groups*subgroups_per_group + assert f32_g_l == (2*n*m)*n_groups*subgroups_per_group + assert f64_g_l == (n*m)*n_groups*subgroups_per_group + assert f64_g_s == (n*m)*n_groups*subgroups_per_group def test_mem_access_counter_specialops(): @@ -442,9 +442,9 @@ def test_mem_access_counter_specialops(): count_granularity='subgroup') ].eval_with_dict(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert f32 == (2*n*m*ell)*n_groups*subgroups_per_group # these are uniform - assert f64 
== (2*n*m)*n_groups*subgroups_per_group # these are uniform + # uniform: (count-per-sub-group)*n_groups*subgroups_per_group + assert f32 == (2*n*m*ell)*n_groups*subgroups_per_group + assert f64 == (2*n*m)*n_groups*subgroups_per_group f32 = mem_map[lp.MemAccess('global', np.float32, stride=0, direction='store', variable='c', @@ -455,16 +455,16 @@ def test_mem_access_counter_specialops(): count_granularity='subgroup') ].eval_with_dict(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert f32 == (n*m*ell)*n_groups*subgroups_per_group # these are uniform - assert f64 == (n*m)*n_groups*subgroups_per_group # these are uniform + # uniform: (count-per-sub-group)*n_groups*subgroups_per_group + assert f32 == (n*m*ell)*n_groups*subgroups_per_group + assert f64 == (n*m)*n_groups*subgroups_per_group filtered_map = mem_map.filter_by(direction=['load'], variable=['a', 'g'], count_granularity='subgroup') tot = filtered_map.eval_and_sum(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert tot == (n*m*ell + n*m)*n_groups*subgroups_per_group # these are uniform + # uniform: (count-per-sub-group)*n_groups*subgroups_per_group + assert tot == (n*m*ell + n*m)*n_groups*subgroups_per_group def test_mem_access_counter_bitwise(): @@ -514,8 +514,8 @@ def test_mem_access_counter_bitwise(): count_granularity='subgroup') ].eval_with_dict(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert i32 == (4*n*m+2*n*m*ell)*n_groups*subgroups_per_group # these are uniform + # uniform: (count-per-sub-group)*n_groups*subgroups_per_group + assert i32 == (4*n*m+2*n*m*ell)*n_groups*subgroups_per_group i32 = mem_map[lp.MemAccess('global', np.int32, stride=0, direction='store', variable='c', @@ -526,8 +526,8 @@ def test_mem_access_counter_bitwise(): count_granularity='subgroup') ].eval_with_dict(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert i32 == (n*m+n*m*ell)*n_groups*subgroups_per_group # these are uniform + # uniform: 
(count-per-sub-group)*n_groups*subgroups_per_group + assert i32 == (n*m+n*m*ell)*n_groups*subgroups_per_group def test_mem_access_counter_mixed(): @@ -586,9 +586,9 @@ def test_mem_access_counter_mixed(): count_granularity='workitem') ].eval_with_dict(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert f64uniform == (2*n*m)*n_groups*subgroups_per_group # /subgroup_size for uniform - assert f32uniform == (m*n)*n_groups*subgroups_per_group # /subgroup_size for uniform + # uniform: (count-per-sub-group)*n_groups*subgroups_per_group + assert f64uniform == (2*n*m)*n_groups*subgroups_per_group + assert f32uniform == (m*n)*n_groups*subgroups_per_group assert f32nonconsec == 3*n*m*ell @@ -602,8 +602,8 @@ def test_mem_access_counter_mixed(): count_granularity='workitem') ].eval_with_dict(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert f64uniform == m*n*n_groups*subgroups_per_group # /subgroup_size for uniform + # uniform: (count-per-sub-group)*n_groups*subgroups_per_group + assert f64uniform == m*n*n_groups*subgroups_per_group assert f32nonconsec == n*m*ell @@ -962,15 +962,15 @@ def test_summations_and_filters(): count_granularity=['subgroup'] ).eval_and_sum(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert loads_a == (2*n*m*ell)*n_groups*subgroups_per_group # these are uniform + # uniform: (count-per-sub-group)*n_groups*subgroups_per_group + assert loads_a == (2*n*m*ell)*n_groups*subgroups_per_group global_stores = mem_map.filter_by(mtype=['global'], direction=['store'], count_granularity=['subgroup'] ).eval_and_sum(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert global_stores == (n*m*ell + n*m)*n_groups*subgroups_per_group # these are uniform + # uniform: (count-per-sub-group)*n_groups*subgroups_per_group + assert global_stores == (n*m*ell + n*m)*n_groups*subgroups_per_group ld_bytes = mem_map.filter_by(mtype=['global'], direction=['load'], count_granularity=['subgroup'] @@ -979,9 
+979,9 @@ def test_summations_and_filters(): count_granularity=['subgroup'] ).to_bytes().eval_and_sum(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert ld_bytes == (4*n*m*ell*3 + 8*n*m*2)*n_groups*subgroups_per_group # these are uniform - assert st_bytes == (4*n*m*ell + 8*n*m)*n_groups*subgroups_per_group # these are uniform + # uniform: (count-per-sub-group)*n_groups*subgroups_per_group + assert ld_bytes == (4*n*m*ell*3 + 8*n*m*2)*n_groups*subgroups_per_group + assert st_bytes == (4*n*m*ell + 8*n*m)*n_groups*subgroups_per_group # ignore stride and variable names in this map reduced_map = mem_map.group_by('mtype', 'dtype', 'direction') @@ -990,9 +990,9 @@ def test_summations_and_filters(): f64lall = reduced_map[lp.MemAccess('global', np.float64, direction='load') ].eval_with_dict(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert f32lall== (3*n*m*ell)*n_groups*subgroups_per_group # these are uniform - assert f64lall == (2*n*m)*n_groups*subgroups_per_group # these are uniform + # uniform: (count-per-sub-group)*n_groups*subgroups_per_group + assert f32lall == (3*n*m*ell)*n_groups*subgroups_per_group + assert f64lall == (2*n*m)*n_groups*subgroups_per_group op_map = lp.get_op_map(knl, count_redundant_work=True) #for k, v in op_map.items(): @@ -1026,8 +1026,8 @@ def test_summations_and_filters(): key.direction == 'load' s1f64l = mem_map.filter_by_func(func_filter).eval_and_sum(params) - # (count-per-sub-group*n_groups*subgroups_per_group) - assert s1f64l == (2*n*m)*n_groups*subgroups_per_group # these are uniform + # uniform: (count-per-sub-group)*n_groups*subgroups_per_group + assert s1f64l == (2*n*m)*n_groups*subgroups_per_group def test_strided_footprint(): -- GitLab