diff --git a/examples/cost.py b/examples/cost.py
index de9ba668f6ce8bc0eb8b2dfb6b572e05e83ef960..73422a7c1187197d8f18f2a1dd055f01b50a01c3 100644
--- a/examples/cost.py
+++ b/examples/cost.py
@@ -94,17 +94,17 @@ def calibrate_cost_model(ctx):
 
     from pytential.qbx.cost import CostModel, estimate_calibration_params
 
-    perf_model = CostModel(use_target_specific_qbx=True)
+    cost_model = CostModel(use_target_specific_qbx=True)
 
     model_results = []
     timing_results = []
 
     for lpot_source in training_geometries(queue):
-        lpot_source = lpot_source.copy(cost_model=perf_model)
+        lpot_source = lpot_source.copy(cost_model=cost_model)
         bound_op = get_bound_op(lpot_source)
         sigma = get_test_density(queue, lpot_source)
 
-        perf_S = bound_op.get_modeled_cost(queue, sigma=sigma)
+        cost_S = bound_op.get_modeled_cost(queue, sigma=sigma)
 
         # Warm-up run.
         bound_op.eval(queue, {"sigma": sigma})
@@ -113,26 +113,26 @@ def calibrate_cost_model(ctx):
         timing_data = {}
         bound_op.eval(queue, {"sigma": sigma}, timing_data=timing_data)
 
-        model_results.append(one(perf_S.values()))
+        model_results.append(one(cost_S.values()))
         timing_results.append(one(timing_data.values()))
 
     calibration_params = (
             estimate_calibration_params(model_results, timing_results))
 
-    return perf_model.with_calibration_params(calibration_params)
+    return cost_model.with_calibration_params(calibration_params)
 
 
-def test_cost_model(ctx, perf_model):
+def test_cost_model(ctx, cost_model):
     queue = cl.CommandQueue(ctx)
 
     for lpot_source in test_geometries(queue):
-        lpot_source = lpot_source.copy(cost_model=perf_model)
+        lpot_source = lpot_source.copy(cost_model=cost_model)
         bound_op = get_bound_op(lpot_source)
         sigma = get_test_density(queue, lpot_source)
 
-        perf_S = bound_op.get_modeled_cost(queue, sigma=sigma)
+        cost_S = bound_op.get_modeled_cost(queue, sigma=sigma)
         model_result = (
-                one(perf_S.values())
+                one(cost_S.values())
                 .get_predicted_times(merge_close_lists=True))
 
         # Warm-up run.
diff --git a/test/test_cost_model.py b/test/test_cost_model.py
index 2f13809b08c578c9e12adb8526625e852ee2be50..0eee59afaa9cc57e94ae71390724576841a0d5aa 100644
--- a/test/test_cost_model.py
+++ b/test/test_cost_model.py
@@ -142,15 +142,15 @@ def test_cost_model(ctx_getter, dim, use_target_specific_qbx):
 
     sym_op_S = sym.S(k_sym, sigma_sym, qbx_forced_limit=+1)
     op_S = bind(lpot_source, sym_op_S)
-    perf_S = op_S.get_modeled_cost(queue, sigma=sigma)
-    assert len(perf_S) == 1
+    cost_S = op_S.get_modeled_cost(queue, sigma=sigma)
+    assert len(cost_S) == 1
 
     sym_op_S_plus_D = (
             sym.S(k_sym, sigma_sym, qbx_forced_limit=+1)
             + sym.D(k_sym, sigma_sym))
     op_S_plus_D = bind(lpot_source, sym_op_S_plus_D)
-    perf_S_plus_D = op_S_plus_D.get_modeled_cost(queue, sigma=sigma)
-    assert len(perf_S_plus_D) == 2
+    cost_S_plus_D = op_S_plus_D.get_modeled_cost(queue, sigma=sigma)
+    assert len(cost_S_plus_D) == 2
 
 # }}}
 
@@ -178,22 +178,22 @@ def test_cost_model_parameter_gathering(ctx_getter):
 
     sym_op_S = sym.S(k_sym, sigma_sym, qbx_forced_limit=+1, k=sym.var("k"))
     op_S = bind(lpot_source, sym_op_S)
-    perf_S = one(op_S.get_modeled_cost(queue, sigma=sigma, k=k).values())
+    cost_S = one(op_S.get_modeled_cost(queue, sigma=sigma, k=k).values())
 
     geo_data = lpot_source.qbx_fmm_geometry_data(
             target_discrs_and_qbx_sides=((lpot_source.density_discr, 1),))
 
     tree = geo_data.tree()
 
-    assert perf_S.params["p_qbx"] == QBX_ORDER
-    assert perf_S.params["nlevels"] == tree.nlevels
-    assert perf_S.params["nsources"] == tree.nsources
-    assert perf_S.params["ntargets"] == tree.ntargets
-    assert perf_S.params["ncenters"] == geo_data.ncenters
+    assert cost_S.params["p_qbx"] == QBX_ORDER
+    assert cost_S.params["nlevels"] == tree.nlevels
+    assert cost_S.params["nsources"] == tree.nsources
+    assert cost_S.params["ntargets"] == tree.ntargets
+    assert cost_S.params["ncenters"] == geo_data.ncenters
 
     for level in range(tree.nlevels):
         assert (
-                perf_S.params["p_fmm_lev%d" % level]
+                cost_S.params["p_fmm_lev%d" % level]
                 == fmm_level_to_order(k_sym, {"k": 2}, tree, level))
 
 # }}}
 
@@ -405,13 +405,13 @@ def test_cost_model_correctness(ctx_getter, dim, off_surface,
     cl_ctx = ctx_getter()
     queue = cl.CommandQueue(cl_ctx)
 
-    perf_model = (
+    cost_model = (
             CostModel(
                 use_target_specific_qbx=use_target_specific_qbx,
                 translation_cost_model_factory=OpCountingTranslationCostModel))
 
     lpot_source = get_lpot_source(queue, dim).copy(
-            cost_model=perf_model,
+            cost_model=cost_model,
             _use_target_specific_qbx=use_target_specific_qbx)
 
     # Construct targets.
@@ -437,7 +437,7 @@ def test_cost_model_correctness(ctx_getter, dim, off_surface,
     sigma = get_density(queue, lpot_source)
 
     from pytools import one
-    perf_S = one(op_S.get_modeled_cost(queue, sigma=sigma).values())
+    cost_S = one(op_S.get_modeled_cost(queue, sigma=sigma).values())
 
     # Run FMM with ConstantOneWrangler. This can't be done with pytential's
     # high-level interface, so call the FMM driver directly.
@@ -457,7 +457,7 @@ def test_cost_model_correctness(ctx_getter, dim, off_surface,
     # Check constant one wrangler for correctness.
     assert (potential == nnodes).all()
 
-    modeled_time = perf_S.get_predicted_times(merge_close_lists=True)
+    modeled_time = cost_S.get_predicted_times(merge_close_lists=True)
 
     # Check that the cost model matches the timing data returned by the
     # constant one wrangler.
@@ -513,7 +513,7 @@ def test_cost_model_order_varying_by_level(ctx_getter):
 
     sigma = get_density(queue, lpot_source)
 
-    perf_constant = one(
+    cost_constant = one(
             bind(lpot_source, sym_op)
             .get_modeled_cost(queue, sigma=sigma).values())
 
@@ -521,13 +521,13 @@ def test_cost_model_order_varying_by_level(ctx_getter):
 
     # {{{ varying level to order
 
-    varying_order_params = perf_constant.params.copy()
+    varying_order_params = cost_constant.params.copy()
 
-    nlevels = perf_constant.params["nlevels"]
+    nlevels = cost_constant.params["nlevels"]
     for level in range(nlevels):
         varying_order_params["p_fmm_lev%d" % level] = nlevels - level
 
-    perf_varying = perf_constant.with_params(varying_order_params)
+    cost_varying = cost_constant.with_params(varying_order_params)
 
     # }}}
 
@@ -535,8 +535,8 @@ def test_cost_model_order_varying_by_level(ctx_getter):
    # case should have larger cost.
 
     assert (
-            sum(perf_varying.get_predicted_times().values())
-            > sum(perf_constant.get_predicted_times().values()))
+            sum(cost_varying.get_predicted_times().values())
+            > sum(cost_constant.get_predicted_times().values()))
 
 # }}}