for key, value in profile_data.items():
    if isinstance(value, TimingFutureList):
        print(key, value.elapsed())
# {{{ paper outputs
def get_example_stepper(queue, dims=2, order=3, use_fusion=True,
                        exec_mapper_factory=ExecutionMapper,
                        return_ic=False):
    # Build the wave operator and DG discretization at the requested order.
    op, discr = get_strong_wave_op_with_discr(
            queue.context, dims=dims, order=order)

    if not use_fusion:
        bound_op = bind(
                discr, op.sym_operator(),
                exec_mapper_factory=exec_mapper_factory)

        stepper = RK4TimeStepper(
                queue, discr, "w", bound_op, 1 + discr.dim,
                get_strong_wave_component,
                exec_mapper_factory=exec_mapper_factory)

    else:
        stepper = FusedRK4TimeStepper(
                queue, discr, "w", op.sym_operator(), 1 + discr.dim,
                get_strong_wave_component,
                exec_mapper_factory=exec_mapper_factory)

    if return_ic:
        from pytools.obj_array import join_fields
        ic = join_fields(discr.zeros(queue),
                [discr.zeros(queue) for i in range(discr.dim)])
        return stepper, ic

    return stepper
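
# Illustrative usage, a sketch only (assumes a pyopencl context/queue is
# available, as in the table-generating routines below):
#     queue = cl.CommandQueue(cl.create_some_context())
#     fused_stepper = get_example_stepper(queue, dims=2, use_fusion=True)
#     stepper, ic = get_example_stepper(queue, use_fusion=False, return_ic=True)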
def latex_table(table_format, header, rows):
    result = []
    _ = result.append
    _(rf"\begin{{tabular}}{{{table_format}}}")
    _(r"\toprule")
    _(" & ".join(rf"\multicolumn{{1}}{{c}}{{{item}}}" for item in header) + r" \\")
    _(r"\midrule")
    for row in rows:
        _(" & ".join(row) + r" \\")
    _(r"\bottomrule")
    _(r"\end{tabular}")
    return "\n".join(result)
def ascii_table(table_format, header, rows):
    from pytools import Table
    table = Table()
    table.add_row(header)

    for input_row in rows:
        row = []
        for item in input_row:
            if item.startswith(r"\num{"):
                # Strip \num{...} formatting
                row.append(item[5:-1])
            else:
                row.append(item)
        table.add_row(row)

    return str(table)
if not PAPER_OUTPUT:
    table = ascii_table
else:
    table = latex_table
def problem_stats(order=3):
    cl_ctx = cl.create_some_context()

    outf = open_output_file("grudge-problem-stats.txt")

    _, dg_discr_2d = get_strong_wave_op_with_discr(cl_ctx, dims=2, order=order)
    print("Number of 2D elements:", dg_discr_2d.mesh.nelements, file=outf)
    vol_discr_2d = dg_discr_2d.discr_from_dd("vol")
    dofs_2d = {group.nunit_nodes for group in vol_discr_2d.groups}
    print("Number of DOFs per 2D element:", one(dofs_2d), file=outf)

    _, dg_discr_3d = get_strong_wave_op_with_discr(cl_ctx, dims=3, order=order)
    print("Number of 3D elements:", dg_discr_3d.mesh.nelements, file=outf)
    vol_discr_3d = dg_discr_3d.discr_from_dd("vol")
    dofs_3d = {group.nunit_nodes for group in vol_discr_3d.groups}
    print("Number of DOFs per 3D element:", one(dofs_3d), file=outf)

    logger.info("Wrote '%s'", outf.name)
def statement_counts_table():
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)

    fused_stepper = get_example_stepper(queue, use_fusion=True)
    stepper = get_example_stepper(queue, use_fusion=False)

    outf = open_output_file("statement-counts.tex")

    if not PAPER_OUTPUT:
        print("==== Statement Counts ====", file=outf)

    print(table(
        "lr",
        ("Operator", "Grudge Node Count"),
        (
            ("Time integration: baseline",
                r"\num{%d}" % len(stepper.bound_op.eval_code.instructions)),
            ("Right-hand side: baseline",
                r"\num{%d}" % len(stepper.grudge_bound_op.eval_code.instructions)),
            ("Inlined operator",
                r"\num{%d}" % len(fused_stepper.bound_op.eval_code.instructions)),
        )),
        file=outf)

    logger.info("Wrote '%s'", outf.name)
@memoize(key=lambda queue, dims: dims)
def mem_ops_results(queue, dims):
    # Fused (inlined) stepper, instrumented to count memory operations.
    fused_stepper = get_example_stepper(
            queue,
            dims=dims,
            use_fusion=True,
            exec_mapper_factory=ExecutionMapperWithMemOpCounting)

    # Baseline (non-fused) stepper, plus an initial condition to step from.
    stepper, ic = get_example_stepper(
            queue,
            dims=dims,
            use_fusion=False,
            exec_mapper_factory=ExecutionMapperWithMemOpCounting,
            return_ic=True)

    # A single time step suffices for counting memory traffic.
    t_start = 0
    dt = 0.02
    t_end = 0.02

    result = {}

    for (_, _, profile_data) in stepper.run(
            ic, t_start, dt, t_end, return_profile_data=True):
        pass

    result["nonfused_bytes_read"] = profile_data["bytes_read"]
    result["nonfused_bytes_written"] = profile_data["bytes_written"]
    result["nonfused_bytes_total"] = \
            result["nonfused_bytes_read"] \
            + result["nonfused_bytes_written"]

    result["nonfused_bytes_read_by_scalar_assignments"] = \
            profile_data["bytes_read_by_scalar_assignments"]
    result["nonfused_bytes_written_by_scalar_assignments"] = \
            profile_data["bytes_written_by_scalar_assignments"]
    result["nonfused_bytes_total_by_scalar_assignments"] = \
            result["nonfused_bytes_read_by_scalar_assignments"] \
            + result["nonfused_bytes_written_by_scalar_assignments"]

    for (_, _, profile_data) in fused_stepper.run(
            ic, t_start, dt, t_end, return_profile_data=True):
        pass
result["fused_bytes_read"] = profile_data["bytes_read"]
result["fused_bytes_written"] = profile_data["bytes_written"]
result["fused_bytes_total"] = \
result["fused_bytes_read"] \
+ result["fused_bytes_written"]
result["fused_bytes_read_by_scalar_assignments"] = \
profile_data["bytes_read_by_scalar_assignments"]
result["fused_bytes_written_by_scalar_assignments"] = \
profile_data["bytes_written_by_scalar_assignments"]
result["fused_bytes_total_by_scalar_assignments"] = \
result["fused_bytes_read_by_scalar_assignments"] \
+ result["fused_bytes_written_by_scalar_assignments"]
return result
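
# Illustrative use of the memoized results (keys as assigned above):
#     result = mem_ops_results(queue, 2)
#     pct = (100 * result["fused_bytes_total_by_scalar_assignments"]
#            / result["fused_bytes_total"])
# This is the quantity tabulated by the table-generating routines below.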
def scalar_assignment_percent_of_total_mem_ops_table():
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)

    result2d = mem_ops_results(queue, 2)
    result3d = mem_ops_results(queue, 3)

    outf = open_output_file("scalar-assignments-mem-op-percentage.tex")

    if not PAPER_OUTPUT:
        print("==== Scalar Assignment % of Total Mem Ops ====", file=outf)
"lr",
("Operator",
r"\parbox{1in}{\centering \% Memory Ops. Due to Scalar Assignments}"),
(
("2D: Baseline",
"%.1f" % (
100 * result2d["nonfused_bytes_total_by_scalar_assignments"]
/ result2d["nonfused_bytes_total"])),
("2D: Inlined",
"%.1f" % (
100 * result2d["fused_bytes_total_by_scalar_assignments"]
/ result2d["fused_bytes_total"])),
("3D: Baseline",
"%.1f" % (
100 * result3d["nonfused_bytes_total_by_scalar_assignments"]
/ result3d["nonfused_bytes_total"])),
("3D: Inlined",
"%.1f" % (
100 * result3d["fused_bytes_total_by_scalar_assignments"]
/ result3d["fused_bytes_total"])),
)),
file=outf)
logger.info("Wrote '%s'", outf.name)
def scalar_assignment_effect_of_fusion_mem_ops_table():
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)

    result2d = mem_ops_results(queue, 2)
    result3d = mem_ops_results(queue, 3)

    outf = open_output_file("scalar-assignments-fusion-impact.tex")

    if not PAPER_OUTPUT:
        print("==== Scalar Assignment Inlining Impact ====", file=outf)
"lrrrr",
("Operator",
r"Bytes Read",
r"Bytes Written",
r"Total",
r"\% of Baseline"),
(
("2D: Baseline",
r"\num{%d}" % (
result2d["nonfused_bytes_read_by_scalar_assignments"]),
r"\num{%d}" % (
result2d["nonfused_bytes_written_by_scalar_assignments"]),
r"\num{%d}" % (
result2d["nonfused_bytes_total_by_scalar_assignments"]),
"100"),
("2D: Inlined",
r"\num{%d}" % (
result2d["fused_bytes_read_by_scalar_assignments"]),
r"\num{%d}" % (
result2d["fused_bytes_written_by_scalar_assignments"]),
r"\num{%d}" % (
result2d["fused_bytes_total_by_scalar_assignments"]),
r"%.1f" % (
100 * result2d["fused_bytes_total_by_scalar_assignments"]
/ result2d["nonfused_bytes_total_by_scalar_assignments"])),
("3D: Baseline",
r"\num{%d}" % (
result3d["nonfused_bytes_read_by_scalar_assignments"]),
r"\num{%d}" % (
result3d["nonfused_bytes_written_by_scalar_assignments"]),
r"\num{%d}" % (
result3d["nonfused_bytes_total_by_scalar_assignments"]),
"100"),
("3D: Inlined",
r"\num{%d}" % (
result3d["fused_bytes_read_by_scalar_assignments"]),
r"\num{%d}" % (
result3d["fused_bytes_written_by_scalar_assignments"]),
r"\num{%d}" % (
result3d["fused_bytes_total_by_scalar_assignments"]),
r"%.1f" % (
100 * result3d["fused_bytes_total_by_scalar_assignments"]
/ result3d["nonfused_bytes_total_by_scalar_assignments"])),
)),
file=outf)
logger.info("Wrote '%s'", outf.name)
# }}}


if __name__ == "__main__":
    import sys
    if len(sys.argv) > 1:
        exec(sys.argv[1])
    else:
        if not SKIP_TESTS:
            # Run tests.
            from pytest import main
            result = main([__file__])
            assert result == 0

        # Run examples.
        problem_stats()
        statement_counts_table()
        scalar_assignment_percent_of_total_mem_ops_table()
        scalar_assignment_effect_of_fusion_mem_ops_table()
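
# Illustrative invocation (the script filename below is hypothetical):
#     python this_script.py "problem_stats(order=3)"
# runs a single routine via exec(); with no argument, the tests run first
# (unless SKIP_TESTS is set) and then all paper outputs are generated.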