From c0844c5a386fbbc80a5bb1ea99ec43a2180d74b5 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 19 Sep 2017 22:45:27 -0500 Subject: [PATCH 001/260] Distribute the source array for FMM --- boxtree/dfmm.py | 88 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 boxtree/dfmm.py diff --git a/boxtree/dfmm.py b/boxtree/dfmm.py new file mode 100644 index 0000000..79bc426 --- /dev/null +++ b/boxtree/dfmm.py @@ -0,0 +1,88 @@ +from __future__ import division + +__copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ + Copyright (C) 2017 Hao Gao" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import logging +logger = logging.getLogger(__name__) + +import numpy as np +import hpx + +@hpx.create_action() +def main(sources, num_particles_per_block): + + # {{{ Distribute source array + + num_particles = sources.shape[0] + import math + num_block = math.ceil(num_particles / num_particles_per_block) + d_sources = hpx.GlobalMemory.alloc_cyclic(num_block, + (num_particles_per_block, 2), sources.dtype) + finished_copy = hpx.And(num_block) + for i in range(num_block): + d_sources[i].set(sources[i*num_particles_per_block : (i+1)*num_particles_per_block], + sync='async', rsync_lco=finished_copy) + finished_copy.wait() + + # }}} + + # WIP: this is a placeholder + potentials = np.empty((num_particles,), dtype=float) + hpx.exit(array=potentials) + +def ddrive_fmm(traversal, expansion_wrangler, src_weights, num_particles_per_block=10000, + hpx_options=[]): + """Distributed implementation of top-level driver routine for a fast + multipole calculation. + + :arg traversal: A :class:`boxtree.traversal.FMMTraversalInfo` instance. + :arg expansion_wrangler: An object exhibiting the + :class:`ExpansionWranglerInterface`. + :arg src_weights: Source 'density/weights/charges'. + Passed unmodified to *expansion_wrangler*. + :arg hpx_options: Options for HPX runtime. Pass directly to hpx.init. + + Returns the potentials computed by *expansion_wrangler*. 
+ """ + wrangler = expansion_wrangler + logger.info("start fmm") + + logger.debug("reorder source weights") + src_weights = wrangler.reorder_sources(src_weights) + + logger.debug("start hpx runtime") + hpx.init(argv=hpx_options) + + # launch the main action + sources = np.stack([wrangler.tree.sources[0], wrangler.tree.sources[1]], + axis=-1) + num_particles = sources.shape[0] + potentials = hpx.run(main, sources, num_particles_per_block, + shape=(num_particles,), dtype=float) + + logger.debug("finalize hpx runtime") + hpx.finalize() + + return potentials + -- GitLab From e11f35fd0c38834eca61587e82ba620f7b24443e Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 16 Oct 2017 23:55:34 -0500 Subject: [PATCH 002/260] Add a test script for distributed FMM so that the result can compare --- test/test_dfmm.py | 70 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 test/test_dfmm.py diff --git a/test/test_dfmm.py b/test/test_dfmm.py new file mode 100644 index 0000000..56db48d --- /dev/null +++ b/test/test_dfmm.py @@ -0,0 +1,70 @@ +import numpy as np +import sys +from mpi4py import MPI + +# Get MPI information +comm = MPI.COMM_WORLD +rank = comm.Get_rank() + +# Parameters +dims = 2 +nsources = 3000 +ntargets = 1000 +dtype = np.float64 + +# Generate particles and run shared-memory parallelism on rank 0 +if rank == 0: + # Configure PyOpenCL + import pyopencl as cl + ctx = cl.create_some_context() + queue = cl.CommandQueue(ctx) + + # Generate random particles and source weights + from boxtree.tools import make_normal_particle_array as p_normal + sources = p_normal(queue, nsources, dims, dtype, seed=15) + targets = p_normal(queue, ntargets, dims, dtype, seed=18) + np.array([2, 0, 0])[:dims] + + from boxtree.tools import particle_array_to_host + sources_host = particle_array_to_host(sources) + targets_host = particle_array_to_host(targets) + + from pyopencl.clrandom import PhiloxGenerator + rng = PhiloxGenerator(queue.context, seed=20) + sources_weights = rng.uniform(queue, nsources, dtype=np.float64).get() + + # Display sources and targets + if "--display" in sys.argv: + import matplotlib.pyplot as plt + plt.plot(sources_host[:, 0], sources_host[:, 1], "bo") + plt.plot(targets_host[:, 0], targets_host[:, 1], "ro") + plt.show() + + # Calculate potentials in naive algorithm + import numpy.linalg as la + distances = la.norm(sources_host.reshape(1, nsources, 2) - \ + targets_host.reshape(ntargets, 1, 2), + ord=2, axis=2) + pot_naive = np.sum(-np.log(distances)*sources_weights, axis=1) + + # Build the tree and interaction lists + from boxtree import TreeBuilder + tb = TreeBuilder(ctx) + tree, _ = tb(queue, sources, targets=targets, max_particles_in_box=30, debug=True) + + from boxtree.traversal import FMMTraversalBuilder + tg = FMMTraversalBuilder(ctx) + trav, _ = tg(queue, tree, debug=True) + trav = trav.get(queue=queue) + + # Get pyfmmlib expansion wrangler + from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler + def fmm_level_to_nterms(tree, level): + return 20 + wrangler = FMMLibExpansionWrangler(trav.tree, 0, fmm_level_to_nterms=fmm_level_to_nterms) + + # Compute FMM using shared memory parallelism + from boxtree.fmm import drive_fmm + pot_fmm = drive_fmm(trav, wrangler, sources_weights)* 2 * np.pi + print(la.norm(pot_fmm - pot_naive, ord=2)) + +# Next: Compute FMM using distributed memory parallelism -- GitLab From 74e58373f2cd1e155838b4b886e8877a5329d089 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 17 Oct 2017 12:31:42 -0500 Subject: 
[PATCH 003/260] Distribute source particles across all ranks --- boxtree/dfmm.py | 97 +++++++++++++++++++++++-------------------------- 1 file changed, 46 insertions(+), 51 deletions(-) diff --git a/boxtree/dfmm.py b/boxtree/dfmm.py index 79bc426..aace408 100644 --- a/boxtree/dfmm.py +++ b/boxtree/dfmm.py @@ -26,63 +26,58 @@ THE SOFTWARE. import logging logger = logging.getLogger(__name__) +from mpi4py import MPI import numpy as np -import hpx -@hpx.create_action() -def main(sources, num_particles_per_block): +def drive_dfmm(traversal, expansion_wrangler, src_weights): - # {{{ Distribute source array - - num_particles = sources.shape[0] - import math - num_block = math.ceil(num_particles / num_particles_per_block) - d_sources = hpx.GlobalMemory.alloc_cyclic(num_block, - (num_particles_per_block, 2), sources.dtype) - finished_copy = hpx.And(num_block) - for i in range(num_block): - d_sources[i].set(sources[i*num_particles_per_block : (i+1)*num_particles_per_block], - sync='async', rsync_lco=finished_copy) - finished_copy.wait() + # {{{ Get MPI information - # }}} + comm = MPI.COMM_WORLD + current_rank = comm.Get_rank() + total_rank = comm.Get_size() - # WIP: this is a placeholder - potentials = np.empty((num_particles,), dtype=float) - hpx.exit(array=potentials) - -def ddrive_fmm(traversal, expansion_wrangler, src_weights, num_particles_per_block=10000, - hpx_options=[]): - """Distributed implementation of top-level driver routine for a fast - multipole calculation. - - :arg traversal: A :class:`boxtree.traversal.FMMTraversalInfo` instance. - :arg expansion_wrangler: An object exhibiting the - :class:`ExpansionWranglerInterface`. - :arg src_weights: Source 'density/weights/charges'. - Passed unmodified to *expansion_wrangler*. - :arg hpx_options: Options for HPX runtime. Pass directly to hpx.init. - - Returns the potentials computed by *expansion_wrangler*. 
- """ - wrangler = expansion_wrangler - logger.info("start fmm") - - logger.debug("reorder source weights") - src_weights = wrangler.reorder_sources(src_weights) - - logger.debug("start hpx runtime") - hpx.init(argv=hpx_options) + # }}} - # launch the main action - sources = np.stack([wrangler.tree.sources[0], wrangler.tree.sources[1]], - axis=-1) - num_particles = sources.shape[0] - potentials = hpx.run(main, sources, num_particles_per_block, - shape=(num_particles,), dtype=float) + # {{{ Distribute problem parameters - logger.debug("finalize hpx runtime") - hpx.finalize() + if current_rank == 0: + tree = traversal.tree + parameters = {"nsources":tree.nsources, + "dimensions":tree.sources.shape[0], + "coord_dtype":tree.coord_dtype} + else: + parameters = None + parameters = comm.bcast(parameters, root=0) + + # }}} - return potentials + # {{{ Distribute source particles + num_sources_per_rank = (parameters["nsources"] + total_rank - 1) // total_rank + sources = [] + + for i in range(parameters["dimensions"]): + # Prepare send buffer + if current_rank == 0: + sendbuf = np.empty((num_sources_per_rank * total_rank,), + dtype=parameters['coord_dtype']) + sendbuf[:parameters["nsources"]] = tree.sources[i] + else: + sendbuf = None + + # Prepare receive buffer + recvbuf = np.empty((num_sources_per_rank,), dtype=parameters['coord_dtype']) + + # Scatter send buffer + comm.Scatter(sendbuf, recvbuf, root=0) + + # Trim the receive buffer for the last rank + if current_rank == total_rank - 1: + num_sources_current_rank = parameters["nsources"] - \ + num_sources_per_rank * (total_rank - 1) + sources.append(recvbuf[:num_sources_current_rank]) + else: + sources.append(recvbuf) + + # }}} -- GitLab From 2fb275099607ce65ed23a7477f8354ec2a68b20d Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 18 Oct 2017 22:07:07 -0500 Subject: [PATCH 004/260] Update test script --- test/test_dfmm.py | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/test/test_dfmm.py b/test/test_dfmm.py index 56db48d..2a6c724 100644 --- a/test/test_dfmm.py +++ b/test/test_dfmm.py @@ -2,16 +2,21 @@ import numpy as np import sys from mpi4py import MPI -# Get MPI information -comm = MPI.COMM_WORLD -rank = comm.Get_rank() - # Parameters dims = 2 -nsources = 3000 -ntargets = 1000 +nsources = 30 +ntargets = 10 dtype = np.float64 +# Get the current rank +comm = MPI.COMM_WORLD +rank = comm.Get_rank() + +# Initialization +trav = None +sources_weights = None +wrangler = None + # Generate particles and run shared-memory parallelism on rank 0 if rank == 0: # Configure PyOpenCL @@ -39,7 +44,7 @@ if rank == 0: plt.plot(targets_host[:, 0], targets_host[:, 1], "ro") plt.show() - # Calculate potentials in naive algorithm + # Calculate potentials using direct evaluation import numpy.linalg as la distances = la.norm(sources_host.reshape(1, nsources, 2) - \ targets_host.reshape(ntargets, 1, 2), @@ -67,4 +72,9 @@ if rank == 0: pot_fmm = drive_fmm(trav, wrangler, sources_weights)* 2 * np.pi print(la.norm(pot_fmm - pot_naive, ord=2)) -# Next: Compute FMM using distributed memory parallelism +# Compute FMM using distributed memory parallelism +from boxtree.dfmm import drive_dfmm +# Note: The drive_dfmm interface works as follows: +# Rank 0 passes the correct trav, wrangler, and sources_weights +# All other ranks pass None to these arguments +pot_dfmm = drive_dfmm(trav, wrangler, sources_weights) -- GitLab From a55da65a2e64da27c5ca32ea40a078b839eb71cc Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 22 Oct 2017 20:01:51 
-0500 Subject: [PATCH 005/260] Construct ancestor mask for all boxes in a rank --- boxtree/dfmm.py | 81 ++++++++++++++++++++++++++++++------------------- 1 file changed, 50 insertions(+), 31 deletions(-) diff --git a/boxtree/dfmm.py b/boxtree/dfmm.py index aace408..35261f0 100644 --- a/boxtree/dfmm.py +++ b/boxtree/dfmm.py @@ -39,45 +39,64 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): # }}} - # {{{ Distribute problem parameters + # {{{ Distribute tree parameters if current_rank == 0: tree = traversal.tree - parameters = {"nsources":tree.nsources, + # TODO: distribute more parameters of the tree + parameters = {"sources_are_targets": tree.sources_are_targets, + "sources_have_extent": tree.sources_have_extent, + "nsources":tree.nsources, + "nboxes":tree.box_source_starts.shape[0], "dimensions":tree.sources.shape[0], - "coord_dtype":tree.coord_dtype} + "coord_dtype":tree.coord_dtype, + "box_id_dtype":tree.box_id_dtype} else: parameters = None parameters = comm.bcast(parameters, root=0) # }}} - # {{{ Distribute source particles - - num_sources_per_rank = (parameters["nsources"] + total_rank - 1) // total_rank - sources = [] - - for i in range(parameters["dimensions"]): - # Prepare send buffer - if current_rank == 0: - sendbuf = np.empty((num_sources_per_rank * total_rank,), - dtype=parameters['coord_dtype']) - sendbuf[:parameters["nsources"]] = tree.sources[i] - else: - sendbuf = None - - # Prepare receive buffer - recvbuf = np.empty((num_sources_per_rank,), dtype=parameters['coord_dtype']) - - # Scatter send buffer - comm.Scatter(sendbuf, recvbuf, root=0) - - # Trim the receive buffer for the last rank - if current_rank == total_rank - 1: - num_sources_current_rank = parameters["nsources"] - \ - num_sources_per_rank * (total_rank - 1) - sources.append(recvbuf[:num_sources_current_rank]) - else: - sources.append(recvbuf) - + # {{{ Fill tree parameters to the locally essentail tree + + from boxtree import Tree + letree = Tree() + # TODO: add more parameters to the locally essential tree + letree.sources_are_targets = parameters["sources_are_targets"] + + # }}} + + # {{{ Construct locally essential tree mask for each rank + + # Problem: Current implementation divides all boxes with targets evenly across all + # ranks. This scheme is subject to significant load imbalance. A better way to do + # this is to assign a weight to each box according to its interaction list, and then + # divides boxes evenly by the total weights. 
+ + if current_rank == 0: + # mask[i][j] is true iff box j is in the locally essential tree of rank i + mask = np.zeros((total_rank, parameters["nboxes"]), dtype=bool) + target_boxes = traversal.target_boxes + num_boxes_per_rank = (len(target_boxes) + total_rank - 1) // total_rank + + for i in range(total_rank): + # Get the start and end box index for rank i + box_start_idx = num_boxes_per_rank * i + if current_rank == total_rank - 1: + box_end_idx = len(target_boxes) + else: + box_end_idx = num_boxes_per_rank * (i + 1) + + # Mark all ancestors of boxes of rank i + new_mask = np.zeros(parameters["nboxes"], dtype=bool) + new_mask[target_boxes[box_start_idx:box_end_idx]] = True + while np.count_nonzero(new_mask) != 0: + np.logical_or(mask[i, :], new_mask, out=mask[i, :]) + new_mask_idx = np.nonzero(new_mask) + new_mask_parent_idx = tree.box_parent_ids[new_mask_idx] + new_mask[:] = False + new_mask[new_mask_parent_idx] = True + new_mask = np.logical_and(new_mask, np.logical_not(mask[i, :]), + out=new_mask) + # }}} -- GitLab From e254f9472c0c1af1d8e9a408ad78a7ba1215cea2 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 23 Oct 2017 13:49:05 -0500 Subject: [PATCH 006/260] Generate masks for list 1 and list 2 --- boxtree/dfmm.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/boxtree/dfmm.py b/boxtree/dfmm.py index 35261f0..ebd8350 100644 --- a/boxtree/dfmm.py +++ b/boxtree/dfmm.py @@ -68,7 +68,7 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): # {{{ Construct locally essential tree mask for each rank - # Problem: Current implementation divides all boxes with targets evenly across all + # Problem: Current implementation divides all boxes evenly across all # ranks. This scheme is subject to significant load imbalance. A better way to do # this is to assign a weight to each box according to its interaction list, and then # divides boxes evenly by the total weights. 
@@ -76,20 +76,19 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): if current_rank == 0: # mask[i][j] is true iff box j is in the locally essential tree of rank i mask = np.zeros((total_rank, parameters["nboxes"]), dtype=bool) - target_boxes = traversal.target_boxes - num_boxes_per_rank = (len(target_boxes) + total_rank - 1) // total_rank + num_boxes_per_rank = (parameters["nboxes"] + total_rank - 1) // total_rank for i in range(total_rank): # Get the start and end box index for rank i box_start_idx = num_boxes_per_rank * i if current_rank == total_rank - 1: - box_end_idx = len(target_boxes) + box_end_idx = parameters["nboxes"] else: box_end_idx = num_boxes_per_rank * (i + 1) # Mark all ancestors of boxes of rank i new_mask = np.zeros(parameters["nboxes"], dtype=bool) - new_mask[target_boxes[box_start_idx:box_end_idx]] = True + new_mask[box_start_idx:box_end_idx] = True while np.count_nonzero(new_mask) != 0: np.logical_or(mask[i, :], new_mask, out=mask[i, :]) new_mask_idx = np.nonzero(new_mask) @@ -98,5 +97,21 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): new_mask[new_mask_parent_idx] = True new_mask = np.logical_and(new_mask, np.logical_not(mask[i, :]), out=new_mask) + + # Generate interaction list mask for mask[i, :] + interaction_mask = np.zeros(parameters["nboxes"], dtype=bool) + box_indices = np.nonzero(mask[i, :]) + for j in range(len(box_indices)): + box_index = box_indices[j] + # List 1 + start, end = traversal.neighbor_source_boxes_starts[box_index:box_index + 2] + list1_idx = traversal.neighbor_source_boxes_lists[start:end] + interaction_mask[list1_idx] = True + # List 2 + start, end = traversal.from_sep_siblings_starts[box_index:box_index + 2] + list2_idx = traversal.from_sep_siblings_lists[start:end] + interaction_mask[list2_idx] = True + # List 3 + # }}} -- GitLab From cd5200b9a36741761ba289af0dc7b8891dea209c Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 30 Oct 2017 17:05:41 -0500 Subject: [PATCH 007/260] Distribute tranversal object without particles --- boxtree/dfmm.py | 90 +++++++++++++------------------------------------ 1 file changed, 23 insertions(+), 67 deletions(-) diff --git a/boxtree/dfmm.py b/boxtree/dfmm.py index ebd8350..fe84e9f 100644 --- a/boxtree/dfmm.py +++ b/boxtree/dfmm.py @@ -31,7 +31,7 @@ import numpy as np def drive_dfmm(traversal, expansion_wrangler, src_weights): - # {{{ Get MPI information + # {{{ Get MPI information comm = MPI.COMM_WORLD current_rank = comm.Get_rank() @@ -39,79 +39,35 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): # }}} - # {{{ Distribute tree parameters + # {{{ Broadcast traversal object without particles if current_rank == 0: - tree = traversal.tree - # TODO: distribute more parameters of the tree - parameters = {"sources_are_targets": tree.sources_are_targets, - "sources_have_extent": tree.sources_have_extent, - "nsources":tree.nsources, - "nboxes":tree.box_source_starts.shape[0], - "dimensions":tree.sources.shape[0], - "coord_dtype":tree.coord_dtype, - "box_id_dtype":tree.box_id_dtype} + local_traversal = traversal.copy() + local_tree = local_traversal.tree + local_tree.sources = None + if local_tree.sources_have_extent == True: + local_tree.source_radii = None + local_tree.targets = None + if local_tree.targets_have_extent == True: + local_tree.target_radii = None + local_tree.user_source_ids = None + local_tree.sorted_target_ids = None else: - parameters = None - parameters = comm.bcast(parameters, root=0) - - # }}} - - # {{{ Fill tree parameters to the locally essentail tree + 
local_traversal = None - from boxtree import Tree - letree = Tree() - # TODO: add more parameters to the locally essential tree - letree.sources_are_targets = parameters["sources_are_targets"] + comm.bcast(local_traversal, root=0) # }}} - # {{{ Construct locally essential tree mask for each rank + # {{{ Generate an array which contains responsible box indices - # Problem: Current implementation divides all boxes evenly across all - # ranks. This scheme is subject to significant load imbalance. A better way to do - # this is to assign a weight to each box according to its interaction list, and then - # divides boxes evenly by the total weights. - - if current_rank == 0: - # mask[i][j] is true iff box j is in the locally essential tree of rank i - mask = np.zeros((total_rank, parameters["nboxes"]), dtype=bool) - num_boxes_per_rank = (parameters["nboxes"] + total_rank - 1) // total_rank - - for i in range(total_rank): - # Get the start and end box index for rank i - box_start_idx = num_boxes_per_rank * i - if current_rank == total_rank - 1: - box_end_idx = parameters["nboxes"] - else: - box_end_idx = num_boxes_per_rank * (i + 1) - - # Mark all ancestors of boxes of rank i - new_mask = np.zeros(parameters["nboxes"], dtype=bool) - new_mask[box_start_idx:box_end_idx] = True - while np.count_nonzero(new_mask) != 0: - np.logical_or(mask[i, :], new_mask, out=mask[i, :]) - new_mask_idx = np.nonzero(new_mask) - new_mask_parent_idx = tree.box_parent_ids[new_mask_idx] - new_mask[:] = False - new_mask[new_mask_parent_idx] = True - new_mask = np.logical_and(new_mask, np.logical_not(mask[i, :]), - out=new_mask) - - # Generate interaction list mask for mask[i, :] - interaction_mask = np.zeros(parameters["nboxes"], dtype=bool) - box_indices = np.nonzero(mask[i, :]) - for j in range(len(box_indices)): - box_index = box_indices[j] - # List 1 - start, end = traversal.neighbor_source_boxes_starts[box_index:box_index + 2] - list1_idx = traversal.neighbor_source_boxes_lists[start:end] - interaction_mask[list1_idx] = True - # List 2 - start, end = traversal.from_sep_siblings_starts[box_index:box_index + 2] - list2_idx = traversal.from_sep_siblings_lists[start:end] - interaction_mask[list2_idx] = True - # List 3 - + num_boxes = local_traversal.tree.box_source_starts.shape[0] + num_responsible_boxes_per_rank = (num_boxes + total_rank - 1) // total_rank + if current_rank == total_rank - 1: + responsible_boxes = np.arange(num_responsible_boxes_per_rank * current_rank, + num_boxes, dtype=box_id_dtype) + else: + responsible_boxes = np.arange(num_responsible_boxes_per_rank * current_rank, + num_responsible_boxes_per_rank * (current_rank + 1), dtype=box_id_dtype) # }}} -- GitLab From 25fabba7c8c7bd6ffa38309d0f122462945a827d Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 31 Oct 2017 22:54:50 -0500 Subject: [PATCH 008/260] Generate responsible source mask for each rank --- boxtree/dfmm.py | 112 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 80 insertions(+), 32 deletions(-) diff --git a/boxtree/dfmm.py b/boxtree/dfmm.py index fe84e9f..a7fa23a 100644 --- a/boxtree/dfmm.py +++ b/boxtree/dfmm.py @@ -28,6 +28,53 @@ logger = logging.getLogger(__name__) from mpi4py import MPI import numpy as np +import pyopencl as cl +from mako.template import Template +from pyopencl.tools import dtype_to_ctype + + +def partition_work(tree, total_rank, queue): + # This function returns a list of total_rank elements, where element i is a + # pyopencl array of indices of process i's responsible boxes. 
+ responsible_boxes = [] + num_boxes = tree.box_source_starts.shape[0] + num_boxes_per_rank = (num_boxes + total_rank - 1) // total_rank + for current_rank in range(total_rank): + if current_rank == total_rank - 1: + responsible_boxes.append(cl.array.arange( + queue, + num_boxes_per_rank * current_rank, + num_boxes, + dtype=tree.box_id_dtype)) + else: + responsible_boxes.append(cl.array.arange( + queue, + num_boxes_per_rank * current_rank, + num_boxes_per_rank * (current_rank + 1), + dtype=tree.box_id_dtype)) + return responsible_boxes + + +# gen_particle_mask takes the responsible box indices as input and generate a mask +# for responsible particles. +gen_particle_mask_tpl = Template(r""" +typedef ${dtype_to_ctype(tree.box_id_dtype)} box_id_t; +typedef ${dtype_to_ctype(tree.particle_id_dtype)} particle_id_t; +typedef ${dtype_to_ctype(mask_dtype)} mask_t; +__kernel void generate_particle_mask(__global const box_id_t *res_boxes, + __global const particle_id_t *box_particle_starts, + __global const particle_id_t *box_particle_counts_nonchild, + __global mask_t *particle_mask) +{ + int gid = get_global_id(0); + for(particle_id_t i = box_particle_starts[gid]; + i < box_particle_starts[gid] + box_particle_counts_nonchild[gid]; + i++) { + particle_mask[i] = 1; + } +} +""", strict_undefined=True) + def drive_dfmm(traversal, expansion_wrangler, src_weights): @@ -38,36 +85,37 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): total_rank = comm.Get_size() # }}} - - # {{{ Broadcast traversal object without particles - + + ctx = cl.create_some_context() + queue = cl.CommandQueue(ctx) + if current_rank == 0: - local_traversal = traversal.copy() - local_tree = local_traversal.tree - local_tree.sources = None - if local_tree.sources_have_extent == True: - local_tree.source_radii = None - local_tree.targets = None - if local_tree.targets_have_extent == True: - local_tree.target_radii = None - local_tree.user_source_ids = None - local_tree.sorted_target_ids = None - else: - local_traversal = None - - comm.bcast(local_traversal, root=0) - - # }}} - - # {{{ Generate an array which contains responsible box indices - - num_boxes = local_traversal.tree.box_source_starts.shape[0] - num_responsible_boxes_per_rank = (num_boxes + total_rank - 1) // total_rank - if current_rank == total_rank - 1: - responsible_boxes = np.arange(num_responsible_boxes_per_rank * current_rank, - num_boxes, dtype=box_id_dtype) - else: - responsible_boxes = np.arange(num_responsible_boxes_per_rank * current_rank, - num_responsible_boxes_per_rank * (current_rank + 1), dtype=box_id_dtype) - - # }}} + tree = traversal.tree + + # Partition the work across all ranks by allocating responsible boxes + responsible_boxes = partition_work(tree, total_rank, queue) + + # Convert tree structures to device memory + d_box_source_starts = cl.array.to_device(queue, tree.box_source_starts) + d_box_source_counts_nonchild = cl.array.to_device(queue, + tree.box_source_counts_nonchild) + + # Generate particle mask program + mask_dtype = np.dtype(np.int8) + gen_particle_mask_prg = cl.Program(ctx, gen_particle_mask_tpl.render( + tree=tree, + dtype_to_ctype=dtype_to_ctype, + mask_dtype=mask_dtype)).build() + + for rank in range(total_rank): + d_source_mask = cl.array.zeros(queue, (tree.nsources,), + dtype=mask_dtype) + gen_particle_mask_prg.generate_particle_mask( + queue, + responsible_boxes[rank].shape, + None, + responsible_boxes[rank].data, + d_box_source_starts.data, + d_box_source_counts_nonchild.data, + d_source_mask.data + ) -- GitLab From 
96c7623c8a7b16794ff1b287951cf28074619834 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 1 Nov 2017 01:02:51 -0500 Subject: [PATCH 009/260] Generate the scan of mask array --- boxtree/dfmm.py | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/boxtree/dfmm.py b/boxtree/dfmm.py index a7fa23a..50a40da 100644 --- a/boxtree/dfmm.py +++ b/boxtree/dfmm.py @@ -31,6 +31,7 @@ import numpy as np import pyopencl as cl from mako.template import Template from pyopencl.tools import dtype_to_ctype +from pyopencl.scan import GenericScanKernel def partition_work(tree, total_rank, queue): @@ -67,8 +68,9 @@ __kernel void generate_particle_mask(__global const box_id_t *res_boxes, __global mask_t *particle_mask) { int gid = get_global_id(0); - for(particle_id_t i = box_particle_starts[gid]; - i < box_particle_starts[gid] + box_particle_counts_nonchild[gid]; + box_id_t cur_box = res_boxes[gid]; + for(particle_id_t i = box_particle_starts[cur_box]; + i < box_particle_starts[cur_box] + box_particle_counts_nonchild[cur_box]; i++) { particle_mask[i] = 1; } @@ -101,13 +103,23 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): tree.box_source_counts_nonchild) # Generate particle mask program - mask_dtype = np.dtype(np.int8) + mask_dtype = tree.particle_id_dtype gen_particle_mask_prg = cl.Program(ctx, gen_particle_mask_tpl.render( tree=tree, dtype_to_ctype=dtype_to_ctype, mask_dtype=mask_dtype)).build() + # Construct mask scan kernel + arg_tpl = Template(r"__global ${mask_t} *ary, __global ${mask_t} *out") + mask_scan_knl = GenericScanKernel( + ctx, mask_dtype, + arguments=arg_tpl.render(mask_t=dtype_to_ctype(mask_dtype)), + input_expr="ary[i]", + scan_expr="a+b", neutral="0", + output_statement="out[i] = item;") + for rank in range(total_rank): + # Generate the particle mask array d_source_mask = cl.array.zeros(queue, (tree.nsources,), dtype=mask_dtype) gen_particle_mask_prg.generate_particle_mask( @@ -119,3 +131,10 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): d_box_source_counts_nonchild.data, d_source_mask.data ) + + # Generate the scan of the particle mask array + d_source_scan = cl.array.empty(queue, (tree.nsources,), + dtype=tree.particle_id_dtype) + mask_scan_knl(d_source_mask, d_source_scan) + + l_nsources = d_source_scan[-1].get(queue) -- GitLab From b8afa4520874eb316ce7ac7dc76cb5b4ee0cd3f9 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 1 Nov 2017 21:11:18 -0500 Subject: [PATCH 010/260] Add generate_local_particles kernel --- boxtree/dfmm.py | 81 ++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 66 insertions(+), 15 deletions(-) diff --git a/boxtree/dfmm.py b/boxtree/dfmm.py index 50a40da..a543b49 100644 --- a/boxtree/dfmm.py +++ b/boxtree/dfmm.py @@ -56,25 +56,71 @@ def partition_work(tree, total_rank, queue): return responsible_boxes -# gen_particle_mask takes the responsible box indices as input and generate a mask -# for responsible particles. 
-gen_particle_mask_tpl = Template(r""" +gen_local_tree_tpl = Template(r""" typedef ${dtype_to_ctype(tree.box_id_dtype)} box_id_t; typedef ${dtype_to_ctype(tree.particle_id_dtype)} particle_id_t; typedef ${dtype_to_ctype(mask_dtype)} mask_t; -__kernel void generate_particle_mask(__global const box_id_t *res_boxes, +typedef ${dtype_to_ctype(tree.coord_dtype)} coord_t; + +__kernel void generate_particle_mask( + __global const box_id_t *res_boxes, __global const particle_id_t *box_particle_starts, __global const particle_id_t *box_particle_counts_nonchild, + const int total_num_res_boxes, __global mask_t *particle_mask) { + /* generate_particle_mask takes the responsible box indices as input and generate + * a mask for responsible particles. + */ + int gid = get_global_id(0); + int gsize = get_global_size(0); + int num_res_boxes = (total_num_res_boxes + gsize - 1) / gsize; + box_id_t res_boxes_start = num_res_boxes * gid; + + for(box_id_t cur_box = res_boxes_start; + cur_box < res_boxes_start + num_res_boxes && cur_box < total_num_res_boxes; + cur_box ++) + { + for(particle_id_t i = box_particle_starts[cur_box]; + i < box_particle_starts[cur_box] + box_particle_counts_nonchild[cur_box]; + i++) + { + particle_mask[i] = 1; + } + } +} + +__kernel void generate_local_particles( + const int total_num_particles, + const int total_num_local_particles, + __global const coord_t *particles, + __global const mask_t *particle_mask, + __global const mask_t *particle_scan, + __global coord_t *local_particles) +{ + /* generate_local_particles generates an array of particles for which a process + * is responsible for. + */ int gid = get_global_id(0); - box_id_t cur_box = res_boxes[gid]; - for(particle_id_t i = box_particle_starts[cur_box]; - i < box_particle_starts[cur_box] + box_particle_counts_nonchild[cur_box]; - i++) { - particle_mask[i] = 1; + int gsize = get_global_size(0); + int num_particles = (total_num_particles + gsize - 1) / gsize; + particle_id_t start = num_particles * gid; + + for(particle_id_t i = start; + i < start + num_particles && i < total_num_particles; + i++) + { + if(particle_mask[i]) + { + particle_id_t des = particle_scan[i]; + % for dim in range(ndims): + local_particles[total_num_local_particles * ${dim} + des] + = particles[total_num_particles * ${dim} + i]; + % endfor + } } } + """, strict_undefined=True) @@ -93,6 +139,7 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): if current_rank == 0: tree = traversal.tree + ndims = tree.sources.shape[0] # Partition the work across all ranks by allocating responsible boxes responsible_boxes = partition_work(tree, total_rank, queue) @@ -102,12 +149,13 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): d_box_source_counts_nonchild = cl.array.to_device(queue, tree.box_source_counts_nonchild) - # Generate particle mask program + # Compile the program mask_dtype = tree.particle_id_dtype - gen_particle_mask_prg = cl.Program(ctx, gen_particle_mask_tpl.render( + gen_local_tree_prg = cl.Program(ctx, gen_local_tree_tpl.render( tree=tree, dtype_to_ctype=dtype_to_ctype, - mask_dtype=mask_dtype)).build() + mask_dtype=mask_dtype, + ndims=ndims)).build() # Construct mask scan kernel arg_tpl = Template(r"__global ${mask_t} *ary, __global ${mask_t} *out") @@ -122,13 +170,14 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): # Generate the particle mask array d_source_mask = cl.array.zeros(queue, (tree.nsources,), dtype=mask_dtype) - gen_particle_mask_prg.generate_particle_mask( + 
gen_local_tree_prg.generate_particle_mask( queue, - responsible_boxes[rank].shape, + (2048,), None, responsible_boxes[rank].data, d_box_source_starts.data, d_box_source_counts_nonchild.data, + np.int32(responsible_boxes[rank].shape[0]), d_source_mask.data ) @@ -137,4 +186,6 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): dtype=tree.particle_id_dtype) mask_scan_knl(d_source_mask, d_source_scan) - l_nsources = d_source_scan[-1].get(queue) + local_nsources = d_source_scan[-1].get(queue) + local_sources = cl.array.empty(queue, (ndims, local_nsources), + dtype=tree.coord_dtype) -- GitLab From be24ffc638594d11471c393407e09a9305169324 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 7 Nov 2017 18:41:27 -0600 Subject: [PATCH 011/260] Generate local particles --- boxtree/dfmm.py | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/boxtree/dfmm.py b/boxtree/dfmm.py index a543b49..c8189ba 100644 --- a/boxtree/dfmm.py +++ b/boxtree/dfmm.py @@ -144,10 +144,13 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): # Partition the work across all ranks by allocating responsible boxes responsible_boxes = partition_work(tree, total_rank, queue) - # Convert tree structures to device memory + # Put tree structures to device memory d_box_source_starts = cl.array.to_device(queue, tree.box_source_starts) d_box_source_counts_nonchild = cl.array.to_device(queue, tree.box_source_counts_nonchild) + d_sources = np.empty((ndims,), dtype=object) + for i in range(ndims): + d_sources[i] = cl.array.to_device(queue, tree.sources[i]) # Compile the program mask_dtype = tree.particle_id_dtype @@ -171,21 +174,31 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): d_source_mask = cl.array.zeros(queue, (tree.nsources,), dtype=mask_dtype) gen_local_tree_prg.generate_particle_mask( - queue, - (2048,), - None, + queue, (2048,), None, responsible_boxes[rank].data, d_box_source_starts.data, d_box_source_counts_nonchild.data, np.int32(responsible_boxes[rank].shape[0]), - d_source_mask.data - ) + d_source_mask.data) # Generate the scan of the particle mask array d_source_scan = cl.array.empty(queue, (tree.nsources,), dtype=tree.particle_id_dtype) mask_scan_knl(d_source_mask, d_source_scan) + # Generate sources of rank's local tree local_nsources = d_source_scan[-1].get(queue) - local_sources = cl.array.empty(queue, (ndims, local_nsources), - dtype=tree.coord_dtype) + d_local_sources = np.empty((local_nsources,), dtype=object) + for i in range(ndims): + cur_local_sources = cl.array.empty(queue, (local_nsources), + dtype=tree.coord_dtype) + gen_local_tree_prg.generate_local_particles( + queue, (2048,), None, + np.int32(tree.nsources), + np.int32(local_nsources), + d_sources[i].data, + d_source_mask.data, + d_source_scan.data, + cur_local_sources.data) + + d_local_sources[i] = cur_local_sources -- GitLab From 3dce98b9f3af16b311375dfaca020202766cc8e9 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 7 Nov 2017 23:33:45 -0600 Subject: [PATCH 012/260] Refactor local tree particle generation --- boxtree/dfmm.py | 80 ++++++++++++++++++++++++++++++------------------- 1 file changed, 49 insertions(+), 31 deletions(-) diff --git a/boxtree/dfmm.py b/boxtree/dfmm.py index c8189ba..438d447 100644 --- a/boxtree/dfmm.py +++ b/boxtree/dfmm.py @@ -123,7 +123,6 @@ __kernel void generate_local_particles( """, strict_undefined=True) - def drive_dfmm(traversal, expansion_wrangler, src_weights): # {{{ Get MPI information @@ -144,14 +143,6 @@ def drive_dfmm(traversal, 
expansion_wrangler, src_weights): # Partition the work across all ranks by allocating responsible boxes responsible_boxes = partition_work(tree, total_rank, queue) - # Put tree structures to device memory - d_box_source_starts = cl.array.to_device(queue, tree.box_source_starts) - d_box_source_counts_nonchild = cl.array.to_device(queue, - tree.box_source_counts_nonchild) - d_sources = np.empty((ndims,), dtype=object) - for i in range(ndims): - d_sources[i] = cl.array.to_device(queue, tree.sources[i]) - # Compile the program mask_dtype = tree.particle_id_dtype gen_local_tree_prg = cl.Program(ctx, gen_local_tree_tpl.render( @@ -169,36 +160,63 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): scan_expr="a+b", neutral="0", output_statement="out[i] = item;") - for rank in range(total_rank): + + def gen_local_particles(rank, particles, nparticles, + box_particle_starts, + box_particle_counts_nonchild): + """ + This helper function generates the sources/targets related fields for + a local tree + """ + # Put particle structures to device memory + d_box_particle_starts = cl.array.to_device(queue, box_particle_starts) + d_box_particle_counts_nonchild = cl.array.to_device( + queue, box_particle_counts_nonchild) + d_particles = np.empty((ndims,), dtype=object) + for i in range(ndims): + d_particles[i] = cl.array.to_device(queue, particles[i]) + # Generate the particle mask array - d_source_mask = cl.array.zeros(queue, (tree.nsources,), - dtype=mask_dtype) + d_particle_mask = cl.array.zeros( + queue, (nparticles,), dtype=mask_dtype + ) gen_local_tree_prg.generate_particle_mask( queue, (2048,), None, responsible_boxes[rank].data, - d_box_source_starts.data, - d_box_source_counts_nonchild.data, + d_box_particle_starts.data, + d_box_particle_counts_nonchild.data, np.int32(responsible_boxes[rank].shape[0]), - d_source_mask.data) + d_particle_mask.data) # Generate the scan of the particle mask array - d_source_scan = cl.array.empty(queue, (tree.nsources,), - dtype=tree.particle_id_dtype) - mask_scan_knl(d_source_mask, d_source_scan) + d_particle_scan = cl.array.empty(queue, (nparticles,), + dtype=tree.particle_id_dtype) + mask_scan_knl(d_particle_mask, d_particle_scan) - # Generate sources of rank's local tree - local_nsources = d_source_scan[-1].get(queue) - d_local_sources = np.empty((local_nsources,), dtype=object) + # Generate particles for rank's local tree + local_nparticles = d_particle_scan[-1].get(queue) + d_local_particles = np.empty((local_nparticles,), dtype=object) for i in range(ndims): - cur_local_sources = cl.array.empty(queue, (local_nsources), - dtype=tree.coord_dtype) + d_local_particles[i] = cl.array.empty(queue, (local_nparticles,), + dtype=tree.coord_dtype) gen_local_tree_prg.generate_local_particles( queue, (2048,), None, - np.int32(tree.nsources), - np.int32(local_nsources), - d_sources[i].data, - d_source_mask.data, - d_source_scan.data, - cur_local_sources.data) - - d_local_sources[i] = cur_local_sources + np.int32(nparticles), + np.int32(local_nparticles), + d_particles[i].data, + d_particle_mask.data, + d_particle_scan.data, + d_local_particles[i].data) + + return d_local_particles + + + for rank in range(total_rank): + d_local_sources = gen_local_particles(rank, tree.sources, tree.nsources, + tree.box_source_starts, + tree.box_source_counts_nonchild) + d_local_targets = gen_local_particles(rank, tree.targets, tree.ntargets, + tree.box_target_starts, + tree.box_target_counts_nonchild) + + -- GitLab From 060e88ba9349a15cf56714eedc6b4c8555aa08c7 Mon Sep 17 00:00:00 
2001 From: Hao Gao Date: Wed, 8 Nov 2017 15:11:15 -0600 Subject: [PATCH 013/260] Tweak work group size and fix bugs --- boxtree/dfmm.py | 91 ++++++++++++++++++++++++------------------------- 1 file changed, 44 insertions(+), 47 deletions(-) diff --git a/boxtree/dfmm.py b/boxtree/dfmm.py index 438d447..3128b50 100644 --- a/boxtree/dfmm.py +++ b/boxtree/dfmm.py @@ -69,22 +69,17 @@ __kernel void generate_particle_mask( const int total_num_res_boxes, __global mask_t *particle_mask) { - /* generate_particle_mask takes the responsible box indices as input and generate + /* + * generate_particle_mask takes the responsible box indices as input and generate * a mask for responsible particles. */ int gid = get_global_id(0); - int gsize = get_global_size(0); - int num_res_boxes = (total_num_res_boxes + gsize - 1) / gsize; - box_id_t res_boxes_start = num_res_boxes * gid; - for(box_id_t cur_box = res_boxes_start; - cur_box < res_boxes_start + num_res_boxes && cur_box < total_num_res_boxes; - cur_box ++) - { + if(gid < total_num_res_boxes) { + box_id_t cur_box = res_boxes[gid]; for(particle_id_t i = box_particle_starts[cur_box]; i < box_particle_starts[cur_box] + box_particle_counts_nonchild[cur_box]; - i++) - { + i++) { particle_mask[i] = 1; } } @@ -92,32 +87,28 @@ __kernel void generate_particle_mask( __kernel void generate_local_particles( const int total_num_particles, - const int total_num_local_particles, - __global const coord_t *particles, + % for dim in range(ndims): + __global const coord_t *particles_${dim}, + % endfor __global const mask_t *particle_mask, - __global const mask_t *particle_scan, - __global coord_t *local_particles) + __global const mask_t *particle_scan + % for dim in range(ndims): + , __global coord_t *local_particles_${dim} + % endfor +) { - /* generate_local_particles generates an array of particles for which a process + /* + * generate_local_particles generates an array of particles for which a process * is responsible for. 
*/ int gid = get_global_id(0); - int gsize = get_global_size(0); - int num_particles = (total_num_particles + gsize - 1) / gsize; - particle_id_t start = num_particles * gid; - for(particle_id_t i = start; - i < start + num_particles && i < total_num_particles; - i++) + if(gid < total_num_particles && particle_mask[gid]) { - if(particle_mask[i]) - { - particle_id_t des = particle_scan[i]; - % for dim in range(ndims): - local_particles[total_num_local_particles * ${dim} + des] - = particles[total_num_particles * ${dim} + i]; - % endfor - } + particle_id_t des = particle_scan[gid]; + % for dim in range(ndims): + local_particles_${dim}[des - 1] = particles_${dim}[gid]; + % endfor } } @@ -135,7 +126,7 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): ctx = cl.create_some_context() queue = cl.CommandQueue(ctx) - + if current_rank == 0: tree = traversal.tree ndims = tree.sources.shape[0] @@ -160,7 +151,6 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): scan_expr="a+b", neutral="0", output_statement="out[i] = item;") - def gen_local_particles(rank, particles, nparticles, box_particle_starts, box_particle_counts_nonchild): @@ -177,16 +167,16 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): d_particles[i] = cl.array.to_device(queue, particles[i]) # Generate the particle mask array - d_particle_mask = cl.array.zeros( - queue, (nparticles,), dtype=mask_dtype - ) + d_particle_mask = cl.array.zeros(queue, (nparticles,), dtype=mask_dtype) + num_responsible_boxes = responsible_boxes[rank].shape[0] gen_local_tree_prg.generate_particle_mask( - queue, (2048,), None, + queue, ((num_responsible_boxes + 127)//128,), (128,), responsible_boxes[rank].data, d_box_particle_starts.data, d_box_particle_counts_nonchild.data, - np.int32(responsible_boxes[rank].shape[0]), - d_particle_mask.data) + np.int32(num_responsible_boxes), + d_particle_mask.data, + g_times_l=True) # Generate the scan of the particle mask array d_particle_scan = cl.array.empty(queue, (nparticles,), @@ -195,18 +185,26 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): # Generate particles for rank's local tree local_nparticles = d_particle_scan[-1].get(queue) - d_local_particles = np.empty((local_nparticles,), dtype=object) + d_local_particles = np.empty((ndims,), dtype=object) for i in range(ndims): d_local_particles[i] = cl.array.empty(queue, (local_nparticles,), dtype=tree.coord_dtype) - gen_local_tree_prg.generate_local_particles( - queue, (2048,), None, - np.int32(nparticles), - np.int32(local_nparticles), - d_particles[i].data, - d_particle_mask.data, - d_particle_scan.data, - d_local_particles[i].data) + + d_paticles_list = d_particles.tolist() + for i in range(ndims): + d_paticles_list[i] = d_paticles_list[i].data + d_local_particles_list = d_local_particles.tolist() + for i in range(ndims): + d_local_particles_list[i] = d_local_particles_list[i].data + + gen_local_tree_prg.generate_local_particles( + queue, ((nparticles + 127) // 128,), (128,), + np.int32(nparticles), + *d_paticles_list, + d_particle_mask.data, + d_particle_scan.data, + *d_local_particles_list, + g_times_l=True) return d_local_particles @@ -219,4 +217,3 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): tree.box_target_starts, tree.box_target_counts_nonchild) - -- GitLab From d1bcc2e76542dc53835ed21205a0824eb7bb8ff0 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 10 Nov 2017 09:30:56 -0600 Subject: [PATCH 014/260] Improve code quality and fix bugs (Thanks @inducer) --- boxtree/dfmm.py | 58 
++++++++++++++++++++++++------------------------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/boxtree/dfmm.py b/boxtree/dfmm.py index 3128b50..efe6e06 100644 --- a/boxtree/dfmm.py +++ b/boxtree/dfmm.py @@ -35,24 +35,27 @@ from pyopencl.scan import GenericScanKernel def partition_work(tree, total_rank, queue): - # This function returns a list of total_rank elements, where element i is a - # pyopencl array of indices of process i's responsible boxes. + """ This function returns a list of total_rank elements, where element i is a + pyopencl array of indices of process i's responsible boxes. + """ responsible_boxes = [] num_boxes = tree.box_source_starts.shape[0] - num_boxes_per_rank = (num_boxes + total_rank - 1) // total_rank - for current_rank in range(total_rank): - if current_rank == total_rank - 1: - responsible_boxes.append(cl.array.arange( - queue, - num_boxes_per_rank * current_rank, - num_boxes, - dtype=tree.box_id_dtype)) - else: - responsible_boxes.append(cl.array.arange( - queue, - num_boxes_per_rank * current_rank, - num_boxes_per_rank * (current_rank + 1), - dtype=tree.box_id_dtype)) + num_boxes_per_rank = num_boxes // total_rank + extra_boxes = num_boxes - num_boxes_per_rank * total_rank + start_idx = 0 + + for current_rank in range(extra_boxes): + end_idx = start_idx + num_boxes_per_rank + 1 + responsible_boxes.append(cl.array.arange(queue, start_idx, end_idx, + dtype=tree.box_id_dtype)) + start_idx = end_idx + + for current_rank in range(extra_boxes, num_boxes): + end_idx = start_idx + num_boxes_per_rank + responsible_boxes.append(cl.array.arange(queue, start_idx, end_idx, + dtype=tree.box_id_dtype)) + start_idx = end_idx + return responsible_boxes @@ -73,10 +76,10 @@ __kernel void generate_particle_mask( * generate_particle_mask takes the responsible box indices as input and generate * a mask for responsible particles. */ - int gid = get_global_id(0); + int res_box_idx = get_global_id(0); - if(gid < total_num_res_boxes) { - box_id_t cur_box = res_boxes[gid]; + if(res_box_idx < total_num_res_boxes) { + box_id_t cur_box = res_boxes[res_box_idx]; for(particle_id_t i = box_particle_starts[cur_box]; i < box_particle_starts[cur_box] + box_particle_counts_nonchild[cur_box]; i++) { @@ -101,28 +104,24 @@ __kernel void generate_local_particles( * generate_local_particles generates an array of particles for which a process * is responsible for. 
*/ - int gid = get_global_id(0); + int particle_idx = get_global_id(0); - if(gid < total_num_particles && particle_mask[gid]) + if(particle_idx < total_num_particles && particle_mask[particle_idx]) { - particle_id_t des = particle_scan[gid]; + particle_id_t des = particle_scan[particle_idx]; % for dim in range(ndims): - local_particles_${dim}[des - 1] = particles_${dim}[gid]; + local_particles_${dim}[des - 1] = particles_${dim}[particle_idx]; % endfor } } """, strict_undefined=True) -def drive_dfmm(traversal, expansion_wrangler, src_weights): +def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): - # {{{ Get MPI information - - comm = MPI.COMM_WORLD + # Get MPI information current_rank = comm.Get_rank() total_rank = comm.Get_size() - - # }}} ctx = cl.create_some_context() queue = cl.CommandQueue(ctx) @@ -216,4 +215,3 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights): d_local_targets = gen_local_particles(rank, tree.targets, tree.ntargets, tree.box_target_starts, tree.box_target_counts_nonchild) - -- GitLab From 8766a8e7336d93ffadf3bdc4814d59626eb5ae97 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 10 Nov 2017 13:12:02 -0600 Subject: [PATCH 015/260] Generate box properties for the local tree --- boxtree/dfmm.py | 101 +++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 83 insertions(+), 18 deletions(-) diff --git a/boxtree/dfmm.py b/boxtree/dfmm.py index efe6e06..d6934d7 100644 --- a/boxtree/dfmm.py +++ b/boxtree/dfmm.py @@ -31,7 +31,7 @@ import numpy as np import pyopencl as cl from mako.template import Template from pyopencl.tools import dtype_to_ctype -from pyopencl.scan import GenericScanKernel +from pyopencl.scan import ExclusiveScanKernel def partition_work(tree, total_rank, queue): @@ -110,11 +110,10 @@ __kernel void generate_local_particles( { particle_id_t des = particle_scan[particle_idx]; % for dim in range(ndims): - local_particles_${dim}[des - 1] = particles_${dim}[particle_idx]; + local_particles_${dim}[des] = particles_${dim}[particle_idx]; % endfor } } - """, strict_undefined=True) def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): @@ -129,6 +128,7 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): if current_rank == 0: tree = traversal.tree ndims = tree.sources.shape[0] + nboxes = tree.box_source_starts.shape[0] # Partition the work across all ranks by allocating responsible boxes responsible_boxes = partition_work(tree, total_rank, queue) @@ -142,17 +142,15 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): ndims=ndims)).build() # Construct mask scan kernel - arg_tpl = Template(r"__global ${mask_t} *ary, __global ${mask_t} *out") - mask_scan_knl = GenericScanKernel( + mask_scan_knl = ExclusiveScanKernel( ctx, mask_dtype, - arguments=arg_tpl.render(mask_t=dtype_to_ctype(mask_dtype)), - input_expr="ary[i]", scan_expr="a+b", neutral="0", - output_statement="out[i] = item;") + ) - def gen_local_particles(rank, particles, nparticles, - box_particle_starts, - box_particle_counts_nonchild): + def gen_local_tree(rank, particles, nparticles, + box_particle_starts, + box_particle_counts_nonchild, + box_particle_counts_cumul): """ This helper function generates the sources/targets related fields for a local tree @@ -161,6 +159,8 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): d_box_particle_starts = cl.array.to_device(queue, box_particle_starts) d_box_particle_counts_nonchild = cl.array.to_device( 
queue, box_particle_counts_nonchild) + d_box_particle_counts_cumul = cl.array.to_device(queue, + box_particle_counts_cumul) d_particles = np.empty((ndims,), dtype=object) for i in range(ndims): d_particles[i] = cl.array.to_device(queue, particles[i]) @@ -183,7 +183,7 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): mask_scan_knl(d_particle_mask, d_particle_scan) # Generate particles for rank's local tree - local_nparticles = d_particle_scan[-1].get(queue) + local_nparticles = d_particle_scan[-1].get(queue) + 1 d_local_particles = np.empty((ndims,), dtype=object) for i in range(ndims): d_local_particles[i] = cl.array.empty(queue, (local_nparticles,), @@ -205,13 +205,78 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): *d_local_particles_list, g_times_l=True) + # Generate "box_particle_starts" of the local tree + l_box_particle_starts = cl.array.empty(queue, (nboxes,), + dtype=tree.particle_id_dtype) + generate_box_particle_starts = cl.elementwise.ElementwiseKernel( + queue.context, + Template(""" + __global ${particle_id_t} *old_starts, + __global ${scan_t} *particle_scan, + __global ${particle_id_t} *new_starts + """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype), + scan_t=dtype_to_ctype(mask_dtype)), + "new_starts[i] = particle_scan[old_starts[i]]", + name="generate_box_particle_starts" + ) + + generate_box_particle_starts(d_box_particle_starts, d_particle_scan, + l_box_particle_starts) + + # Generate "box_particle_counts_nonchild" of the local tree + l_box_particle_counts_nonchild = cl.array.zeros(queue, (nboxes,), + dtype=tree.particle_id_dtype) + + generate_box_particle_counts_nonchild = cl.elementwise.ElementwiseKernel( + queue.context, + Template(""" + __global ${box_id_t} *res_boxes, + __global ${particle_id_t} *old_counts_nonchild, + __global ${particle_id_t} *new_counts_nonchild + """).render(box_id_t=dtype_to_ctype(tree.box_id_dtype), + particle_id_t=dtype_to_ctype(tree.particle_id_dtype)), + "new_counts_nonchild[res_boxes[i]] = " + "old_counts_nonchild[res_boxes[i]]", + name="generate_box_particle_counts_nonchild" + ) + + generate_box_particle_counts_nonchild(responsible_boxes[rank], + d_box_particle_counts_nonchild, + l_box_particle_counts_nonchild) + + # Generate "box_particle_counts_cumul" + l_box_particle_counts_cumul = cl.array.empty(queue, (nboxes,), + dtype=tree.particle_id_dtype) + + generate_box_particle_counts_cumul = cl.elementwise.ElementwiseKernel( + queue.context, + Template(""" + __global ${particle_id_t} *old_counts_cumul, + __global ${particle_id_t} *old_starts, + __global ${particle_id_t} *new_counts_cumul, + __global ${mask_t} *particle_scan + """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype), + mask_t=dtype_to_ctype(mask_dtype)), + "new_counts_cumul[i] = " + "particle_scan[old_starts[i] + old_counts_cumul[i]] - " + "particle_scan[old_starts[i]]", + name="generate_box_particle_counts_cumul" + ) + + generate_box_particle_counts_cumul(d_box_particle_counts_cumul, + d_box_particle_starts, + l_box_particle_counts_cumul, + d_particle_scan) + return d_local_particles for rank in range(total_rank): - d_local_sources = gen_local_particles(rank, tree.sources, tree.nsources, - tree.box_source_starts, - tree.box_source_counts_nonchild) - d_local_targets = gen_local_particles(rank, tree.targets, tree.ntargets, - tree.box_target_starts, - tree.box_target_counts_nonchild) + d_local_sources = gen_local_tree(rank, tree.sources, tree.nsources, + tree.box_source_starts, + 
tree.box_source_counts_nonchild, + tree.box_source_counts_cumul) + d_local_targets = gen_local_tree(rank, tree.targets, tree.ntargets, + tree.box_target_starts, + tree.box_target_counts_nonchild, + tree.box_source_counts_cumul) -- GitLab From 460fa7e23cb89041a04fddbea1912fd442b16b27 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 10 Nov 2017 13:17:36 -0600 Subject: [PATCH 016/260] Change filename to be more explicit --- boxtree/{dfmm.py => distributed.py} | 0 test/{test_dfmm.py => test_distributed.py} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename boxtree/{dfmm.py => distributed.py} (100%) rename test/{test_dfmm.py => test_distributed.py} (100%) diff --git a/boxtree/dfmm.py b/boxtree/distributed.py similarity index 100% rename from boxtree/dfmm.py rename to boxtree/distributed.py diff --git a/test/test_dfmm.py b/test/test_distributed.py similarity index 100% rename from test/test_dfmm.py rename to test/test_distributed.py -- GitLab From 4c4c9524118d0a50aea1f85fc6de58ed5a236a61 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 10 Nov 2017 15:02:30 -0600 Subject: [PATCH 017/260] Construct local tree and send to each rank --- boxtree/distributed.py | 63 +++++++++++++++++++++++++++++++--------- test/test_distributed.py | 6 ++-- 2 files changed, 52 insertions(+), 17 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index d6934d7..4bf6571 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -50,7 +50,7 @@ def partition_work(tree, total_rank, queue): dtype=tree.box_id_dtype)) start_idx = end_idx - for current_rank in range(extra_boxes, num_boxes): + for current_rank in range(extra_boxes, total_rank): end_idx = start_idx + num_boxes_per_rank responsible_boxes.append(cl.array.arange(queue, start_idx, end_idx, dtype=tree.box_id_dtype)) @@ -116,6 +116,7 @@ __kernel void generate_local_particles( } """, strict_undefined=True) + def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): # Get MPI information @@ -147,10 +148,10 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): scan_expr="a+b", neutral="0", ) - def gen_local_tree(rank, particles, nparticles, - box_particle_starts, - box_particle_counts_nonchild, - box_particle_counts_cumul): + def gen_local_particles(rank, particles, nparticles, + box_particle_starts, + box_particle_counts_nonchild, + box_particle_counts_cumul): """ This helper function generates the sources/targets related fields for a local tree @@ -268,15 +269,49 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): l_box_particle_counts_cumul, d_particle_scan) - return d_local_particles + local_particles = np.empty((ndims,), dtype=object) + for i in range(ndims): + local_particles[i] = d_local_particles[i].get() + local_box_particle_starts = d_box_particle_starts.get() + local_box_particle_counts_nonchild = d_box_particle_counts_nonchild.get() + local_box_particle_counts_cumul = d_box_particle_counts_cumul.get() + + return (local_particles, + local_box_particle_starts, + local_box_particle_counts_nonchild, + local_box_particle_counts_cumul) + local_tree = np.empty((total_rank,), dtype=object) + # request object for non-blocking communication + req = np.empty((total_rank,), dtype=object) for rank in range(total_rank): - d_local_sources = gen_local_tree(rank, tree.sources, tree.nsources, - tree.box_source_starts, - tree.box_source_counts_nonchild, - tree.box_source_counts_cumul) - d_local_targets = gen_local_tree(rank, tree.targets, tree.ntargets, 
- tree.box_target_starts, - tree.box_target_counts_nonchild, - tree.box_source_counts_cumul) + local_tree[rank] = tree.copy() + + (local_tree[rank].sources, + local_tree[rank].box_source_starts, + local_tree[rank].box_source_counts_nonchild, + local_tree[rank].box_source_counts_cumul) = \ + gen_local_particles(rank, tree.sources, tree.nsources, + tree.box_source_starts, + tree.box_source_counts_nonchild, + tree.box_source_counts_cumul) + + (local_tree[rank].targets, + local_tree[rank].box_target_starts, + local_tree[rank].box_target_counts_nonchild, + local_tree[rank].local_box_target_counts_cumul) = \ + gen_local_particles(rank, tree.targets, tree.ntargets, + tree.box_target_starts, + tree.box_target_counts_nonchild, + tree.box_source_counts_cumul) + + local_tree[rank].user_source_ids = None + local_tree[rank].sorted_target_ids = None + + req[rank] = comm.isend(local_tree[rank], dest=rank) + + # All ranks begin + local_tree_req = comm.irecv(source=0) + local_tree = local_tree_req.wait() + diff --git a/test/test_distributed.py b/test/test_distributed.py index 2a6c724..eff29b3 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -4,8 +4,8 @@ from mpi4py import MPI # Parameters dims = 2 -nsources = 30 -ntargets = 10 +nsources = 300 +ntargets = 100 dtype = np.float64 # Get the current rank @@ -73,7 +73,7 @@ if rank == 0: print(la.norm(pot_fmm - pot_naive, ord=2)) # Compute FMM using distributed memory parallelism -from boxtree.dfmm import drive_dfmm +from boxtree.distributed import drive_dfmm # Note: The drive_dfmm interface works as follows: # Rank 0 passes the correct trav, wrangler, and sources_weights # All other ranks pass None to these arguments -- GitLab From 398b29d749d6d5de5c7b5603af0bdc5b29e6abfe Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 13 Nov 2017 18:22:27 -0600 Subject: [PATCH 018/260] Use blocking recv instead of async one --- boxtree/distributed.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 4bf6571..970a0a9 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -312,6 +312,5 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): req[rank] = comm.isend(local_tree[rank], dest=rank) # All ranks begin - local_tree_req = comm.irecv(source=0) - local_tree = local_tree_req.wait() + local_tree = comm.recv(source=0) -- GitLab From 052c42d8e77486bf0c38ba1ab35769079e7c55f5 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 13 Nov 2017 19:05:49 -0600 Subject: [PATCH 019/260] Add LocalTree class --- boxtree/distributed.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 970a0a9..b1679f1 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -32,6 +32,23 @@ import pyopencl as cl from mako.template import Template from pyopencl.tools import dtype_to_ctype from pyopencl.scan import ExclusiveScanKernel +from boxtree import Tree + +class LocalTree(Tree): + + @property + def nsources(self): + return self.sources[0].shape[0] + + @property + def ntargets(self): + return self.targets[0].shape[0] + + @classmethod + def copy_from_global_tree(cls, global_tree): + local_tree = global_tree.copy() + local_tree.__class__ = cls + return local_tree def partition_work(tree, total_rank, queue): @@ -286,7 +303,7 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): req = np.empty((total_rank,), dtype=object) for rank in 
range(total_rank): - local_tree[rank] = tree.copy() + local_tree[rank] = LocalTree.copy_from_global_tree(tree) (local_tree[rank].sources, local_tree[rank].box_source_starts, -- GitLab From df20b73a0b1b402cf36081f8547698ca3ce05bcc Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 15 Nov 2017 12:43:36 -0600 Subject: [PATCH 020/260] Fix bugs and flake8 --- boxtree/distributed.py | 120 ++++++++++++++++++++------------------- test/test_distributed.py | 34 ++++++----- 2 files changed, 81 insertions(+), 73 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index b1679f1..ac22424 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -1,4 +1,11 @@ from __future__ import division +from mpi4py import MPI +import numpy as np +import pyopencl as cl +from mako.template import Template +from pyopencl.tools import dtype_to_ctype +from pyopencl.scan import ExclusiveScanKernel +from boxtree import Tree __copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ Copyright (C) 2017 Hao Gao" @@ -26,13 +33,6 @@ THE SOFTWARE. import logging logger = logging.getLogger(__name__) -from mpi4py import MPI -import numpy as np -import pyopencl as cl -from mako.template import Template -from pyopencl.tools import dtype_to_ctype -from pyopencl.scan import ExclusiveScanKernel -from boxtree import Tree class LocalTree(Tree): @@ -45,31 +45,31 @@ class LocalTree(Tree): return self.targets[0].shape[0] @classmethod - def copy_from_global_tree(cls, global_tree): - local_tree = global_tree.copy() + def copy_from_global_tree(cls, global_tree, responsible_boxes): + local_tree = global_tree.copy(responsible_boxes=responsible_boxes) local_tree.__class__ = cls return local_tree def partition_work(tree, total_rank, queue): """ This function returns a list of total_rank elements, where element i is a - pyopencl array of indices of process i's responsible boxes. + pyopencl array of indices of process i's responsible boxes. """ responsible_boxes = [] num_boxes = tree.box_source_starts.shape[0] num_boxes_per_rank = num_boxes // total_rank extra_boxes = num_boxes - num_boxes_per_rank * total_rank start_idx = 0 - + for current_rank in range(extra_boxes): end_idx = start_idx + num_boxes_per_rank + 1 - responsible_boxes.append(cl.array.arange(queue, start_idx, end_idx, + responsible_boxes.append(cl.array.arange(queue, start_idx, end_idx, dtype=tree.box_id_dtype)) start_idx = end_idx for current_rank in range(extra_boxes, total_rank): end_idx = start_idx + num_boxes_per_rank - responsible_boxes.append(cl.array.arange(queue, start_idx, end_idx, + responsible_boxes.append(cl.array.arange(queue, start_idx, end_idx, dtype=tree.box_id_dtype)) start_idx = end_idx @@ -83,18 +83,18 @@ typedef ${dtype_to_ctype(mask_dtype)} mask_t; typedef ${dtype_to_ctype(tree.coord_dtype)} coord_t; __kernel void generate_particle_mask( - __global const box_id_t *res_boxes, + __global const box_id_t *res_boxes, __global const particle_id_t *box_particle_starts, __global const particle_id_t *box_particle_counts_nonchild, const int total_num_res_boxes, - __global mask_t *particle_mask) + __global mask_t *particle_mask) { - /* - * generate_particle_mask takes the responsible box indices as input and generate + /* + * generate_particle_mask takes the responsible box indices as input and generate * a mask for responsible particles. 
*/ int res_box_idx = get_global_id(0); - + if(res_box_idx < total_num_res_boxes) { box_id_t cur_box = res_boxes[res_box_idx]; for(particle_id_t i = box_particle_starts[cur_box]; @@ -117,8 +117,8 @@ __kernel void generate_local_particles( % endfor ) { - /* - * generate_local_particles generates an array of particles for which a process + /* + * generate_local_particles generates an array of particles for which a process * is responsible for. */ int particle_idx = get_global_id(0); @@ -135,11 +135,11 @@ __kernel void generate_local_particles( def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): - + # Get MPI information current_rank = comm.Get_rank() total_rank = comm.Get_size() - + ctx = cl.create_some_context() queue = cl.CommandQueue(ctx) @@ -166,7 +166,7 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): ) def gen_local_particles(rank, particles, nparticles, - box_particle_starts, + box_particle_starts, box_particle_counts_nonchild, box_particle_counts_cumul): """ @@ -177,8 +177,8 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): d_box_particle_starts = cl.array.to_device(queue, box_particle_starts) d_box_particle_counts_nonchild = cl.array.to_device( queue, box_particle_counts_nonchild) - d_box_particle_counts_cumul = cl.array.to_device(queue, - box_particle_counts_cumul) + d_box_particle_counts_cumul = cl.array.to_device( + queue, box_particle_counts_cumul) d_particles = np.empty((ndims,), dtype=object) for i in range(ndims): d_particles[i] = cl.array.to_device(queue, particles[i]) @@ -196,7 +196,7 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): g_times_l=True) # Generate the scan of the particle mask array - d_particle_scan = cl.array.empty(queue, (nparticles,), + d_particle_scan = cl.array.empty(queue, (nparticles,), dtype=tree.particle_id_dtype) mask_scan_knl(d_particle_mask, d_particle_scan) @@ -206,7 +206,7 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): for i in range(ndims): d_local_particles[i] = cl.array.empty(queue, (local_nparticles,), dtype=tree.coord_dtype) - + d_paticles_list = d_particles.tolist() for i in range(ndims): d_paticles_list[i] = d_paticles_list[i].data @@ -224,13 +224,13 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): g_times_l=True) # Generate "box_particle_starts" of the local tree - l_box_particle_starts = cl.array.empty(queue, (nboxes,), + l_box_particle_starts = cl.array.empty(queue, (nboxes,), dtype=tree.particle_id_dtype) generate_box_particle_starts = cl.elementwise.ElementwiseKernel( queue.context, Template(""" - __global ${particle_id_t} *old_starts, - __global ${scan_t} *particle_scan, + __global ${particle_id_t} *old_starts, + __global ${scan_t} *particle_scan, __global ${particle_id_t} *new_starts """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype), scan_t=dtype_to_ctype(mask_dtype)), @@ -238,13 +238,13 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): name="generate_box_particle_starts" ) - generate_box_particle_starts(d_box_particle_starts, d_particle_scan, + generate_box_particle_starts(d_box_particle_starts, d_particle_scan, l_box_particle_starts) # Generate "box_particle_counts_nonchild" of the local tree - l_box_particle_counts_nonchild = cl.array.zeros(queue, (nboxes,), - dtype=tree.particle_id_dtype) - + l_box_particle_counts_nonchild = cl.array.zeros( + queue, (nboxes,), 
dtype=tree.particle_id_dtype) + generate_box_particle_counts_nonchild = cl.elementwise.ElementwiseKernel( queue.context, Template(""" @@ -257,14 +257,14 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): "old_counts_nonchild[res_boxes[i]]", name="generate_box_particle_counts_nonchild" ) - - generate_box_particle_counts_nonchild(responsible_boxes[rank], + + generate_box_particle_counts_nonchild(responsible_boxes[rank], d_box_particle_counts_nonchild, l_box_particle_counts_nonchild) # Generate "box_particle_counts_cumul" - l_box_particle_counts_cumul = cl.array.empty(queue, (nboxes,), - dtype=tree.particle_id_dtype) + l_box_particle_counts_cumul = cl.array.empty( + queue, (nboxes,), dtype=tree.particle_id_dtype) generate_box_particle_counts_cumul = cl.elementwise.ElementwiseKernel( queue.context, @@ -293,9 +293,9 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): local_box_particle_counts_nonchild = d_box_particle_counts_nonchild.get() local_box_particle_counts_cumul = d_box_particle_counts_cumul.get() - return (local_particles, - local_box_particle_starts, - local_box_particle_counts_nonchild, + return (local_particles, + local_box_particle_starts, + local_box_particle_counts_nonchild, local_box_particle_counts_cumul) local_tree = np.empty((total_rank,), dtype=object) @@ -303,31 +303,35 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): req = np.empty((total_rank,), dtype=object) for rank in range(total_rank): - local_tree[rank] = LocalTree.copy_from_global_tree(tree) - - (local_tree[rank].sources, - local_tree[rank].box_source_starts, - local_tree[rank].box_source_counts_nonchild, - local_tree[rank].box_source_counts_cumul) = \ - gen_local_particles(rank, tree.sources, tree.nsources, - tree.box_source_starts, - tree.box_source_counts_nonchild, - tree.box_source_counts_cumul) + local_tree[rank] = LocalTree.copy_from_global_tree( + tree, responsible_boxes[rank].get()) + + (local_tree[rank].sources, + local_tree[rank].box_source_starts, + local_tree[rank].box_source_counts_nonchild, + local_tree[rank].box_source_counts_cumul) = \ + gen_local_particles(rank, tree.sources, tree.nsources, + tree.box_source_starts, + tree.box_source_counts_nonchild, + tree.box_source_counts_cumul) (local_tree[rank].targets, local_tree[rank].box_target_starts, local_tree[rank].box_target_counts_nonchild, local_tree[rank].local_box_target_counts_cumul) = \ - gen_local_particles(rank, tree.targets, tree.ntargets, - tree.box_target_starts, - tree.box_target_counts_nonchild, - tree.box_source_counts_cumul) + gen_local_particles(rank, tree.targets, tree.ntargets, + tree.box_target_starts, + tree.box_target_counts_nonchild, + tree.box_source_counts_cumul) local_tree[rank].user_source_ids = None local_tree[rank].sorted_target_ids = None req[rank] = comm.isend(local_tree[rank], dest=rank) - # All ranks begin - local_tree = comm.recv(source=0) - + if current_rank == 0: + for rank in range(1, total_rank): + req[rank].wait() + local_tree = local_tree[0] + else: + local_tree = comm.recv(source=0) diff --git a/test/test_distributed.py b/test/test_distributed.py index eff29b3..9b306f6 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -1,11 +1,12 @@ import numpy as np import sys from mpi4py import MPI +from boxtree.distributed import drive_dfmm # Parameters dims = 2 -nsources = 300 -ntargets = 100 +nsources = 3000 +ntargets = 1000 dtype = np.float64 # Get the current rank @@ -27,7 +28,8 @@ if rank == 0: # 
Generate random particles and source weights from boxtree.tools import make_normal_particle_array as p_normal sources = p_normal(queue, nsources, dims, dtype, seed=15) - targets = p_normal(queue, ntargets, dims, dtype, seed=18) + np.array([2, 0, 0])[:dims] + targets = (p_normal(queue, ntargets, dims, dtype, seed=18) + + np.array([2, 0, 0])[:dims]) from boxtree.tools import particle_array_to_host sources_host = particle_array_to_host(sources) @@ -45,16 +47,17 @@ if rank == 0: plt.show() # Calculate potentials using direct evaluation - import numpy.linalg as la - distances = la.norm(sources_host.reshape(1, nsources, 2) - \ - targets_host.reshape(ntargets, 1, 2), - ord=2, axis=2) - pot_naive = np.sum(-np.log(distances)*sources_weights, axis=1) + # import numpy.linalg as la + # distances = la.norm(sources_host.reshape(1, nsources, 2) - \ + # targets_host.reshape(ntargets, 1, 2), + # ord=2, axis=2) + # pot_naive = np.sum(-np.log(distances)*sources_weights, axis=1) # Build the tree and interaction lists from boxtree import TreeBuilder tb = TreeBuilder(ctx) - tree, _ = tb(queue, sources, targets=targets, max_particles_in_box=30, debug=True) + tree, _ = tb(queue, sources, targets=targets, max_particles_in_box=30, + debug=True) from boxtree.traversal import FMMTraversalBuilder tg = FMMTraversalBuilder(ctx) @@ -63,18 +66,19 @@ if rank == 0: # Get pyfmmlib expansion wrangler from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler + def fmm_level_to_nterms(tree, level): return 20 - wrangler = FMMLibExpansionWrangler(trav.tree, 0, fmm_level_to_nterms=fmm_level_to_nterms) + wrangler = FMMLibExpansionWrangler( + trav.tree, 0, fmm_level_to_nterms=fmm_level_to_nterms) # Compute FMM using shared memory parallelism - from boxtree.fmm import drive_fmm - pot_fmm = drive_fmm(trav, wrangler, sources_weights)* 2 * np.pi - print(la.norm(pot_fmm - pot_naive, ord=2)) + # from boxtree.fmm import drive_fmm + # pot_fmm = drive_fmm(trav, wrangler, sources_weights)* 2 * np.pi + # print(la.norm(pot_fmm - pot_naive, ord=2)) # Compute FMM using distributed memory parallelism -from boxtree.distributed import drive_dfmm -# Note: The drive_dfmm interface works as follows: +# Note: The drive_dfmm interface works as follows: # Rank 0 passes the correct trav, wrangler, and sources_weights # All other ranks pass None to these arguments pot_dfmm = drive_dfmm(trav, wrangler, sources_weights) -- GitLab From ef58f73f730a142fef092bd0c4f94c33bb5494ae Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 17 Nov 2017 14:15:58 -0600 Subject: [PATCH 021/260] Compute tree flags --- boxtree/distributed.py | 68 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 1 deletion(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index ac22424..f7e5d5f 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -36,6 +36,10 @@ logger = logging.getLogger(__name__) class LocalTree(Tree): + @property + def nboxes(self): + return self.box_source_starts.shape[0] + @property def nsources(self): return self.sources[0].shape[0] @@ -133,6 +137,36 @@ __kernel void generate_local_particles( } """, strict_undefined=True) +gen_traversal_tpl = Template(r""" +#define HAS_CHILD_SOURCES ${HAS_CHILD_SOURCES} +#define HAS_CHILD_TARGETS ${HAS_CHILD_TARGETS} +#define HAS_OWN_SOURCES ${HAS_OWN_SOURCES} +#define HAS_OWN_TARGETS ${HAS_OWN_TARGETS} +typedef ${box_flag_t} box_flag_t; +typedef ${box_id_t} box_id_t; +typedef ${particle_id_t} particle_id_t; + +__kernel void generate_tree_flags( + __global box_flag_t *tree_flags, 
+ __global const particle_id_t *box_source_counts_nonchild, + __global const particle_id_t *box_source_counts_cumul, + __global const particle_id_t *box_target_counts_nonchild, + __global const particle_id_t *box_target_counts_cumul) +{ + box_id_t box_idx = get_global_id(0); + box_flag_t flag = 0; + if (box_source_counts_nonchild[box_idx]) + flag |= HAS_OWN_SOURCES; + if (box_source_counts_cumul[box_idx] > box_source_counts_nonchild[box_idx]) + flag |= HAS_CHILD_SOURCES; + if (box_target_counts_nonchild[box_idx]) + flag |= HAS_OWN_TARGETS; + if (box_target_counts_cumul[box_idx] > box_target_counts_nonchild[box_idx]) + flag |= HAS_CHILD_TARGETS; + tree_flags[box_idx] = flag; +} +""", strict_undefined=True) + def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): @@ -318,7 +352,7 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): (local_tree[rank].targets, local_tree[rank].box_target_starts, local_tree[rank].box_target_counts_nonchild, - local_tree[rank].local_box_target_counts_cumul) = \ + local_tree[rank].box_target_counts_cumul) = \ gen_local_particles(rank, tree.targets, tree.ntargets, tree.box_target_starts, tree.box_target_counts_nonchild, @@ -326,6 +360,7 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): local_tree[rank].user_source_ids = None local_tree[rank].sorted_target_ids = None + local_tree[rank].box_flags = None req[rank] = comm.isend(local_tree[rank], dest=rank) @@ -335,3 +370,34 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): local_tree = local_tree[0] else: local_tree = comm.recv(source=0) + + d_box_source_counts_nonchild = cl.array.to_device( + queue, local_tree.box_source_counts_nonchild) + d_box_source_counts_cumul = cl.array.to_device( + queue, local_tree.box_source_counts_cumul) + d_box_target_counts_nonchild = cl.array.to_device( + queue, local_tree.box_target_counts_nonchild) + d_box_target_counts_cumul = cl.array.to_device( + queue, local_tree.box_target_counts_cumul) + + from boxtree.tree import box_flags_enum + local_tree.box_flags = cl.array.empty(queue, (local_tree.nboxes,), + box_flags_enum.dtype) + gen_traversal_src = gen_traversal_tpl.render( + box_flag_t=dtype_to_ctype(box_flags_enum.dtype), + box_id_t=dtype_to_ctype(local_tree.box_id_dtype), + particle_id_t=dtype_to_ctype(local_tree.particle_id_dtype), + HAS_CHILD_SOURCES=box_flags_enum.HAS_CHILD_SOURCES, + HAS_CHILD_TARGETS=box_flags_enum.HAS_CHILD_TARGETS, + HAS_OWN_SOURCES=box_flags_enum.HAS_OWN_SOURCES, + HAS_OWN_TARGETS=box_flags_enum.HAS_OWN_TARGETS + ) + gen_traversal_prg = cl.Program(ctx, gen_traversal_src).build() + gen_traversal_prg.generate_tree_flags( + queue, ((local_tree.nboxes + 127) // 128,), (128,), + local_tree.box_flags.data, + d_box_source_counts_nonchild.data, + d_box_source_counts_cumul.data, + d_box_target_counts_nonchild.data, + d_box_target_counts_cumul.data, + g_times_l=True) -- GitLab From ae7d07068e03fd1b36474513a1c4f31b08e5a5f0 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 19 Nov 2017 17:56:38 -0600 Subject: [PATCH 022/260] Generate sources_parents_and_targets of traversal --- boxtree/distributed.py | 139 ++++++++++++++++++++++++++++++----------- 1 file changed, 102 insertions(+), 37 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index f7e5d5f..6241f66 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -137,15 +137,17 @@ __kernel void generate_local_particles( } """, strict_undefined=True) 
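The generate_tree_flags kernel added above rebuilds the local tree's box flags from its per-box particle counts: a box owns sources/targets when its nonchild count is nonzero, and it has child sources/targets when the cumulative count exceeds the nonchild count. A minimal NumPy sketch of that flag logic, for illustration only and not part of the patch, assuming only the box_flags_enum bits the kernel already uses:

    from boxtree.tree import box_flags_enum

    def sketch_box_flags(src_nonchild, src_cumul, tgt_nonchild, tgt_cumul):
        # Each comparison yields a 0/1 array; multiplying by the flag bit and
        # summing sets the corresponding bit exactly where the condition holds.
        return (
            (src_nonchild > 0) * box_flags_enum.HAS_OWN_SOURCES
            + (src_cumul > src_nonchild) * box_flags_enum.HAS_CHILD_SOURCES
            + (tgt_nonchild > 0) * box_flags_enum.HAS_OWN_TARGETS
            + (tgt_cumul > tgt_nonchild) * box_flags_enum.HAS_CHILD_TARGETS
            ).astype(box_flags_enum.dtype)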
-gen_traversal_tpl = Template(r""" -#define HAS_CHILD_SOURCES ${HAS_CHILD_SOURCES} -#define HAS_CHILD_TARGETS ${HAS_CHILD_TARGETS} -#define HAS_OWN_SOURCES ${HAS_OWN_SOURCES} -#define HAS_OWN_TARGETS ${HAS_OWN_TARGETS} -typedef ${box_flag_t} box_flag_t; -typedef ${box_id_t} box_id_t; -typedef ${particle_id_t} particle_id_t; +traversal_preamble_tpl = Template(r""" + #define HAS_CHILD_SOURCES ${HAS_CHILD_SOURCES} + #define HAS_CHILD_TARGETS ${HAS_CHILD_TARGETS} + #define HAS_OWN_SOURCES ${HAS_OWN_SOURCES} + #define HAS_OWN_TARGETS ${HAS_OWN_TARGETS} + typedef ${box_flag_t} box_flag_t; + typedef ${box_id_t} box_id_t; + typedef ${particle_id_t} particle_id_t; +""", strict_undefined=True) +gen_traversal_tpl = Template(r""" __kernel void generate_tree_flags( __global box_flag_t *tree_flags, __global const particle_id_t *box_source_counts_nonchild, @@ -167,6 +169,26 @@ __kernel void generate_tree_flags( } """, strict_undefined=True) +SOURCES_PARENTS_AND_TARGETS_TEMPLATE = r"""//CL// +void generate(LIST_ARG_DECL USER_ARG_DECL box_id_t box_id) +{ + box_flag_t flags = box_flags[box_id]; + + if (flags & HAS_OWN_SOURCES) + { APPEND_source_boxes(box_id); } + + if (flags & HAS_CHILD_SOURCES) + { APPEND_source_parent_boxes(box_id); } + + %if not sources_are_targets: + if (flags & HAS_OWN_TARGETS) + { APPEND_target_boxes(box_id); } + %endif + if (flags & (HAS_CHILD_TARGETS | HAS_OWN_TARGETS)) + { APPEND_target_or_target_parent_boxes(box_id); } +} +""" + def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): @@ -177,6 +199,8 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): ctx = cl.create_some_context() queue = cl.CommandQueue(ctx) + # {{{ Construct local traversal on root + if current_rank == 0: tree = traversal.tree ndims = tree.sources.shape[0] @@ -332,44 +356,71 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): local_box_particle_counts_nonchild, local_box_particle_counts_cumul) - local_tree = np.empty((total_rank,), dtype=object) + local_trav = np.empty((total_rank,), dtype=object) # request object for non-blocking communication req = np.empty((total_rank,), dtype=object) for rank in range(total_rank): - local_tree[rank] = LocalTree.copy_from_global_tree( + local_tree = LocalTree.copy_from_global_tree( tree, responsible_boxes[rank].get()) - (local_tree[rank].sources, - local_tree[rank].box_source_starts, - local_tree[rank].box_source_counts_nonchild, - local_tree[rank].box_source_counts_cumul) = \ + (local_tree.sources, + local_tree.box_source_starts, + local_tree.box_source_counts_nonchild, + local_tree.box_source_counts_cumul) = \ gen_local_particles(rank, tree.sources, tree.nsources, tree.box_source_starts, tree.box_source_counts_nonchild, tree.box_source_counts_cumul) - (local_tree[rank].targets, - local_tree[rank].box_target_starts, - local_tree[rank].box_target_counts_nonchild, - local_tree[rank].box_target_counts_cumul) = \ + (local_tree.targets, + local_tree.box_target_starts, + local_tree.box_target_counts_nonchild, + local_tree.box_target_counts_cumul) = \ gen_local_particles(rank, tree.targets, tree.ntargets, tree.box_target_starts, tree.box_target_counts_nonchild, tree.box_source_counts_cumul) - local_tree[rank].user_source_ids = None - local_tree[rank].sorted_target_ids = None - local_tree[rank].box_flags = None - - req[rank] = comm.isend(local_tree[rank], dest=rank) - + local_tree.user_source_ids = None + local_tree.sorted_target_ids = None + local_tree.box_flags = None + + local_trav[rank] = 
traversal.copy() + local_trav[rank].tree = local_tree + local_trav[rank].source_boxes = None + local_trav[rank].target_boxes = None + local_trav[rank].source_parent_boxes = None + local_trav[rank].level_start_source_box_nrs = None + local_trav[rank].level_start_source_parent_box_nrs = None + local_trav[rank].target_or_target_parent_boxes = None + local_trav[rank].level_start_target_box_nrs = None + local_trav[rank].level_start_target_or_target_parent_box_nrs = None + req[rank] = comm.isend(local_trav[rank], dest=rank) + + # }}} + + # Distribute the local trav to each rank if current_rank == 0: for rank in range(1, total_rank): req[rank].wait() - local_tree = local_tree[0] + local_trav = local_trav[0] else: - local_tree = comm.recv(source=0) + local_trav = comm.recv(source=0) + local_tree = local_trav.tree + + from boxtree.tree import box_flags_enum + traversal_preamble = traversal_preamble_tpl.render( + box_flag_t=dtype_to_ctype(box_flags_enum.dtype), + box_id_t=dtype_to_ctype(local_tree.box_id_dtype), + particle_id_t=dtype_to_ctype(local_tree.particle_id_dtype), + HAS_CHILD_SOURCES=box_flags_enum.HAS_CHILD_SOURCES, + HAS_CHILD_TARGETS=box_flags_enum.HAS_CHILD_TARGETS, + HAS_OWN_SOURCES=box_flags_enum.HAS_OWN_SOURCES, + HAS_OWN_TARGETS=box_flags_enum.HAS_OWN_TARGETS + ) + + # {{{ Fetch local tree to device memory d_box_source_counts_nonchild = cl.array.to_device( queue, local_tree.box_source_counts_nonchild) @@ -379,20 +430,14 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): queue, local_tree.box_target_counts_nonchild) d_box_target_counts_cumul = cl.array.to_device( queue, local_tree.box_target_counts_cumul) - - from boxtree.tree import box_flags_enum local_tree.box_flags = cl.array.empty(queue, (local_tree.nboxes,), box_flags_enum.dtype) - gen_traversal_src = gen_traversal_tpl.render( - box_flag_t=dtype_to_ctype(box_flags_enum.dtype), - box_id_t=dtype_to_ctype(local_tree.box_id_dtype), - particle_id_t=dtype_to_ctype(local_tree.particle_id_dtype), - HAS_CHILD_SOURCES=box_flags_enum.HAS_CHILD_SOURCES, - HAS_CHILD_TARGETS=box_flags_enum.HAS_CHILD_TARGETS, - HAS_OWN_SOURCES=box_flags_enum.HAS_OWN_SOURCES, - HAS_OWN_TARGETS=box_flags_enum.HAS_OWN_TARGETS - ) + + # }}} + + gen_traversal_src = traversal_preamble + gen_traversal_tpl.render() gen_traversal_prg = cl.Program(ctx, gen_traversal_src).build() + gen_traversal_prg.generate_tree_flags( queue, ((local_tree.nboxes + 127) // 128,), (128,), local_tree.box_flags.data, @@ -401,3 +446,23 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): d_box_target_counts_nonchild.data, d_box_target_counts_cumul.data, g_times_l=True) + + # {{{ + + from pyopencl.algorithm import ListOfListsBuilder + from pyopencl.tools import VectorArg + + sources_parents_and_targets_builder = ListOfListsBuilder( + ctx, + [("source_parent_boxes", local_tree.box_id_dtype), + ("source_boxes", local_tree.box_id_dtype), + ("target_or_target_parent_boxes", local_tree.box_id_dtype)] + ( + [("target_boxes", local_tree.box_id_dtype)] + if not local_tree.sources_are_targets else []), + traversal_preamble + Template(SOURCES_PARENTS_AND_TARGETS_TEMPLATE).render(), + arg_decls=[VectorArg(box_flags_enum.dtype, "box_flags")], + name_prefix="sources_parents_and_targets") + + result = sources_parents_and_targets_builder( + queue, local_tree.nboxes, local_tree.box_flags.data) + # }}} -- GitLab From 1df5400f49c91b15415bc9a391128910b0c5f85a Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 19 Nov 2017 20:28:40 -0600 Subject: 
[PATCH 023/260] Fix bugs and renaming --- boxtree/distributed.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 6241f66..18b6f23 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -282,8 +282,8 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): g_times_l=True) # Generate "box_particle_starts" of the local tree - l_box_particle_starts = cl.array.empty(queue, (nboxes,), - dtype=tree.particle_id_dtype) + local_box_particle_starts = cl.array.empty(queue, (nboxes,), + dtype=tree.particle_id_dtype) generate_box_particle_starts = cl.elementwise.ElementwiseKernel( queue.context, Template(""" @@ -297,10 +297,10 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): ) generate_box_particle_starts(d_box_particle_starts, d_particle_scan, - l_box_particle_starts) + local_box_particle_starts) # Generate "box_particle_counts_nonchild" of the local tree - l_box_particle_counts_nonchild = cl.array.zeros( + local_box_particle_counts_nonchild = cl.array.zeros( queue, (nboxes,), dtype=tree.particle_id_dtype) generate_box_particle_counts_nonchild = cl.elementwise.ElementwiseKernel( @@ -318,10 +318,10 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): generate_box_particle_counts_nonchild(responsible_boxes[rank], d_box_particle_counts_nonchild, - l_box_particle_counts_nonchild) + local_box_particle_counts_nonchild) # Generate "box_particle_counts_cumul" - l_box_particle_counts_cumul = cl.array.empty( + local_box_particle_counts_cumul = cl.array.empty( queue, (nboxes,), dtype=tree.particle_id_dtype) generate_box_particle_counts_cumul = cl.elementwise.ElementwiseKernel( @@ -341,15 +341,16 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): generate_box_particle_counts_cumul(d_box_particle_counts_cumul, d_box_particle_starts, - l_box_particle_counts_cumul, + local_box_particle_counts_cumul, d_particle_scan) local_particles = np.empty((ndims,), dtype=object) for i in range(ndims): local_particles[i] = d_local_particles[i].get() - local_box_particle_starts = d_box_particle_starts.get() - local_box_particle_counts_nonchild = d_box_particle_counts_nonchild.get() - local_box_particle_counts_cumul = d_box_particle_counts_cumul.get() + local_box_particle_starts = local_box_particle_starts.get() + local_box_particle_counts_nonchild = \ + local_box_particle_counts_nonchild.get() + local_box_particle_counts_cumul = local_box_particle_counts_cumul.get() return (local_particles, local_box_particle_starts, @@ -463,6 +464,7 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): arg_decls=[VectorArg(box_flags_enum.dtype, "box_flags")], name_prefix="sources_parents_and_targets") - result = sources_parents_and_targets_builder( + result, evt = sources_parents_and_targets_builder( queue, local_tree.nboxes, local_tree.box_flags.data) + # }}} -- GitLab From c520319fec8774a3f5310e5193cd3d1979f7b4a7 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 20 Nov 2017 09:42:23 -0600 Subject: [PATCH 024/260] Generate level starts --- boxtree/distributed.py | 87 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 18b6f23..e0ff959 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -145,6 +145,7 @@ traversal_preamble_tpl = Template(r""" typedef ${box_flag_t} box_flag_t; 
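An aside on the "Generate level starts" patch that begins above: the level_start_*_box_nrs arrays it constructs record, for every tree level, where that level's boxes start within a box list ordered by level, with one trailing entry equal to the list length and unoccupied levels pointing at the next occupied start. A rough NumPy equivalent of that extraction, not part of the patch, assuming box_levels holds each box's level:

    import numpy as np

    def sketch_level_starts(box_list, box_levels, nlevels):
        # levels is nondecreasing because box_list is ordered by level.
        levels = box_levels[box_list]
        # searchsorted gives, for each level l, the first index with
        # level >= l; entry nlevels equals len(box_list), matching the
        # kernel's postprocessing of unoccupied levels.
        return np.searchsorted(levels, np.arange(nlevels + 1))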
typedef ${box_id_t} box_id_t; typedef ${particle_id_t} particle_id_t; + typedef ${box_level_t} box_level_t; """, strict_undefined=True) gen_traversal_tpl = Template(r""" @@ -415,6 +416,7 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): box_flag_t=dtype_to_ctype(box_flags_enum.dtype), box_id_t=dtype_to_ctype(local_tree.box_id_dtype), particle_id_t=dtype_to_ctype(local_tree.particle_id_dtype), + box_level_t=dtype_to_ctype(local_tree.box_level_dtype), HAS_CHILD_SOURCES=box_flags_enum.HAS_CHILD_SOURCES, HAS_CHILD_TARGETS=box_flags_enum.HAS_CHILD_TARGETS, HAS_OWN_SOURCES=box_flags_enum.HAS_OWN_SOURCES, @@ -433,6 +435,10 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): queue, local_tree.box_target_counts_cumul) local_tree.box_flags = cl.array.empty(queue, (local_tree.nboxes,), box_flags_enum.dtype) + d_level_start_box_nrs = cl.array.to_device( + queue, local_tree.level_start_box_nrs) + d_box_levels = cl.array.to_device( + queue, local_tree.box_levels) # }}} @@ -467,4 +473,85 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): result, evt = sources_parents_and_targets_builder( queue, local_tree.nboxes, local_tree.box_flags.data) + local_trav.source_boxes = result["source_boxes"].lists + if not tree.sources_are_targets: + local_trav.target_boxes = result["target_boxes"].lists + else: + local_trav.target_boxes = local_trav.source_boxes + local_trav.source_parent_boxes = result["source_parent_boxes"].lists + local_trav.target_or_target_parent_boxes = \ + result["target_or_target_parent_boxes"].lists + + # }}} + + # {{{ + level_start_box_nrs_extractor = cl.elementwise.ElementwiseTemplate( + arguments="""//CL// + box_id_t *level_start_box_nrs, + box_level_t *box_levels, + box_id_t *box_list, + box_id_t *list_level_start_box_nrs, + """, + + operation=r"""//CL// + // Kernel is ranged so that this is true: + // assert(i > 0); + + box_id_t my_box_id = box_list[i]; + int my_level = box_levels[my_box_id]; + + bool is_level_leading_box; + if (i == 0) + is_level_leading_box = true; + else + { + box_id_t prev_box_id = box_list[i-1]; + box_id_t my_level_start = level_start_box_nrs[my_level]; + + is_level_leading_box = ( + prev_box_id < my_level_start + && my_level_start <= my_box_id); + } + + if (is_level_leading_box) + list_level_start_box_nrs[my_level] = i; + """, + name="extract_level_start_box_nrs").build(ctx, + type_aliases=( + ("box_id_t", local_tree.box_id_dtype), + ("box_level_t", local_tree.box_level_dtype), + ) + ) + + def extract_level_start_box_nrs(box_list, wait_for): + result = cl.array.empty(queue, local_tree.nlevels + 1, + local_tree.box_id_dtype) \ + .fill(len(box_list)) + evt = level_start_box_nrs_extractor( + d_level_start_box_nrs, + d_box_levels, + box_list, + result, + range=slice(0, len(box_list)), + queue=queue, wait_for=wait_for) + + result = result.get() + + # Postprocess result for unoccupied levels + prev_start = len(box_list) + for ilev in range(tree.nlevels-1, -1, -1): + result[ilev] = prev_start = min(result[ilev], prev_start) + + return result, evt + + local_trav.level_start_source_box_nrs, _ = \ + extract_level_start_box_nrs(local_trav.source_boxes, wait_for=[]) + local_trav.level_start_source_parent_box_nrs, _ = \ + extract_level_start_box_nrs(local_trav.source_parent_boxes, wait_for=[]) + local_trav.level_start_target_box_nrs, _ = \ + extract_level_start_box_nrs(local_trav.target_boxes, wait_for=[]) + local_trav.level_start_target_or_target_parent_box_nrs, _ = \ + 
extract_level_start_box_nrs(local_trav.target_or_target_parent_boxes, + wait_for=[]) + # }}} -- GitLab From 86117a460ebebd01e05c1c62fa5ada3d4456c3a2 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 24 Nov 2017 11:30:48 -0600 Subject: [PATCH 025/260] Step 2.1 and 2.2 --- boxtree/distributed.py | 145 +++++++++++++++++++++++++++++++++++------ 1 file changed, 125 insertions(+), 20 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index e0ff959..885e436 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -4,8 +4,9 @@ import numpy as np import pyopencl as cl from mako.template import Template from pyopencl.tools import dtype_to_ctype -from pyopencl.scan import ExclusiveScanKernel +from pyopencl.scan import GenericScanKernel from boxtree import Tree +from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler __copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ Copyright (C) 2017 Hao Gao" @@ -55,6 +56,11 @@ class LocalTree(Tree): return local_tree +class MPI_Tags(): + DIST_TRAV = 0 + DIST_WEIGHT = 1 + + def partition_work(tree, total_rank, queue): """ This function returns a list of total_rank elements, where element i is a pyopencl array of indices of process i's responsible boxes. @@ -191,7 +197,7 @@ void generate(LIST_ARG_DECL USER_ARG_DECL box_id_t box_id) """ -def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): +def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): # Get MPI information current_rank = comm.Get_rank() @@ -219,15 +225,27 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): ndims=ndims)).build() # Construct mask scan kernel - mask_scan_knl = ExclusiveScanKernel( + mask_scan_knl = GenericScanKernel( ctx, mask_dtype, + arguments=Template(""" + __global ${mask_t} *ary, + __global ${mask_t} *scan + """).render( + mask_t=dtype_to_ctype(mask_dtype) + ), + input_expr="ary[i]", scan_expr="a+b", neutral="0", + output_statement="scan[i + 1] = item;" ) + src_weights = src_weights[tree.user_source_ids] + src_weights = cl.array.to_device(queue, src_weights) + def gen_local_particles(rank, particles, nparticles, box_particle_starts, box_particle_counts_nonchild, - box_particle_counts_cumul): + box_particle_counts_cumul, + particle_weights=None): """ This helper function generates the sources/targets related fields for a local tree @@ -255,8 +273,9 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): g_times_l=True) # Generate the scan of the particle mask array - d_particle_scan = cl.array.empty(queue, (nparticles,), + d_particle_scan = cl.array.empty(queue, (nparticles + 1,), dtype=tree.particle_id_dtype) + d_particle_scan[0] = 0 mask_scan_knl(d_particle_mask, d_particle_scan) # Generate particles for rank's local tree @@ -353,14 +372,55 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): local_box_particle_counts_nonchild.get() local_box_particle_counts_cumul = local_box_particle_counts_cumul.get() - return (local_particles, - local_box_particle_starts, - local_box_particle_counts_nonchild, - local_box_particle_counts_cumul) + # {{{ Distribute source weights + if particle_weights is not None: + local_particle_weights = cl.array.empty(queue, (local_nparticles,), + dtype=src_weights.dtype) + gen_local_source_weights_knl = cl.elementwise.ElementwiseKernel( + ctx, + arguments=Template(""" + __global ${weight_t} *src_weights, + __global ${mask_t} *particle_mask, + __global ${particle_id_t} *particle_scan, + __global 
${weight_t} *local_weights + """).render( + weight_t=dtype_to_ctype(src_weights.dtype), + mask_t=dtype_to_ctype(mask_dtype), + particle_id_t=dtype_to_ctype(tree.particle_id_dtype) + ), + operation=""" + if(particle_mask[i]) { + local_weights[particle_scan[i]] = src_weights[i]; + } + """ + ) + gen_local_source_weights_knl( + particle_weights, + d_particle_mask, + d_particle_scan, + local_particle_weights + ) + + # }}} + + if particle_weights is not None: + return (local_particles, + local_box_particle_starts, + local_box_particle_counts_nonchild, + local_box_particle_counts_cumul, + local_particle_weights.get()) + else: + return (local_particles, + local_box_particle_starts, + local_box_particle_counts_nonchild, + local_box_particle_counts_cumul) local_trav = np.empty((total_rank,), dtype=object) - # request object for non-blocking communication - req = np.empty((total_rank,), dtype=object) + local_src_weights = np.empty((total_rank,), dtype=object) + + # request objects for non-blocking communication + trav_req = np.empty((total_rank,), dtype=object) + weight_req = np.empty((total_rank,), dtype=object) for rank in range(total_rank): local_tree = LocalTree.copy_from_global_tree( @@ -369,11 +429,13 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): (local_tree.sources, local_tree.box_source_starts, local_tree.box_source_counts_nonchild, - local_tree.box_source_counts_cumul) = \ + local_tree.box_source_counts_cumul, + local_src_weights[rank]) = \ gen_local_particles(rank, tree.sources, tree.nsources, tree.box_source_starts, tree.box_source_counts_nonchild, - tree.box_source_counts_cumul) + tree.box_source_counts_cumul, + src_weights) (local_tree.targets, local_tree.box_target_starts, @@ -382,7 +444,8 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): gen_local_particles(rank, tree.targets, tree.ntargets, tree.box_target_starts, tree.box_target_counts_nonchild, - tree.box_source_counts_cumul) + tree.box_source_counts_cumul, + None) local_tree.user_source_ids = None local_tree.sorted_target_ids = None @@ -398,19 +461,31 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): local_trav[rank].target_or_target_parent_boxes = None local_trav[rank].level_start_target_box_nrs = None local_trav[rank].level_start_target_or_target_parent_box_nrs = None - req[rank] = comm.isend(local_trav[rank], dest=rank) + + trav_req[rank] = comm.isend(local_trav[rank], dest=rank, + tag=MPI_Tags.DIST_TRAV) + weight_req[rank] = comm.isend(local_src_weights[rank], dest=rank, + tag=MPI_Tags.DIST_WEIGHT) # }}} - # Distribute the local trav to each rank + # Recieve the local trav from root if current_rank == 0: for rank in range(1, total_rank): - req[rank].wait() + trav_req[rank].wait() local_trav = local_trav[0] else: - local_trav = comm.recv(source=0) + local_trav = comm.recv(source=0, tag=MPI_Tags.DIST_TRAV) local_tree = local_trav.tree + # Recieve source weights from root + if current_rank == 0: + for rank in range(1, total_rank): + weight_req[rank].wait() + local_src_weights = local_src_weights[0] + else: + local_src_weights = comm.recv(source=0, tag=MPI_Tags.DIST_WEIGHT) + from boxtree.tree import box_flags_enum traversal_preamble = traversal_preamble_tpl.render( box_flag_t=dtype_to_ctype(box_flags_enum.dtype), @@ -474,7 +549,7 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): queue, local_tree.nboxes, local_tree.box_flags.data) local_trav.source_boxes = result["source_boxes"].lists - if not 
tree.sources_are_targets: + if not local_tree.sources_are_targets: local_trav.target_boxes = result["target_boxes"].lists else: local_trav.target_boxes = local_trav.source_boxes @@ -539,7 +614,7 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): # Postprocess result for unoccupied levels prev_start = len(box_list) - for ilev in range(tree.nlevels-1, -1, -1): + for ilev in range(local_tree.nlevels-1, -1, -1): result[ilev] = prev_start = min(result[ilev], prev_start) return result, evt @@ -554,4 +629,34 @@ def drive_dfmm(traversal, expansion_wrangler, src_weights, comm=MPI.COMM_WORLD): extract_level_start_box_nrs(local_trav.target_or_target_parent_boxes, wait_for=[]) + local_trav = local_trav.get(queue=queue) + + def fmm_level_to_nterms(tree, level): + return 20 + wrangler = FMMLibExpansionWrangler( + local_trav.tree, 0, fmm_level_to_nterms=fmm_level_to_nterms) + + # {{{ "Step 2.1:" Construct local multipoles + + logger.debug("construct local multipoles") + mpole_exps = wrangler.form_multipoles( + local_trav.level_start_source_box_nrs, + local_trav.source_boxes, + local_src_weights) + # }}} + + # {{{ "Step 2.2:" Propagate multipoles upward + + logger.debug("propagate multipoles upward") + wrangler.coarsen_multipoles( + local_trav.level_start_source_parent_box_nrs, + local_trav.source_parent_boxes, + mpole_exps) + + # mpole_exps is called Phi in [1] + + # }}} + + mpole_exps_all = np.empty_like(mpole_exps) + comm.Allreduce(mpole_exps, mpole_exps_all) -- GitLab From 82ee4860b5782602fd838ac9b41f12c876683d5b Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sat, 25 Nov 2017 21:32:29 -0600 Subject: [PATCH 026/260] Change CI configuration file --- .gitlab-ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 64d6e73..b7aebc9 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -50,6 +50,7 @@ Python 3.5 POCL: - pocl except: - tags + - distributed-fmm-global Python 3.6 POCL: script: @@ -63,6 +64,7 @@ Python 3.6 POCL: - pocl except: - tags + - distributed-fmm-global Documentation: script: @@ -82,3 +84,4 @@ Flake8: - python3.5 except: - tags + - distributed-fmm-global -- GitLab From 58acf53a1d815ac61efeb5893ac163d199257a1f Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sat, 25 Nov 2017 21:35:02 -0600 Subject: [PATCH 027/260] Change CI configuration file --- .gitlab-ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b7aebc9..8b99942 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -24,6 +24,7 @@ Python 3.5 K40: - nvidia-k40 except: - tags + - distributed-fmm-global Python 2.7 POCL: script: @@ -37,6 +38,7 @@ Python 2.7 POCL: - pocl except: - tags + - distributed-fmm-global Python 3.5 POCL: script: @@ -84,4 +86,3 @@ Flake8: - python3.5 except: - tags - - distributed-fmm-global -- GitLab From ebfac89378d4e71a4b965714dbfcd43cc32aa53c Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sat, 25 Nov 2017 21:36:35 -0600 Subject: [PATCH 028/260] Use two tranversal objects --- boxtree/distributed.py | 845 ++++++++++++++++++----------------------- 1 file changed, 374 insertions(+), 471 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 885e436..b342631 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -50,40 +50,68 @@ class LocalTree(Tree): return self.targets[0].shape[0] @classmethod - def copy_from_global_tree(cls, global_tree, responsible_boxes): - local_tree = global_tree.copy(responsible_boxes=responsible_boxes) + def 
copy_from_global_tree(cls, global_tree, responsible_boxes_list, + ancestor_mask): + local_tree = global_tree.copy( + responsible_boxes_list=responsible_boxes_list, + ancestor_mask=ancestor_mask) local_tree.__class__ = cls return local_tree + def to_device(self, queue): + field_to_device = [ + "box_centers", "box_child_ids", "box_flags", "box_levels", + "box_parent_ids", "box_source_counts_cumul", + "box_source_counts_nonchild", "box_source_starts", + "box_target_counts_cumul", "box_target_counts_nonchild", + "box_target_starts", "level_start_box_nrs_dev", "sources", "targets", + "responsible_boxes_list", "ancestor_mask" + ] + d_tree = self.copy() + for field in field_to_device: + current_obj = d_tree.__getattribute__(field) + if current_obj.dtype == object: + new_obj = np.empty_like(current_obj) + for i in range(current_obj.shape[0]): + new_obj[i] = cl.array.to_device(queue, current_obj[i]) + d_tree.__setattr__(field, new_obj) + else: + d_tree.__setattr__( + field, cl.array.to_device(queue, current_obj)) + return d_tree + class MPI_Tags(): - DIST_TRAV = 0 + DIST_TREE = 0 DIST_WEIGHT = 1 def partition_work(tree, total_rank, queue): - """ This function returns a list of total_rank elements, where element i is a - pyopencl array of indices of process i's responsible boxes. + """ This function returns a pyopencl array of size total_rank*nboxes, where + the (i,j) entry is 1 iff rank i is responsible for box j. """ - responsible_boxes = [] - num_boxes = tree.box_source_starts.shape[0] - num_boxes_per_rank = num_boxes // total_rank - extra_boxes = num_boxes - num_boxes_per_rank * total_rank + responsible_boxes_mask = cl.array.zeros(queue, (total_rank, tree.nboxes), + dtype=np.int8) + responsible_boxes_list = np.empty((total_rank,), dtype=object) + nboxes_per_rank = tree.nboxes // total_rank + extra_boxes = tree.nboxes - nboxes_per_rank * total_rank start_idx = 0 for current_rank in range(extra_boxes): - end_idx = start_idx + num_boxes_per_rank + 1 - responsible_boxes.append(cl.array.arange(queue, start_idx, end_idx, - dtype=tree.box_id_dtype)) + end_idx = start_idx + nboxes_per_rank + 1 + responsible_boxes_mask[current_rank, start_idx:end_idx] = 1 + responsible_boxes_list[current_rank] = cl.array.arange( + queue, start_idx, end_idx, dtype=tree.box_id_dtype) start_idx = end_idx for current_rank in range(extra_boxes, total_rank): - end_idx = start_idx + num_boxes_per_rank - responsible_boxes.append(cl.array.arange(queue, start_idx, end_idx, - dtype=tree.box_id_dtype)) + end_idx = start_idx + nboxes_per_rank + responsible_boxes_mask[current_rank, start_idx:end_idx] = 1 + responsible_boxes_list[current_rank] = cl.array.arange( + queue, start_idx, end_idx, dtype=tree.box_id_dtype) start_idx = end_idx - return responsible_boxes + return responsible_boxes_mask, responsible_boxes_list gen_local_tree_tpl = Template(r""" @@ -92,29 +120,6 @@ typedef ${dtype_to_ctype(tree.particle_id_dtype)} particle_id_t; typedef ${dtype_to_ctype(mask_dtype)} mask_t; typedef ${dtype_to_ctype(tree.coord_dtype)} coord_t; -__kernel void generate_particle_mask( - __global const box_id_t *res_boxes, - __global const particle_id_t *box_particle_starts, - __global const particle_id_t *box_particle_counts_nonchild, - const int total_num_res_boxes, - __global mask_t *particle_mask) -{ - /* - * generate_particle_mask takes the responsible box indices as input and generate - * a mask for responsible particles. 
- */ - int res_box_idx = get_global_id(0); - - if(res_box_idx < total_num_res_boxes) { - box_id_t cur_box = res_boxes[res_box_idx]; - for(particle_id_t i = box_particle_starts[cur_box]; - i < box_particle_starts[cur_box] + box_particle_counts_nonchild[cur_box]; - i++) { - particle_mask[i] = 1; - } - } -} - __kernel void generate_local_particles( const int total_num_particles, % for dim in range(ndims): @@ -143,327 +148,302 @@ __kernel void generate_local_particles( } """, strict_undefined=True) -traversal_preamble_tpl = Template(r""" - #define HAS_CHILD_SOURCES ${HAS_CHILD_SOURCES} - #define HAS_CHILD_TARGETS ${HAS_CHILD_TARGETS} - #define HAS_OWN_SOURCES ${HAS_OWN_SOURCES} - #define HAS_OWN_TARGETS ${HAS_OWN_TARGETS} - typedef ${box_flag_t} box_flag_t; - typedef ${box_id_t} box_id_t; - typedef ${particle_id_t} particle_id_t; - typedef ${box_level_t} box_level_t; -""", strict_undefined=True) -gen_traversal_tpl = Template(r""" -__kernel void generate_tree_flags( - __global box_flag_t *tree_flags, - __global const particle_id_t *box_source_counts_nonchild, - __global const particle_id_t *box_source_counts_cumul, - __global const particle_id_t *box_target_counts_nonchild, - __global const particle_id_t *box_target_counts_cumul) -{ - box_id_t box_idx = get_global_id(0); - box_flag_t flag = 0; - if (box_source_counts_nonchild[box_idx]) - flag |= HAS_OWN_SOURCES; - if (box_source_counts_cumul[box_idx] > box_source_counts_nonchild[box_idx]) - flag |= HAS_CHILD_SOURCES; - if (box_target_counts_nonchild[box_idx]) - flag |= HAS_OWN_TARGETS; - if (box_target_counts_cumul[box_idx] > box_target_counts_nonchild[box_idx]) - flag |= HAS_CHILD_TARGETS; - tree_flags[box_idx] = flag; -} -""", strict_undefined=True) +def gen_local_particles(queue, particles, nparticles, tree, + responsible_boxes, + box_particle_starts, + box_particle_counts_nonchild, + box_particle_counts_cumul, + particle_weights=None): + """ + This helper function generates the sources/targets related fields for + a local tree + """ + # Put particle structures to device memory + d_box_particle_starts = cl.array.to_device(queue, box_particle_starts) + d_box_particle_counts_nonchild = cl.array.to_device( + queue, box_particle_counts_nonchild) + d_box_particle_counts_cumul = cl.array.to_device( + queue, box_particle_counts_cumul) + d_particles = np.empty((tree.dimensions,), dtype=object) + for i in range(tree.dimensions): + d_particles[i] = cl.array.to_device(queue, particles[i]) + + # Generate the particle mask array + d_particle_mask = cl.array.zeros(queue, (nparticles,), + dtype=tree.particle_id_dtype) + particle_mask_knl = cl.elementwise.ElementwiseKernel( + queue.context, + arguments=Template(""" + __global char *responsible_boxes, + __global ${particle_id_t} *box_particle_starts, + __global ${particle_id_t} *box_particle_counts_nonchild, + __global ${particle_id_t} *particle_mask + """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype)), + operation=Template(""" + if(responsible_boxes[i]) { + for(${particle_id_t} pid = box_particle_starts[i]; + pid < box_particle_starts[i] + box_particle_counts_nonchild[i]; + ++pid) { + particle_mask[pid] = 1; + } + } + """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype)) + ) + particle_mask_knl(responsible_boxes, d_box_particle_starts, + d_box_particle_counts_nonchild, d_particle_mask) + + # Generate the scan of the particle mask array + mask_scan_knl = GenericScanKernel( + queue.context, tree.particle_id_dtype, + arguments=Template(""" + __global ${mask_t} *ary, + __global 
${mask_t} *scan + """).render(mask_t=dtype_to_ctype(tree.particle_id_dtype)), + input_expr="ary[i]", + scan_expr="a+b", neutral="0", + output_statement="scan[i + 1] = item;" + ) + d_particle_scan = cl.array.empty(queue, (nparticles + 1,), + dtype=tree.particle_id_dtype) + d_particle_scan[0] = 0 + mask_scan_knl(d_particle_mask, d_particle_scan) + + # Generate particles for rank's local tree + local_nparticles = d_particle_scan[-1].get(queue) + d_local_particles = np.empty((tree.dimensions,), dtype=object) + for i in range(tree.dimensions): + d_local_particles[i] = cl.array.empty(queue, (local_nparticles,), + dtype=tree.coord_dtype) + + d_paticles_list = d_particles.tolist() + for i in range(tree.dimensions): + d_paticles_list[i] = d_paticles_list[i].data + d_local_particles_list = d_local_particles.tolist() + for i in range(tree.dimensions): + d_local_particles_list[i] = d_local_particles_list[i].data + + gen_local_tree_prg = cl.Program( + queue.context, + gen_local_tree_tpl.render( + tree=tree, + dtype_to_ctype=dtype_to_ctype, + mask_dtype=tree.particle_id_dtype, + ndims=tree.dimensions + ) + ).build() + + gen_local_tree_prg.generate_local_particles( + queue, ((nparticles + 127) // 128,), (128,), + np.int32(nparticles), + *d_paticles_list, + d_particle_mask.data, + d_particle_scan.data, + *d_local_particles_list, + g_times_l=True) -SOURCES_PARENTS_AND_TARGETS_TEMPLATE = r"""//CL// -void generate(LIST_ARG_DECL USER_ARG_DECL box_id_t box_id) -{ - box_flag_t flags = box_flags[box_id]; + # Generate "box_particle_starts" of the local tree + local_box_particle_starts = cl.array.empty(queue, (tree.nboxes,), + dtype=tree.particle_id_dtype) + + generate_box_particle_starts = cl.elementwise.ElementwiseKernel( + queue.context, + Template(""" + __global ${particle_id_t} *old_starts, + __global ${particle_id_t} *particle_scan, + __global ${particle_id_t} *new_starts + """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype)), + "new_starts[i] = particle_scan[old_starts[i]]", + name="generate_box_particle_starts" + ) - if (flags & HAS_OWN_SOURCES) - { APPEND_source_boxes(box_id); } + generate_box_particle_starts(d_box_particle_starts, d_particle_scan, + local_box_particle_starts) + + # Generate "box_particle_counts_nonchild" of the local tree + local_box_particle_counts_nonchild = cl.array.zeros( + queue, (tree.nboxes,), dtype=tree.particle_id_dtype) + + generate_box_particle_counts_nonchild = cl.elementwise.ElementwiseKernel( + queue.context, + Template(""" + __global char *res_boxes, + __global ${particle_id_t} *old_counts_nonchild, + __global ${particle_id_t} *new_counts_nonchild + """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype)), + "if(res_boxes[i]) new_counts_nonchild[i] = old_counts_nonchild[i];" + ) - if (flags & HAS_CHILD_SOURCES) - { APPEND_source_parent_boxes(box_id); } + generate_box_particle_counts_nonchild(responsible_boxes, + d_box_particle_counts_nonchild, + local_box_particle_counts_nonchild) + + # Generate "box_particle_counts_cumul" + local_box_particle_counts_cumul = cl.array.empty( + queue, (tree.nboxes,), dtype=tree.particle_id_dtype) + + generate_box_particle_counts_cumul = cl.elementwise.ElementwiseKernel( + queue.context, + Template(""" + __global ${particle_id_t} *old_counts_cumul, + __global ${particle_id_t} *old_starts, + __global ${particle_id_t} *new_counts_cumul, + __global ${particle_id_t} *particle_scan + """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype)), + """ + new_counts_cumul[i] = + particle_scan[old_starts[i] + 
old_counts_cumul[i]] - + particle_scan[old_starts[i]] + """ + ) - %if not sources_are_targets: - if (flags & HAS_OWN_TARGETS) - { APPEND_target_boxes(box_id); } - %endif - if (flags & (HAS_CHILD_TARGETS | HAS_OWN_TARGETS)) - { APPEND_target_or_target_parent_boxes(box_id); } -} -""" + generate_box_particle_counts_cumul(d_box_particle_counts_cumul, + d_box_particle_starts, + local_box_particle_counts_cumul, + d_particle_scan) + + local_particles = np.empty((tree.dimensions,), dtype=object) + for i in range(tree.dimensions): + local_particles[i] = d_local_particles[i].get() + local_box_particle_starts = local_box_particle_starts.get() + local_box_particle_counts_nonchild = local_box_particle_counts_nonchild.get() + local_box_particle_counts_cumul = local_box_particle_counts_cumul.get() + + # {{{ Generate source weights + if particle_weights is not None: + local_particle_weights = cl.array.empty(queue, (local_nparticles,), + dtype=particle_weights.dtype) + gen_local_source_weights_knl = cl.elementwise.ElementwiseKernel( + queue.context, + arguments=Template(""" + __global ${weight_t} *src_weights, + __global ${particle_id_t} *particle_mask, + __global ${particle_id_t} *particle_scan, + __global ${weight_t} *local_weights + """).render( + weight_t=dtype_to_ctype(particle_weights.dtype), + particle_id_t=dtype_to_ctype(tree.particle_id_dtype) + ), + operation=""" + if(particle_mask[i]) { + local_weights[particle_scan[i]] = src_weights[i]; + } + """ + ) + gen_local_source_weights_knl(particle_weights, d_particle_mask, + d_particle_scan, local_particle_weights) + + # }}} + + if particle_weights is not None: + return (local_particles, + local_box_particle_starts, + local_box_particle_counts_nonchild, + local_box_particle_counts_cumul, + local_particle_weights.get()) + else: + return (local_particles, + local_box_particle_starts, + local_box_particle_counts_nonchild, + local_box_particle_counts_cumul) def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): - # Get MPI information + # Get MPI and pyopencl information current_rank = comm.Get_rank() total_rank = comm.Get_size() - ctx = cl.create_some_context() queue = cl.CommandQueue(ctx) - # {{{ Construct local traversal on root + # {{{ Construct local tree for each rank on root if current_rank == 0: + local_tree = np.empty((total_rank,), dtype=object) tree = traversal.tree - ndims = tree.sources.shape[0] - nboxes = tree.box_source_starts.shape[0] - # Partition the work across all ranks by allocating responsible boxes - responsible_boxes = partition_work(tree, total_rank, queue) + d_box_parent_ids = cl.array.to_device(queue, tree.box_parent_ids) - # Compile the program - mask_dtype = tree.particle_id_dtype - gen_local_tree_prg = cl.Program(ctx, gen_local_tree_tpl.render( - tree=tree, - dtype_to_ctype=dtype_to_ctype, - mask_dtype=mask_dtype, - ndims=ndims)).build() - - # Construct mask scan kernel - mask_scan_knl = GenericScanKernel( - ctx, mask_dtype, - arguments=Template(""" - __global ${mask_t} *ary, - __global ${mask_t} *scan - """).render( - mask_t=dtype_to_ctype(mask_dtype) - ), - input_expr="ary[i]", - scan_expr="a+b", neutral="0", - output_statement="scan[i + 1] = item;" - ) + # {{{ Partition the work - src_weights = src_weights[tree.user_source_ids] - src_weights = cl.array.to_device(queue, src_weights) + # Each rank is responsible for calculating the multiple expansion as well as + # evaluating target potentials in *responsible_boxes* + responsible_boxes_mask, responsible_boxes_list = \ + partition_work(tree, total_rank, queue) - def 
gen_local_particles(rank, particles, nparticles, - box_particle_starts, - box_particle_counts_nonchild, - box_particle_counts_cumul, - particle_weights=None): - """ - This helper function generates the sources/targets related fields for - a local tree - """ - # Put particle structures to device memory - d_box_particle_starts = cl.array.to_device(queue, box_particle_starts) - d_box_particle_counts_nonchild = cl.array.to_device( - queue, box_particle_counts_nonchild) - d_box_particle_counts_cumul = cl.array.to_device( - queue, box_particle_counts_cumul) - d_particles = np.empty((ndims,), dtype=object) - for i in range(ndims): - d_particles[i] = cl.array.to_device(queue, particles[i]) - - # Generate the particle mask array - d_particle_mask = cl.array.zeros(queue, (nparticles,), dtype=mask_dtype) - num_responsible_boxes = responsible_boxes[rank].shape[0] - gen_local_tree_prg.generate_particle_mask( - queue, ((num_responsible_boxes + 127)//128,), (128,), - responsible_boxes[rank].data, - d_box_particle_starts.data, - d_box_particle_counts_nonchild.data, - np.int32(num_responsible_boxes), - d_particle_mask.data, - g_times_l=True) - - # Generate the scan of the particle mask array - d_particle_scan = cl.array.empty(queue, (nparticles + 1,), - dtype=tree.particle_id_dtype) - d_particle_scan[0] = 0 - mask_scan_knl(d_particle_mask, d_particle_scan) - - # Generate particles for rank's local tree - local_nparticles = d_particle_scan[-1].get(queue) + 1 - d_local_particles = np.empty((ndims,), dtype=object) - for i in range(ndims): - d_local_particles[i] = cl.array.empty(queue, (local_nparticles,), - dtype=tree.coord_dtype) - - d_paticles_list = d_particles.tolist() - for i in range(ndims): - d_paticles_list[i] = d_paticles_list[i].data - d_local_particles_list = d_local_particles.tolist() - for i in range(ndims): - d_local_particles_list[i] = d_local_particles_list[i].data - - gen_local_tree_prg.generate_local_particles( - queue, ((nparticles + 127) // 128,), (128,), - np.int32(nparticles), - *d_paticles_list, - d_particle_mask.data, - d_particle_scan.data, - *d_local_particles_list, - g_times_l=True) - - # Generate "box_particle_starts" of the local tree - local_box_particle_starts = cl.array.empty(queue, (nboxes,), - dtype=tree.particle_id_dtype) - generate_box_particle_starts = cl.elementwise.ElementwiseKernel( - queue.context, - Template(""" - __global ${particle_id_t} *old_starts, - __global ${scan_t} *particle_scan, - __global ${particle_id_t} *new_starts - """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype), - scan_t=dtype_to_ctype(mask_dtype)), - "new_starts[i] = particle_scan[old_starts[i]]", - name="generate_box_particle_starts" - ) + # In order to evaluate, each rank needs sources in boxes in + # *src_boxes* + src_boxes = responsible_boxes_mask.copy() - generate_box_particle_starts(d_box_particle_starts, d_particle_scan, - local_box_particle_starts) - - # Generate "box_particle_counts_nonchild" of the local tree - local_box_particle_counts_nonchild = cl.array.zeros( - queue, (nboxes,), dtype=tree.particle_id_dtype) - - generate_box_particle_counts_nonchild = cl.elementwise.ElementwiseKernel( - queue.context, - Template(""" - __global ${box_id_t} *res_boxes, - __global ${particle_id_t} *old_counts_nonchild, - __global ${particle_id_t} *new_counts_nonchild - """).render(box_id_t=dtype_to_ctype(tree.box_id_dtype), - particle_id_t=dtype_to_ctype(tree.particle_id_dtype)), - "new_counts_nonchild[res_boxes[i]] = " - "old_counts_nonchild[res_boxes[i]]", - 
name="generate_box_particle_counts_nonchild" - ) + # Add list 1 and list 4 of responsible boxes to src_boxes - generate_box_particle_counts_nonchild(responsible_boxes[rank], - d_box_particle_counts_nonchild, - local_box_particle_counts_nonchild) - - # Generate "box_particle_counts_cumul" - local_box_particle_counts_cumul = cl.array.empty( - queue, (nboxes,), dtype=tree.particle_id_dtype) - - generate_box_particle_counts_cumul = cl.elementwise.ElementwiseKernel( - queue.context, - Template(""" - __global ${particle_id_t} *old_counts_cumul, - __global ${particle_id_t} *old_starts, - __global ${particle_id_t} *new_counts_cumul, - __global ${mask_t} *particle_scan - """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype), - mask_t=dtype_to_ctype(mask_dtype)), - "new_counts_cumul[i] = " - "particle_scan[old_starts[i] + old_counts_cumul[i]] - " - "particle_scan[old_starts[i]]", - name="generate_box_particle_counts_cumul" + # Calculate ancestors of responsible boxes + ancestor_boxes = cl.array.zeros(queue, (total_rank, tree.nboxes), + dtype=np.int8) + for rank in range(total_rank): + ancestor_boxes_last = responsible_boxes_mask[rank, :].copy() + mark_parent_knl = cl.elementwise.ElementwiseKernel( + ctx, + "__global char *current, __global char *parent, " + "__global %s *box_parent_ids" % dtype_to_ctype(tree.box_id_dtype), + "if(i != 0 && current[i]) parent[box_parent_ids[i]] = 1" ) - - generate_box_particle_counts_cumul(d_box_particle_counts_cumul, - d_box_particle_starts, - local_box_particle_counts_cumul, - d_particle_scan) - - local_particles = np.empty((ndims,), dtype=object) - for i in range(ndims): - local_particles[i] = d_local_particles[i].get() - local_box_particle_starts = local_box_particle_starts.get() - local_box_particle_counts_nonchild = \ - local_box_particle_counts_nonchild.get() - local_box_particle_counts_cumul = local_box_particle_counts_cumul.get() - - # {{{ Distribute source weights - if particle_weights is not None: - local_particle_weights = cl.array.empty(queue, (local_nparticles,), - dtype=src_weights.dtype) - gen_local_source_weights_knl = cl.elementwise.ElementwiseKernel( - ctx, - arguments=Template(""" - __global ${weight_t} *src_weights, - __global ${mask_t} *particle_mask, - __global ${particle_id_t} *particle_scan, - __global ${weight_t} *local_weights - """).render( - weight_t=dtype_to_ctype(src_weights.dtype), - mask_t=dtype_to_ctype(mask_dtype), - particle_id_t=dtype_to_ctype(tree.particle_id_dtype) - ), - operation=""" - if(particle_mask[i]) { - local_weights[particle_scan[i]] = src_weights[i]; - } - """ - ) - gen_local_source_weights_knl( - particle_weights, - d_particle_mask, - d_particle_scan, - local_particle_weights - ) - - # }}} - - if particle_weights is not None: - return (local_particles, - local_box_particle_starts, - local_box_particle_counts_nonchild, - local_box_particle_counts_cumul, - local_particle_weights.get()) - else: - return (local_particles, - local_box_particle_starts, - local_box_particle_counts_nonchild, - local_box_particle_counts_cumul) - - local_trav = np.empty((total_rank,), dtype=object) + while ancestor_boxes_last.any(): + ancestor_boxes_new = cl.array.zeros(queue, (tree.nboxes,), + dtype=np.int8) + mark_parent_knl(ancestor_boxes_last, ancestor_boxes_new, + d_box_parent_ids) + ancestor_boxes_new = ancestor_boxes_new & (~ancestor_boxes[rank, :]) + ancestor_boxes[rank, :] = \ + ancestor_boxes[rank, :] | ancestor_boxes_new + ancestor_boxes_last = ancestor_boxes_new + + # }}} + + # Convert src_weights to tree order + 
src_weights = src_weights[tree.user_source_ids] + src_weights = cl.array.to_device(queue, src_weights) local_src_weights = np.empty((total_rank,), dtype=object) # request objects for non-blocking communication - trav_req = np.empty((total_rank,), dtype=object) + tree_req = np.empty((total_rank,), dtype=object) weight_req = np.empty((total_rank,), dtype=object) for rank in range(total_rank): - local_tree = LocalTree.copy_from_global_tree( - tree, responsible_boxes[rank].get()) - - (local_tree.sources, - local_tree.box_source_starts, - local_tree.box_source_counts_nonchild, - local_tree.box_source_counts_cumul, + local_tree[rank] = LocalTree.copy_from_global_tree( + tree, responsible_boxes_list[rank].get(), + ancestor_boxes[rank].get()) + + (local_tree[rank].sources, + local_tree[rank].box_source_starts, + local_tree[rank].box_source_counts_nonchild, + local_tree[rank].box_source_counts_cumul, local_src_weights[rank]) = \ - gen_local_particles(rank, tree.sources, tree.nsources, + gen_local_particles(queue, tree.sources, tree.nsources, tree, + src_boxes[rank], tree.box_source_starts, tree.box_source_counts_nonchild, tree.box_source_counts_cumul, src_weights) - (local_tree.targets, - local_tree.box_target_starts, - local_tree.box_target_counts_nonchild, - local_tree.box_target_counts_cumul) = \ - gen_local_particles(rank, tree.targets, tree.ntargets, + (local_tree[rank].targets, + local_tree[rank].box_target_starts, + local_tree[rank].box_target_counts_nonchild, + local_tree[rank].box_target_counts_cumul) = \ + gen_local_particles(queue, tree.targets, tree.ntargets, tree, + responsible_boxes_mask[rank], tree.box_target_starts, tree.box_target_counts_nonchild, - tree.box_source_counts_cumul, + tree.box_target_counts_cumul, None) - local_tree.user_source_ids = None - local_tree.sorted_target_ids = None - local_tree.box_flags = None - - local_trav[rank] = traversal.copy() - local_trav[rank].tree = local_tree - local_trav[rank].source_boxes = None - local_trav[rank].target_boxes = None - local_trav[rank].source_parent_boxes = None - local_trav[rank].level_start_source_box_nrs = None - local_trav[rank].level_start_source_parent_box_nrs = None - local_trav[rank].target_or_target_parent_boxes = None - local_trav[rank].level_start_target_box_nrs = None - local_trav[rank].level_start_target_or_target_parent_box_nrs = None - - trav_req[rank] = comm.isend(local_trav[rank], dest=rank, - tag=MPI_Tags.DIST_TRAV) + local_tree[rank].source_radii = None + local_tree[rank].target_radii = None + local_tree[rank].user_source_ids = None + local_tree[rank].sorted_target_ids = None + + tree_req[rank] = comm.isend(local_tree[rank], dest=rank, + tag=MPI_Tags.DIST_TREE) weight_req[rank] = comm.isend(local_src_weights[rank], dest=rank, tag=MPI_Tags.DIST_WEIGHT) @@ -472,11 +452,10 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): # Recieve the local trav from root if current_rank == 0: for rank in range(1, total_rank): - trav_req[rank].wait() - local_trav = local_trav[0] + tree_req[rank].wait() + local_tree = local_tree[0] else: - local_trav = comm.recv(source=0, tag=MPI_Tags.DIST_TRAV) - local_tree = local_trav.tree + local_tree = comm.recv(source=0, tag=MPI_Tags.DIST_TREE) # Recieve source weights from root if current_rank == 0: @@ -486,162 +465,82 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): else: local_src_weights = comm.recv(source=0, tag=MPI_Tags.DIST_WEIGHT) - from boxtree.tree import box_flags_enum - traversal_preamble = traversal_preamble_tpl.render( - 
box_flag_t=dtype_to_ctype(box_flags_enum.dtype), - box_id_t=dtype_to_ctype(local_tree.box_id_dtype), - particle_id_t=dtype_to_ctype(local_tree.particle_id_dtype), - box_level_t=dtype_to_ctype(local_tree.box_level_dtype), - HAS_CHILD_SOURCES=box_flags_enum.HAS_CHILD_SOURCES, - HAS_CHILD_TARGETS=box_flags_enum.HAS_CHILD_TARGETS, - HAS_OWN_SOURCES=box_flags_enum.HAS_OWN_SOURCES, - HAS_OWN_TARGETS=box_flags_enum.HAS_OWN_TARGETS + d_tree = local_tree.to_device(queue) + + # Modify box flags for targets + from boxtree import box_flags_enum + box_flag_t = dtype_to_ctype(box_flags_enum.dtype) + modify_target_flags_knl = cl.elementwise.ElementwiseKernel( + queue.context, + Template(""" + __global ${particle_id_t} *box_target_counts_nonchild, + __global ${particle_id_t} *box_target_counts_cumul, + __global ${box_flag_t} *box_flags + """).render(particle_id_t=dtype_to_ctype(local_tree.particle_id_dtype), + box_flag_t=box_flag_t), + Template(""" + box_flags[i] &= (~${HAS_OWN_TARGETS}); + box_flags[i] &= (~${HAS_CHILD_TARGETS}); + if(box_target_counts_nonchild[i]) box_flags[i] |= ${HAS_OWN_TARGETS}; + if(box_target_counts_nonchild[i] < box_target_counts_cumul[i]) + box_flags[i] |= ${HAS_CHILD_TARGETS}; + """).render(HAS_OWN_TARGETS=("(" + box_flag_t + ") " + + str(box_flags_enum.HAS_OWN_TARGETS)), + HAS_CHILD_TARGETS=("(" + box_flag_t + ") " + + str(box_flags_enum.HAS_CHILD_TARGETS))) ) - - # {{{ Fetch local tree to device memory - - d_box_source_counts_nonchild = cl.array.to_device( - queue, local_tree.box_source_counts_nonchild) - d_box_source_counts_cumul = cl.array.to_device( - queue, local_tree.box_source_counts_cumul) - d_box_target_counts_nonchild = cl.array.to_device( - queue, local_tree.box_target_counts_nonchild) - d_box_target_counts_cumul = cl.array.to_device( - queue, local_tree.box_target_counts_cumul) - local_tree.box_flags = cl.array.empty(queue, (local_tree.nboxes,), - box_flags_enum.dtype) - d_level_start_box_nrs = cl.array.to_device( - queue, local_tree.level_start_box_nrs) - d_box_levels = cl.array.to_device( - queue, local_tree.box_levels) - - # }}} - - gen_traversal_src = traversal_preamble + gen_traversal_tpl.render() - gen_traversal_prg = cl.Program(ctx, gen_traversal_src).build() - - gen_traversal_prg.generate_tree_flags( - queue, ((local_tree.nboxes + 127) // 128,), (128,), - local_tree.box_flags.data, - d_box_source_counts_nonchild.data, - d_box_source_counts_cumul.data, - d_box_target_counts_nonchild.data, - d_box_target_counts_cumul.data, - g_times_l=True) - - # {{{ - - from pyopencl.algorithm import ListOfListsBuilder - from pyopencl.tools import VectorArg - - sources_parents_and_targets_builder = ListOfListsBuilder( - ctx, - [("source_parent_boxes", local_tree.box_id_dtype), - ("source_boxes", local_tree.box_id_dtype), - ("target_or_target_parent_boxes", local_tree.box_id_dtype)] + ( - [("target_boxes", local_tree.box_id_dtype)] - if not local_tree.sources_are_targets else []), - traversal_preamble + Template(SOURCES_PARENTS_AND_TARGETS_TEMPLATE).render(), - arg_decls=[VectorArg(box_flags_enum.dtype, "box_flags")], - name_prefix="sources_parents_and_targets") - - result, evt = sources_parents_and_targets_builder( - queue, local_tree.nboxes, local_tree.box_flags.data) - - local_trav.source_boxes = result["source_boxes"].lists - if not local_tree.sources_are_targets: - local_trav.target_boxes = result["target_boxes"].lists - else: - local_trav.target_boxes = local_trav.source_boxes - local_trav.source_parent_boxes = result["source_parent_boxes"].lists - 
local_trav.target_or_target_parent_boxes = \ - result["target_or_target_parent_boxes"].lists - - # }}} - - # {{{ - level_start_box_nrs_extractor = cl.elementwise.ElementwiseTemplate( - arguments="""//CL// - box_id_t *level_start_box_nrs, - box_level_t *box_levels, - box_id_t *box_list, - box_id_t *list_level_start_box_nrs, - """, - - operation=r"""//CL// - // Kernel is ranged so that this is true: - // assert(i > 0); - - box_id_t my_box_id = box_list[i]; - int my_level = box_levels[my_box_id]; - - bool is_level_leading_box; - if (i == 0) - is_level_leading_box = true; - else - { - box_id_t prev_box_id = box_list[i-1]; - box_id_t my_level_start = level_start_box_nrs[my_level]; - - is_level_leading_box = ( - prev_box_id < my_level_start - && my_level_start <= my_box_id); - } - - if (is_level_leading_box) - list_level_start_box_nrs[my_level] = i; - """, - name="extract_level_start_box_nrs").build(ctx, - type_aliases=( - ("box_id_t", local_tree.box_id_dtype), - ("box_level_t", local_tree.box_level_dtype), - ) + modify_target_flags_knl(d_tree.box_target_counts_nonchild, + d_tree.box_target_counts_cumul, + d_tree.box_flags) + + from boxtree.traversal import FMMTraversalBuilder + tg = FMMTraversalBuilder(queue.context) + d_trav_global, _ = tg(queue, d_tree, debug=True) + trav_global = d_trav_global.get(queue=queue) + + # Source flags + d_tree.box_flags = d_tree.box_flags & 250 + modify_own_sources_knl = cl.elementwise.ElementwiseKernel( + queue.context, + Template(""" + __global ${box_id_t} *responsible_box_list, + __global ${box_flag_t} *box_flags + """).render(box_id_t=dtype_to_ctype(local_tree.box_id_dtype), + box_flag_t=box_flag_t), + Template(r""" + box_flags[responsible_box_list[i]] |= ${HAS_OWN_SOURCES}; + """).render(HAS_OWN_SOURCES=("(" + box_flag_t + ") " + + str(box_flags_enum.HAS_OWN_SOURCES))) ) + modify_child_sources_knl = cl.elementwise.ElementwiseKernel( + queue.context, + Template(""" + __global char *ancestor_box_mask, + __global ${box_flag_t} *box_flags + """).render(box_flag_t=box_flag_t), + Template(""" + if(ancestor_box_mask[i]) box_flags[i] |= ${HAS_CHILD_SOURCES}; + """).render(HAS_CHILD_SOURCES=("(" + box_flag_t + ") " + + str(box_flags_enum.HAS_CHILD_SOURCES))) + ) + modify_own_sources_knl(d_tree.responsible_boxes_list, d_tree.box_flags) + modify_child_sources_knl(d_tree.ancestor_mask, d_tree.box_flags) - def extract_level_start_box_nrs(box_list, wait_for): - result = cl.array.empty(queue, local_tree.nlevels + 1, - local_tree.box_id_dtype) \ - .fill(len(box_list)) - evt = level_start_box_nrs_extractor( - d_level_start_box_nrs, - d_box_levels, - box_list, - result, - range=slice(0, len(box_list)), - queue=queue, wait_for=wait_for) - - result = result.get() - - # Postprocess result for unoccupied levels - prev_start = len(box_list) - for ilev in range(local_tree.nlevels-1, -1, -1): - result[ilev] = prev_start = min(result[ilev], prev_start) - - return result, evt - - local_trav.level_start_source_box_nrs, _ = \ - extract_level_start_box_nrs(local_trav.source_boxes, wait_for=[]) - local_trav.level_start_source_parent_box_nrs, _ = \ - extract_level_start_box_nrs(local_trav.source_parent_boxes, wait_for=[]) - local_trav.level_start_target_box_nrs, _ = \ - extract_level_start_box_nrs(local_trav.target_boxes, wait_for=[]) - local_trav.level_start_target_or_target_parent_box_nrs, _ = \ - extract_level_start_box_nrs(local_trav.target_or_target_parent_boxes, - wait_for=[]) - - local_trav = local_trav.get(queue=queue) + d_trav_local, _ = tg(queue, d_tree, debug=True) + trav_local = 
d_trav_local.get(queue=queue) def fmm_level_to_nterms(tree, level): - return 20 + return 3 wrangler = FMMLibExpansionWrangler( - local_trav.tree, 0, fmm_level_to_nterms=fmm_level_to_nterms) + local_tree, 0, fmm_level_to_nterms=fmm_level_to_nterms) # {{{ "Step 2.1:" Construct local multipoles logger.debug("construct local multipoles") + mpole_exps = wrangler.form_multipoles( - local_trav.level_start_source_box_nrs, - local_trav.source_boxes, + trav_local.level_start_source_box_nrs, + trav_local.source_boxes, local_src_weights) # }}} @@ -650,13 +549,17 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): logger.debug("propagate multipoles upward") wrangler.coarsen_multipoles( - local_trav.level_start_source_parent_box_nrs, - local_trav.source_parent_boxes, + trav_local.level_start_source_parent_box_nrs, + trav_local.source_parent_boxes, mpole_exps) # mpole_exps is called Phi in [1] # }}} + # {{{ Communicate mpole + mpole_exps_all = np.empty_like(mpole_exps) comm.Allreduce(mpole_exps, mpole_exps_all) + + # }}} -- GitLab From e224d27581c98365c3f8100a53f311dd1d639a47 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 26 Nov 2017 21:50:40 -0600 Subject: [PATCH 029/260] Remaining steps of FMM --- boxtree/distributed.py | 220 +++++++++++++++++++++++++++++++++++---- test/test_distributed.py | 21 ++-- 2 files changed, 214 insertions(+), 27 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index b342631..103d680 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -84,6 +84,7 @@ class LocalTree(Tree): class MPI_Tags(): DIST_TREE = 0 DIST_WEIGHT = 1 + GATHER_POTENTIALS = 2 def partition_work(tree, total_rank, queue): @@ -154,7 +155,8 @@ def gen_local_particles(queue, particles, nparticles, tree, box_particle_starts, box_particle_counts_nonchild, box_particle_counts_cumul, - particle_weights=None): + particle_weights=None, + return_mask_scan=False): """ This helper function generates the sources/targets related fields for a local tree @@ -335,17 +337,18 @@ def gen_local_particles(queue, particles, nparticles, tree, # }}} + rtv = (local_particles, + local_box_particle_starts, + local_box_particle_counts_nonchild, + local_box_particle_counts_cumul) + if particle_weights is not None: - return (local_particles, - local_box_particle_starts, - local_box_particle_counts_nonchild, - local_box_particle_counts_cumul, - local_particle_weights.get()) - else: - return (local_particles, - local_box_particle_starts, - local_box_particle_counts_nonchild, - local_box_particle_counts_cumul) + rtv = rtv + (local_particle_weights.get(),) + + if return_mask_scan: + rtv = rtv + (d_particle_mask, d_particle_scan, local_nparticles) + + return rtv def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): @@ -359,8 +362,11 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): # {{{ Construct local tree for each rank on root if current_rank == 0: - local_tree = np.empty((total_rank,), dtype=object) tree = traversal.tree + local_tree = np.empty((total_rank,), dtype=object) + local_target_mask = np.empty((total_rank,), dtype=object) + local_target_scan = np.empty((total_rank,), dtype=object) + local_ntargets = np.empty((total_rank,), dtype=tree.particle_id_dtype) d_box_parent_ids = cl.array.to_device(queue, tree.box_parent_ids) @@ -372,10 +378,54 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): partition_work(tree, total_rank, queue) # In order to evaluate, each rank needs sources in boxes in - # *src_boxes* - src_boxes = responsible_boxes_mask.copy() + # 
*src_boxes_mask* + src_boxes_mask = responsible_boxes_mask.copy() - # Add list 1 and list 4 of responsible boxes to src_boxes + # Add list 1 and list 4 of responsible boxes to src_boxes_mask + for rank in range(total_rank): + add_interaction_list_boxes = cl.elementwise.ElementwiseKernel( + ctx, + Template(""" + __global ${box_id_t} *box_list, + __global char *responsible_boxes_mask, + __global ${box_id_t} *interaction_boxes_starts, + __global ${box_id_t} *interaction_boxes_lists, + __global char *src_boxes_mask + """).render(box_id_t=dtype_to_ctype(tree.box_id_dtype)), + Template(""" + typedef ${box_id_t} box_id_t; + box_id_t current_box = box_list[i]; + if(responsible_boxes_mask[current_box]) { + for(box_id_t box_idx = interaction_boxes_starts[i]; + box_idx < interaction_boxes_starts[i + 1]; + ++box_idx) + src_boxes_mask[interaction_boxes_lists[box_idx]] = 1; + } + """).render(box_id_t=dtype_to_ctype(tree.box_id_dtype)), + ) + + d_target_boxes = cl.array.to_device(queue, traversal.target_boxes) + d_neighbor_source_boxes_starts = cl.array.to_device( + queue, traversal.neighbor_source_boxes_starts) + d_neighbor_source_boxes_lists = cl.array.to_device( + queue, traversal.neighbor_source_boxes_lists) + add_interaction_list_boxes( + d_target_boxes, responsible_boxes_mask[rank], + d_neighbor_source_boxes_starts, + d_neighbor_source_boxes_lists, src_boxes_mask[rank], + range=range(0, traversal.target_boxes.shape[0])) + + d_target_or_target_parent_boxes = cl.array.to_device( + queue, traversal.target_or_target_parent_boxes) + d_from_sep_bigger_starts = cl.array.to_device( + queue, traversal.from_sep_bigger_starts) + d_from_sep_bigger_lists = cl.array.to_device( + queue, traversal.from_sep_bigger_lists) + add_interaction_list_boxes( + d_target_or_target_parent_boxes, responsible_boxes_mask[rank], + d_from_sep_bigger_starts, d_from_sep_bigger_lists, + src_boxes_mask[rank], + range=range(0, traversal.target_or_target_parent_boxes.shape[0])) # Calculate ancestors of responsible boxes ancestor_boxes = cl.array.zeros(queue, (total_rank, tree.nboxes), @@ -420,7 +470,7 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): local_tree[rank].box_source_counts_cumul, local_src_weights[rank]) = \ gen_local_particles(queue, tree.sources, tree.nsources, tree, - src_boxes[rank], + src_boxes_mask[rank], tree.box_source_starts, tree.box_source_counts_nonchild, tree.box_source_counts_cumul, @@ -429,13 +479,16 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): (local_tree[rank].targets, local_tree[rank].box_target_starts, local_tree[rank].box_target_counts_nonchild, - local_tree[rank].box_target_counts_cumul) = \ + local_tree[rank].box_target_counts_cumul, + local_target_mask[rank], + local_target_scan[rank], + local_ntargets[rank]) = \ gen_local_particles(queue, tree.targets, tree.ntargets, tree, responsible_boxes_mask[rank], tree.box_target_starts, tree.box_target_counts_nonchild, tree.box_target_counts_cumul, - None) + None, return_mask_scan=True) local_tree[rank].source_radii = None local_tree[rank].target_radii = None @@ -562,4 +615,135 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): mpole_exps_all = np.empty_like(mpole_exps) comm.Allreduce(mpole_exps, mpole_exps_all) + mpole_exps = mpole_exps_all + + # }}} + + # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") + + logger.debug("direct evaluation from neighbor source boxes ('list 1')") + potentials = wrangler.eval_direct( + trav_global.target_boxes, + trav_global.neighbor_source_boxes_starts, + 
trav_global.neighbor_source_boxes_lists, + local_src_weights) + + # these potentials are called alpha in [1] + # }}} + + # {{{ "Stage 4:" translate separated siblings' ("list 2") mpoles to local + + logger.debug("translate separated siblings' ('list 2') mpoles to local") + local_exps = wrangler.multipole_to_local( + trav_global.level_start_target_or_target_parent_box_nrs, + trav_global.target_or_target_parent_boxes, + trav_global.from_sep_siblings_starts, + trav_global.from_sep_siblings_lists, + mpole_exps) + + # local_exps represents both Gamma and Delta in [1] + + # }}} + + # {{{ "Stage 5:" evaluate sep. smaller mpoles ("list 3") at particles + + logger.debug("evaluate sep. smaller mpoles at particles ('list 3 far')") + + # (the point of aiming this stage at particles is specifically to keep its + # contribution *out* of the downward-propagating local expansions) + + potentials = potentials + wrangler.eval_multipoles( + trav_global.level_start_target_box_nrs, + trav_global.target_boxes, + trav_global.from_sep_smaller_by_level, + mpole_exps) + + # }}} + + # {{{ "Stage 6:" form locals for separated bigger source boxes ("list 4") + + logger.debug("form locals for separated bigger source boxes ('list 4 far')") + + local_exps = local_exps + wrangler.form_locals( + trav_global.level_start_target_or_target_parent_box_nrs, + trav_global.target_or_target_parent_boxes, + trav_global.from_sep_bigger_starts, + trav_global.from_sep_bigger_lists, + local_src_weights) + + # }}} + + # {{{ "Stage 7:" propagate local_exps downward + + logger.debug("propagate local_exps downward") + + wrangler.refine_locals( + trav_global.level_start_target_or_target_parent_box_nrs, + trav_global.target_or_target_parent_boxes, + local_exps) + + # }}} + + # {{{ "Stage 8:" evaluate locals + + logger.debug("evaluate locals") + + potentials = potentials + wrangler.eval_locals( + trav_global.level_start_target_box_nrs, + trav_global.target_boxes, + local_exps) + + # }}} + + potentials_mpi_type = MPI._typedict[potentials.dtype.char] + if current_rank == 0: + potentials_all_ranks = np.empty((total_rank,), dtype=object) + for i in range(1, total_rank): + potentials_all_ranks[i] = np.empty( + (local_ntargets[i],), dtype=potentials.dtype) + comm.Recv([potentials_all_ranks[i], potentials_mpi_type], + source=i, tag=MPI_Tags.GATHER_POTENTIALS) + else: + comm.Send([potentials, potentials_mpi_type], + dest=0, tag=MPI_Tags.GATHER_POTENTIALS) + + if current_rank == 0: + d_potentials = cl.array.to_device(queue, potentials) + fill_potentials_knl = cl.elementwise.ElementwiseKernel( + ctx, + Template(""" + __global char *particle_mask, + __global ${particle_id_t} *particle_scan, + __global ${potential_t} *local_potentials, + __global ${potential_t} *potentials + """).render( + particle_id_t=dtype_to_ctype(tree.particle_id_dtype), + potential_t=dtype_to_ctype(potentials.dtype)), + """ + if(particle_mask[i]) { + potentials[i] = local_potentials[particle_scan[i]]; + } + """ + ) + + for i in range(1, total_rank): + local_potentials = cl.array.to_device(queue, potentials_all_ranks[i]) + fill_potentials_knl( + local_target_mask[i], local_target_scan[i], + local_potentials, d_potentials) + + potentials = d_potentials.get() + + global_wrangler = FMMLibExpansionWrangler( + tree, 0, fmm_level_to_nterms=fmm_level_to_nterms) + + logger.debug("reorder potentials") + result = global_wrangler.reorder_potentials(potentials) + + logger.debug("finalize potentials") + result = global_wrangler.finalize_potentials(result) + + logger.info("fmm complete") + + return 
result diff --git a/test/test_distributed.py b/test/test_distributed.py index 9b306f6..47669b6 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -2,11 +2,12 @@ import numpy as np import sys from mpi4py import MPI from boxtree.distributed import drive_dfmm +import numpy.linalg as la # Parameters dims = 2 -nsources = 3000 -ntargets = 1000 +nsources = 100000 +ntargets = 20000 dtype = np.float64 # Get the current rank @@ -47,7 +48,6 @@ if rank == 0: plt.show() # Calculate potentials using direct evaluation - # import numpy.linalg as la # distances = la.norm(sources_host.reshape(1, nsources, 2) - \ # targets_host.reshape(ntargets, 1, 2), # ord=2, axis=2) @@ -61,24 +61,27 @@ if rank == 0: from boxtree.traversal import FMMTraversalBuilder tg = FMMTraversalBuilder(ctx) - trav, _ = tg(queue, tree, debug=True) - trav = trav.get(queue=queue) + d_trav, _ = tg(queue, tree, debug=True) + trav = d_trav.get(queue=queue) # Get pyfmmlib expansion wrangler from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler def fmm_level_to_nterms(tree, level): - return 20 + return 3 wrangler = FMMLibExpansionWrangler( trav.tree, 0, fmm_level_to_nterms=fmm_level_to_nterms) # Compute FMM using shared memory parallelism - # from boxtree.fmm import drive_fmm - # pot_fmm = drive_fmm(trav, wrangler, sources_weights)* 2 * np.pi + from boxtree.fmm import drive_fmm + pot_fmm = drive_fmm(trav, wrangler, sources_weights) * 2 * np.pi # print(la.norm(pot_fmm - pot_naive, ord=2)) # Compute FMM using distributed memory parallelism # Note: The drive_dfmm interface works as follows: # Rank 0 passes the correct trav, wrangler, and sources_weights # All other ranks pass None to these arguments -pot_dfmm = drive_dfmm(trav, wrangler, sources_weights) +pot_dfmm = drive_dfmm(trav, sources_weights) + +if rank == 0: + print(la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=2)) -- GitLab From c36b00f26271fd6014852c9a4c2a3fde914988b8 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 26 Nov 2017 22:16:15 -0600 Subject: [PATCH 030/260] Fix bug --- boxtree/distributed.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 103d680..45b1283 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -699,6 +699,7 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): potentials_mpi_type = MPI._typedict[potentials.dtype.char] if current_rank == 0: potentials_all_ranks = np.empty((total_rank,), dtype=object) + potentials_all_ranks[0] = potentials for i in range(1, total_rank): potentials_all_ranks[i] = np.empty( (local_ntargets[i],), dtype=potentials.dtype) @@ -709,26 +710,29 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): dest=0, tag=MPI_Tags.GATHER_POTENTIALS) if current_rank == 0: - d_potentials = cl.array.to_device(queue, potentials) + d_potentials = cl.array.empty(queue, (tree.ntargets,), + dtype=potentials.dtype) fill_potentials_knl = cl.elementwise.ElementwiseKernel( ctx, Template(""" - __global char *particle_mask, + __global ${particle_id_t} *particle_mask, __global ${particle_id_t} *particle_scan, __global ${potential_t} *local_potentials, __global ${potential_t} *potentials """).render( particle_id_t=dtype_to_ctype(tree.particle_id_dtype), potential_t=dtype_to_ctype(potentials.dtype)), - """ + r""" + // printf("%d ", particle_mask[i]); if(particle_mask[i]) { potentials[i] = local_potentials[particle_scan[i]]; } """ ) - for i in range(1, total_rank): + for i in range(total_rank): local_potentials = 
cl.array.to_device(queue, potentials_all_ranks[i]) + print(local_target_mask[i]) fill_potentials_knl( local_target_mask[i], local_target_scan[i], local_potentials, d_potentials) -- GitLab From a06d01354620e8bd23edd01dffdbe547fcadba63 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 27 Nov 2017 10:00:19 -0600 Subject: [PATCH 031/260] Fix bugs --- boxtree/distributed.py | 109 +++++++++++++++++++++------------------ test/test_distributed.py | 4 +- 2 files changed, 62 insertions(+), 51 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 45b1283..36090f7 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -362,6 +362,10 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): # {{{ Construct local tree for each rank on root if current_rank == 0: + import time + start_time = time.time() + print("time start") + tree = traversal.tree local_tree = np.empty((total_rank,), dtype=object) local_target_mask = np.empty((total_rank,), dtype=object) @@ -377,33 +381,55 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): responsible_boxes_mask, responsible_boxes_list = \ partition_work(tree, total_rank, queue) + # Calculate ancestors of responsible boxes + ancestor_boxes = cl.array.zeros(queue, (total_rank, tree.nboxes), + dtype=np.int8) + for rank in range(total_rank): + ancestor_boxes_last = responsible_boxes_mask[rank, :].copy() + mark_parent_knl = cl.elementwise.ElementwiseKernel( + ctx, + "__global char *current, __global char *parent, " + "__global %s *box_parent_ids" % dtype_to_ctype(tree.box_id_dtype), + "if(i != 0 && current[i]) parent[box_parent_ids[i]] = 1" + ) + while ancestor_boxes_last.any(): + ancestor_boxes_new = cl.array.zeros(queue, (tree.nboxes,), + dtype=np.int8) + mark_parent_knl(ancestor_boxes_last, ancestor_boxes_new, + d_box_parent_ids) + ancestor_boxes_new = ancestor_boxes_new & (~ancestor_boxes[rank, :]) + ancestor_boxes[rank, :] = \ + ancestor_boxes[rank, :] | ancestor_boxes_new + ancestor_boxes_last = ancestor_boxes_new + # In order to evaluate, each rank needs sources in boxes in # *src_boxes_mask* src_boxes_mask = responsible_boxes_mask.copy() - # Add list 1 and list 4 of responsible boxes to src_boxes_mask - for rank in range(total_rank): - add_interaction_list_boxes = cl.elementwise.ElementwiseKernel( - ctx, - Template(""" - __global ${box_id_t} *box_list, - __global char *responsible_boxes_mask, - __global ${box_id_t} *interaction_boxes_starts, - __global ${box_id_t} *interaction_boxes_lists, - __global char *src_boxes_mask - """).render(box_id_t=dtype_to_ctype(tree.box_id_dtype)), - Template(""" - typedef ${box_id_t} box_id_t; - box_id_t current_box = box_list[i]; - if(responsible_boxes_mask[current_box]) { - for(box_id_t box_idx = interaction_boxes_starts[i]; - box_idx < interaction_boxes_starts[i + 1]; - ++box_idx) - src_boxes_mask[interaction_boxes_lists[box_idx]] = 1; - } - """).render(box_id_t=dtype_to_ctype(tree.box_id_dtype)), - ) + # Add list 1 and list 4 to src_boxes_mask + add_interaction_list_boxes = cl.elementwise.ElementwiseKernel( + ctx, + Template(""" + __global ${box_id_t} *box_list, + __global char *responsible_boxes_mask, + __global ${box_id_t} *interaction_boxes_starts, + __global ${box_id_t} *interaction_boxes_lists, + __global char *src_boxes_mask + """).render(box_id_t=dtype_to_ctype(tree.box_id_dtype)), + Template(r""" + typedef ${box_id_t} box_id_t; + box_id_t current_box = box_list[i]; + if(responsible_boxes_mask[current_box]) { + for(box_id_t box_idx = interaction_boxes_starts[i]; + 
box_idx < interaction_boxes_starts[i + 1]; + ++box_idx) + src_boxes_mask[interaction_boxes_lists[box_idx]] = 1; + } + """).render(box_id_t=dtype_to_ctype(tree.box_id_dtype)), + ) + for rank in range(total_rank): + # Add list 1 of responsible boxes d_target_boxes = cl.array.to_device(queue, traversal.target_boxes) d_neighbor_source_boxes_starts = cl.array.to_device( queue, traversal.neighbor_source_boxes_starts) @@ -415,6 +441,7 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): d_neighbor_source_boxes_lists, src_boxes_mask[rank], range=range(0, traversal.target_boxes.shape[0])) + # Add list 4 of responsible boxes or ancestor boxes d_target_or_target_parent_boxes = cl.array.to_device( queue, traversal.target_or_target_parent_boxes) d_from_sep_bigger_starts = cl.array.to_device( @@ -422,34 +449,16 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): d_from_sep_bigger_lists = cl.array.to_device( queue, traversal.from_sep_bigger_lists) add_interaction_list_boxes( - d_target_or_target_parent_boxes, responsible_boxes_mask[rank], + d_target_or_target_parent_boxes, + responsible_boxes_mask[rank] | ancestor_boxes[rank], d_from_sep_bigger_starts, d_from_sep_bigger_lists, src_boxes_mask[rank], range=range(0, traversal.target_or_target_parent_boxes.shape[0])) - # Calculate ancestors of responsible boxes - ancestor_boxes = cl.array.zeros(queue, (total_rank, tree.nboxes), - dtype=np.int8) - for rank in range(total_rank): - ancestor_boxes_last = responsible_boxes_mask[rank, :].copy() - mark_parent_knl = cl.elementwise.ElementwiseKernel( - ctx, - "__global char *current, __global char *parent, " - "__global %s *box_parent_ids" % dtype_to_ctype(tree.box_id_dtype), - "if(i != 0 && current[i]) parent[box_parent_ids[i]] = 1" - ) - while ancestor_boxes_last.any(): - ancestor_boxes_new = cl.array.zeros(queue, (tree.nboxes,), - dtype=np.int8) - mark_parent_knl(ancestor_boxes_last, ancestor_boxes_new, - d_box_parent_ids) - ancestor_boxes_new = ancestor_boxes_new & (~ancestor_boxes[rank, :]) - ancestor_boxes[rank, :] = \ - ancestor_boxes[rank, :] | ancestor_boxes_new - ancestor_boxes_last = ancestor_boxes_new - # }}} + print("Partition the work " + str(time.time() - start_time)) + # Convert src_weights to tree order src_weights = src_weights[tree.user_source_ids] src_weights = cl.array.to_device(queue, src_weights) @@ -489,7 +498,6 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): tree.box_target_counts_nonchild, tree.box_target_counts_cumul, None, return_mask_scan=True) - local_tree[rank].source_radii = None local_tree[rank].target_radii = None local_tree[rank].user_source_ids = None @@ -500,6 +508,8 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): weight_req[rank] = comm.isend(local_src_weights[rank], dest=rank, tag=MPI_Tags.DIST_WEIGHT) + print("Construct local tree " + str(time.time() - start_time)) + # }}} # Recieve the local trav from root @@ -515,6 +525,7 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): for rank in range(1, total_rank): weight_req[rank].wait() local_src_weights = local_src_weights[0] + print("Communicate local tree " + str(time.time() - start_time)) else: local_src_weights = comm.recv(source=0, tag=MPI_Tags.DIST_WEIGHT) @@ -612,7 +623,7 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): # {{{ Communicate mpole - mpole_exps_all = np.empty_like(mpole_exps) + mpole_exps_all = np.zeros_like(mpole_exps) comm.Allreduce(mpole_exps, mpole_exps_all) mpole_exps = mpole_exps_all @@ -688,7 +699,6 @@ def 
drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): # {{{ "Stage 8:" evaluate locals logger.debug("evaluate locals") - potentials = potentials + wrangler.eval_locals( trav_global.level_start_target_box_nrs, trav_global.target_boxes, @@ -698,6 +708,7 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): potentials_mpi_type = MPI._typedict[potentials.dtype.char] if current_rank == 0: + print("Calculate potentials " + str(time.time() - start_time)) potentials_all_ranks = np.empty((total_rank,), dtype=object) potentials_all_ranks[0] = potentials for i in range(1, total_rank): @@ -705,6 +716,7 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): (local_ntargets[i],), dtype=potentials.dtype) comm.Recv([potentials_all_ranks[i], potentials_mpi_type], source=i, tag=MPI_Tags.GATHER_POTENTIALS) + print("Communicate potentials " + str(time.time() - start_time)) else: comm.Send([potentials, potentials_mpi_type], dest=0, tag=MPI_Tags.GATHER_POTENTIALS) @@ -723,7 +735,6 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): particle_id_t=dtype_to_ctype(tree.particle_id_dtype), potential_t=dtype_to_ctype(potentials.dtype)), r""" - // printf("%d ", particle_mask[i]); if(particle_mask[i]) { potentials[i] = local_potentials[particle_scan[i]]; } @@ -732,7 +743,6 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): for i in range(total_rank): local_potentials = cl.array.to_device(queue, potentials_all_ranks[i]) - print(local_target_mask[i]) fill_potentials_knl( local_target_mask[i], local_target_scan[i], local_potentials, d_potentials) @@ -750,4 +760,5 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): logger.info("fmm complete") + print("Assembly potentials " + str(time.time() - start_time)) return result diff --git a/test/test_distributed.py b/test/test_distributed.py index 47669b6..9589319 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -6,7 +6,7 @@ import numpy.linalg as la # Parameters dims = 2 -nsources = 100000 +nsources = 40000 ntargets = 20000 dtype = np.float64 @@ -84,4 +84,4 @@ if rank == 0: pot_dfmm = drive_dfmm(trav, sources_weights) if rank == 0: - print(la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=2)) + print(la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=np.inf)) -- GitLab From ef77b4b373e241471db2f02511b38ba667421cd6 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 28 Nov 2017 10:06:17 -0600 Subject: [PATCH 032/260] Rearrange interfaces --- boxtree/distributed.py | 66 ++++++++++++++++++-------------------- test/test_distributed.py | 69 ++++++++++++++++++++++++++++++++++------ 2 files changed, 91 insertions(+), 44 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 36090f7..78760bb 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -6,7 +6,6 @@ from mako.template import Template from pyopencl.tools import dtype_to_ctype from pyopencl.scan import GenericScanKernel from boxtree import Tree -from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler __copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ Copyright (C) 2017 Hao Gao" @@ -34,6 +33,9 @@ THE SOFTWARE. 
import logging logger = logging.getLogger(__name__) +ctx = cl.create_some_context() +queue = cl.CommandQueue(ctx) + class LocalTree(Tree): @@ -81,7 +83,7 @@ class LocalTree(Tree): return d_tree -class MPI_Tags(): +class MPITags(): DIST_TREE = 0 DIST_WEIGHT = 1 GATHER_POTENTIALS = 2 @@ -351,26 +353,22 @@ def gen_local_particles(queue, particles, nparticles, tree, return rtv -def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): - - # Get MPI and pyopencl information +def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): + # Get MPI information current_rank = comm.Get_rank() total_rank = comm.Get_size() - ctx = cl.create_some_context() - queue = cl.CommandQueue(ctx) # {{{ Construct local tree for each rank on root - + local_target = {"mask": None, "scan": None, "size": None} if current_rank == 0: - import time - start_time = time.time() - print("time start") - tree = traversal.tree local_tree = np.empty((total_rank,), dtype=object) local_target_mask = np.empty((total_rank,), dtype=object) local_target_scan = np.empty((total_rank,), dtype=object) local_ntargets = np.empty((total_rank,), dtype=tree.particle_id_dtype) + local_target["mask"] = local_target_mask + local_target["scan"] = local_target_scan + local_target["size"] = local_ntargets d_box_parent_ids = cl.array.to_device(queue, tree.box_parent_ids) @@ -457,8 +455,6 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): # }}} - print("Partition the work " + str(time.time() - start_time)) - # Convert src_weights to tree order src_weights = src_weights[tree.user_source_ids] src_weights = cl.array.to_device(queue, src_weights) @@ -504,11 +500,9 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): local_tree[rank].sorted_target_ids = None tree_req[rank] = comm.isend(local_tree[rank], dest=rank, - tag=MPI_Tags.DIST_TREE) + tag=MPITags.DIST_TREE) weight_req[rank] = comm.isend(local_src_weights[rank], dest=rank, - tag=MPI_Tags.DIST_WEIGHT) - - print("Construct local tree " + str(time.time() - start_time)) + tag=MPITags.DIST_WEIGHT) # }}} @@ -518,17 +512,20 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): tree_req[rank].wait() local_tree = local_tree[0] else: - local_tree = comm.recv(source=0, tag=MPI_Tags.DIST_TREE) + local_tree = comm.recv(source=0, tag=MPITags.DIST_TREE) # Recieve source weights from root if current_rank == 0: for rank in range(1, total_rank): weight_req[rank].wait() local_src_weights = local_src_weights[0] - print("Communicate local tree " + str(time.time() - start_time)) else: - local_src_weights = comm.recv(source=0, tag=MPI_Tags.DIST_WEIGHT) + local_src_weights = comm.recv(source=0, tag=MPITags.DIST_WEIGHT) + + return local_tree, local_src_weights, local_target + +def generate_local_travs(local_tree, local_src_weights, comm=MPI.COMM_WORLD): d_tree = local_tree.to_device(queue) # Modify box flags for targets @@ -593,10 +590,15 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): d_trav_local, _ = tg(queue, d_tree, debug=True) trav_local = d_trav_local.get(queue=queue) - def fmm_level_to_nterms(tree, level): - return 3 - wrangler = FMMLibExpansionWrangler( - local_tree, 0, fmm_level_to_nterms=fmm_level_to_nterms) + return trav_local, trav_global + + +def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wrangler, + local_target_mask, local_target_scan, local_ntargets, + comm=MPI.COMM_WORLD): + # Get MPI information + current_rank = comm.Get_rank() + total_rank = comm.Get_size() # {{{ "Step 2.1:" Construct local multipoles @@ -708,21 
+710,19 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): potentials_mpi_type = MPI._typedict[potentials.dtype.char] if current_rank == 0: - print("Calculate potentials " + str(time.time() - start_time)) potentials_all_ranks = np.empty((total_rank,), dtype=object) potentials_all_ranks[0] = potentials for i in range(1, total_rank): potentials_all_ranks[i] = np.empty( (local_ntargets[i],), dtype=potentials.dtype) comm.Recv([potentials_all_ranks[i], potentials_mpi_type], - source=i, tag=MPI_Tags.GATHER_POTENTIALS) - print("Communicate potentials " + str(time.time() - start_time)) + source=i, tag=MPITags.GATHER_POTENTIALS) else: comm.Send([potentials, potentials_mpi_type], - dest=0, tag=MPI_Tags.GATHER_POTENTIALS) + dest=0, tag=MPITags.GATHER_POTENTIALS) if current_rank == 0: - d_potentials = cl.array.empty(queue, (tree.ntargets,), + d_potentials = cl.array.empty(queue, (global_wrangler.tree.ntargets,), dtype=potentials.dtype) fill_potentials_knl = cl.elementwise.ElementwiseKernel( ctx, @@ -732,7 +732,7 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): __global ${potential_t} *local_potentials, __global ${potential_t} *potentials """).render( - particle_id_t=dtype_to_ctype(tree.particle_id_dtype), + particle_id_t=dtype_to_ctype(global_wrangler.tree.particle_id_dtype), potential_t=dtype_to_ctype(potentials.dtype)), r""" if(particle_mask[i]) { @@ -749,9 +749,6 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): potentials = d_potentials.get() - global_wrangler = FMMLibExpansionWrangler( - tree, 0, fmm_level_to_nterms=fmm_level_to_nterms) - logger.debug("reorder potentials") result = global_wrangler.reorder_potentials(potentials) @@ -760,5 +757,4 @@ def drive_dfmm(traversal, src_weights, comm=MPI.COMM_WORLD): logger.info("fmm complete") - print("Assembly potentials " + str(time.time() - start_time)) return result diff --git a/test/test_distributed.py b/test/test_distributed.py index 9589319..9a05638 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -1,13 +1,17 @@ import numpy as np import sys from mpi4py import MPI -from boxtree.distributed import drive_dfmm +from boxtree.distributed import generate_local_tree, generate_local_travs, drive_dfmm import numpy.linalg as la +from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler + +import time +print("program start") # Parameters dims = 2 -nsources = 40000 -ntargets = 20000 +nsources = 100000 +ntargets = 50000 dtype = np.float64 # Get the current rank @@ -21,6 +25,8 @@ wrangler = None # Generate particles and run shared-memory parallelism on rank 0 if rank == 0: + last_time = time.time() + # Configure PyOpenCL import pyopencl as cl ctx = cl.create_some_context() @@ -47,6 +53,10 @@ if rank == 0: plt.plot(targets_host[:, 0], targets_host[:, 1], "ro") plt.show() + now = time.time() + print("Generate particles " + str(now - last_time)) + last_time = now + # Calculate potentials using direct evaluation # distances = la.norm(sources_host.reshape(1, nsources, 2) - \ # targets_host.reshape(ntargets, 1, 2), @@ -59,14 +69,20 @@ if rank == 0: tree, _ = tb(queue, sources, targets=targets, max_particles_in_box=30, debug=True) + now = time.time() + print("Generate tree " + str(now - last_time)) + last_time = now + from boxtree.traversal import FMMTraversalBuilder tg = FMMTraversalBuilder(ctx) d_trav, _ = tg(queue, tree, debug=True) trav = d_trav.get(queue=queue) - # Get pyfmmlib expansion wrangler - from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler + now = time.time() + 
print("Generate traversal " + str(now - last_time)) + last_time = now + # Get pyfmmlib expansion wrangler def fmm_level_to_nterms(tree, level): return 3 wrangler = FMMLibExpansionWrangler( @@ -75,13 +91,48 @@ if rank == 0: # Compute FMM using shared memory parallelism from boxtree.fmm import drive_fmm pot_fmm = drive_fmm(trav, wrangler, sources_weights) * 2 * np.pi + + now = time.time() + print("Shared memory FMM " + str(now - last_time)) # print(la.norm(pot_fmm - pot_naive, ord=2)) +last_time = time.time() + # Compute FMM using distributed memory parallelism -# Note: The drive_dfmm interface works as follows: -# Rank 0 passes the correct trav, wrangler, and sources_weights -# All other ranks pass None to these arguments -pot_dfmm = drive_dfmm(trav, sources_weights) +local_tree, local_src_weights, local_target = \ + generate_local_tree(trav, sources_weights) + +now = time.time() +print("Generate local tree " + str(now - last_time)) +last_time = now + +trav_local, trav_global = generate_local_travs(local_tree, local_src_weights) + +now = time.time() +print("Generate local trav " + str(now - last_time)) +last_time = now + + +def fmm_level_to_nterms(tree, level): + return 3 + + +local_wrangler = FMMLibExpansionWrangler( + local_tree, 0, fmm_level_to_nterms=fmm_level_to_nterms) +if rank == 0: + global_wrangler = FMMLibExpansionWrangler( + trav.tree, 0, fmm_level_to_nterms=fmm_level_to_nterms) +else: + global_wrangler = None + +pot_dfmm = drive_dfmm( + local_wrangler, trav_local, trav_global, local_src_weights, global_wrangler, + local_target["mask"], local_target["scan"], local_target["size"] +) + +now = time.time() +print("Distributed FMM " + str(now - last_time)) +last_time = now if rank == 0: print(la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=np.inf)) -- GitLab From 09c20b067948d32c6aa1e2790783597fd55ee6f2 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 30 Nov 2017 11:10:36 -0600 Subject: [PATCH 033/260] Add more timing --- boxtree/distributed.py | 24 ++++++++++++++++++++++++ test/test_distributed.py | 13 ++++++++++--- 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 78760bb..89a9207 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -35,6 +35,11 @@ logger = logging.getLogger(__name__) ctx = cl.create_some_context() queue = cl.CommandQueue(ctx) +print("Process %d of %d on %s with ctx %s.\n" % ( + MPI.COMM_WORLD.Get_rank(), + MPI.COMM_WORLD.Get_size(), + MPI.Get_processor_name(), + queue.context.devices)) class LocalTree(Tree): @@ -600,6 +605,9 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran current_rank = comm.Get_rank() total_rank = comm.Get_size() + import time + last_time = time.time() + # {{{ "Step 2.1:" Construct local multipoles logger.debug("construct local multipoles") @@ -623,6 +631,10 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran # }}} + now = time.time() + print("Step 1 and Step 2 " + str(now - last_time)) + last_time = now + # {{{ Communicate mpole mpole_exps_all = np.zeros_like(mpole_exps) @@ -632,6 +644,10 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran # }}} + now = time.time() + print("Communication " + str(now - last_time)) + last_time = now + # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") logger.debug("direct evaluation from neighbor source boxes ('list 1')") @@ -708,6 +724,10 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, 
global_wran # }}} + now = time.time() + print("Step 3-8 " + str(now - last_time)) + last_time = now + potentials_mpi_type = MPI._typedict[potentials.dtype.char] if current_rank == 0: potentials_all_ranks = np.empty((total_rank,), dtype=object) @@ -757,4 +777,8 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran logger.info("fmm complete") + now = time.time() + print("Assemble result " + str(now - last_time)) + last_time = now + return result diff --git a/test/test_distributed.py b/test/test_distributed.py index 9a05638..8c94fa7 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -10,8 +10,8 @@ print("program start") # Parameters dims = 2 -nsources = 100000 -ntargets = 50000 +nsources = 1000000 +ntargets = 500000 dtype = np.float64 # Get the current rank @@ -31,6 +31,7 @@ if rank == 0: import pyopencl as cl ctx = cl.create_some_context() queue = cl.CommandQueue(ctx) + print(queue.context.devices) # Generate random particles and source weights from boxtree.tools import make_normal_particle_array as p_normal @@ -96,7 +97,8 @@ if rank == 0: print("Shared memory FMM " + str(now - last_time)) # print(la.norm(pot_fmm - pot_naive, ord=2)) -last_time = time.time() +comm.barrier() +start_time = last_time = time.time() # Compute FMM using distributed memory parallelism local_tree, local_src_weights, local_target = \ @@ -125,6 +127,10 @@ if rank == 0: else: global_wrangler = None +print(trav_global.target_boxes.shape[0]) +print(trav_global.source_boxes.shape[0]) +print(local_tree.nboxes) + pot_dfmm = drive_dfmm( local_wrangler, trav_local, trav_global, local_src_weights, global_wrangler, local_target["mask"], local_target["scan"], local_target["size"] @@ -136,3 +142,4 @@ last_time = now if rank == 0: print(la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=np.inf)) + print("Total time " + str(time.time() - start_time)) -- GitLab From e2e199c41148855ce0006d3bc4a3a360b9b0df5b Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 1 Dec 2017 21:49:26 -0600 Subject: [PATCH 034/260] Use Elementwise kernel for local particle generation --- boxtree/distributed.py | 117 +++++++++++++++-------------------------- 1 file changed, 43 insertions(+), 74 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 89a9207..5f1d7e2 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -122,46 +122,12 @@ def partition_work(tree, total_rank, queue): return responsible_boxes_mask, responsible_boxes_list -gen_local_tree_tpl = Template(r""" -typedef ${dtype_to_ctype(tree.box_id_dtype)} box_id_t; -typedef ${dtype_to_ctype(tree.particle_id_dtype)} particle_id_t; -typedef ${dtype_to_ctype(mask_dtype)} mask_t; -typedef ${dtype_to_ctype(tree.coord_dtype)} coord_t; - -__kernel void generate_local_particles( - const int total_num_particles, - % for dim in range(ndims): - __global const coord_t *particles_${dim}, - % endfor - __global const mask_t *particle_mask, - __global const mask_t *particle_scan - % for dim in range(ndims): - , __global coord_t *local_particles_${dim} - % endfor -) -{ - /* - * generate_local_particles generates an array of particles for which a process - * is responsible for. 
- */ - int particle_idx = get_global_id(0); - - if(particle_idx < total_num_particles && particle_mask[particle_idx]) - { - particle_id_t des = particle_scan[particle_idx]; - % for dim in range(ndims): - local_particles_${dim}[des] = particles_${dim}[particle_idx]; - % endfor - } -} -""", strict_undefined=True) - - def gen_local_particles(queue, particles, nparticles, tree, responsible_boxes, box_particle_starts, box_particle_counts_nonchild, box_particle_counts_cumul, + particle_radii=None, particle_weights=None, return_mask_scan=False): """ @@ -227,29 +193,51 @@ def gen_local_particles(queue, particles, nparticles, tree, d_paticles_list = d_particles.tolist() for i in range(tree.dimensions): - d_paticles_list[i] = d_paticles_list[i].data + d_paticles_list[i] = d_paticles_list[i] d_local_particles_list = d_local_particles.tolist() for i in range(tree.dimensions): - d_local_particles_list[i] = d_local_particles_list[i].data + d_local_particles_list[i] = d_local_particles_list[i] - gen_local_tree_prg = cl.Program( - queue.context, - gen_local_tree_tpl.render( - tree=tree, - dtype_to_ctype=dtype_to_ctype, - mask_dtype=tree.particle_id_dtype, - ndims=tree.dimensions + fetch_local_particles_knl = cl.elementwise.ElementwiseKernel( + ctx, + Template(""" + __global const ${mask_t} *particle_mask, + __global const ${mask_t} *particle_scan + % for dim in range(ndims): + , __global const ${coord_t} *particles_${dim} + % endfor + % if particles_have_extent: + , __global const ${coord_t} *particle_radii + , __global ${coord_t} *local_particle_radii + % endif + % for dim in range(ndims): + , __global ${coord_t} *local_particles_${dim} + % endfor + """, strict_undefined=True).render( + mask_t=dtype_to_ctype(tree.particle_id_dtype), + coord_t=dtype_to_ctype(tree.coord_dtype), + ndims=tree.dimensions, + particles_have_extent=(particle_radii is not None) + ), + Template(""" + if(particle_mask[i]) { + ${particle_id_t} des = particle_scan[i]; + % for dim in range(ndims): + local_particles_${dim}[des] = particles_${dim}[i]; + % endfor + % if particles_have_extent: + local_particle_radii[des] = particle_radii[i]; + % endif + } + """, strict_undefined=True).render( + particle_id_t=dtype_to_ctype(tree.particle_id_dtype), + ndims=tree.dimensions, + particles_have_extent=(particle_radii is not None) ) - ).build() + ) - gen_local_tree_prg.generate_local_particles( - queue, ((nparticles + 127) // 128,), (128,), - np.int32(nparticles), - *d_paticles_list, - d_particle_mask.data, - d_particle_scan.data, - *d_local_particles_list, - g_times_l=True) + fetch_local_particles_knl(d_particle_mask, d_particle_scan, + *d_paticles_list, *d_local_particles_list) # Generate "box_particle_starts" of the local tree local_box_particle_starts = cl.array.empty(queue, (tree.nboxes,), @@ -484,7 +472,7 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): tree.box_source_starts, tree.box_source_counts_nonchild, tree.box_source_counts_cumul, - src_weights) + None, src_weights) (local_tree[rank].targets, local_tree[rank].box_target_starts, @@ -498,7 +486,7 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): tree.box_target_starts, tree.box_target_counts_nonchild, tree.box_target_counts_cumul, - None, return_mask_scan=True) + None, None, return_mask_scan=True) local_tree[rank].source_radii = None local_tree[rank].target_radii = None local_tree[rank].user_source_ids = None @@ -605,9 +593,6 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran current_rank = 
comm.Get_rank() total_rank = comm.Get_size() - import time - last_time = time.time() - # {{{ "Step 2.1:" Construct local multipoles logger.debug("construct local multipoles") @@ -631,10 +616,6 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran # }}} - now = time.time() - print("Step 1 and Step 2 " + str(now - last_time)) - last_time = now - # {{{ Communicate mpole mpole_exps_all = np.zeros_like(mpole_exps) @@ -644,10 +625,6 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran # }}} - now = time.time() - print("Communication " + str(now - last_time)) - last_time = now - # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") logger.debug("direct evaluation from neighbor source boxes ('list 1')") @@ -724,10 +701,6 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran # }}} - now = time.time() - print("Step 3-8 " + str(now - last_time)) - last_time = now - potentials_mpi_type = MPI._typedict[potentials.dtype.char] if current_rank == 0: potentials_all_ranks = np.empty((total_rank,), dtype=object) @@ -777,8 +750,4 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran logger.info("fmm complete") - now = time.time() - print("Assemble result " + str(now - last_time)) - last_time = now - return result -- GitLab From 521fe68512d4c72622665ca1e65750d6554fc347 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sat, 2 Dec 2017 16:36:03 -0600 Subject: [PATCH 035/260] Pass radii to the local tree --- boxtree/distributed.py | 45 ++++++++++++++++++++++++++++++++-------- test/test_distributed.py | 21 +++++++++---------- 2 files changed, 46 insertions(+), 20 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 5f1d7e2..b381e19 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -85,6 +85,12 @@ class LocalTree(Tree): else: d_tree.__setattr__( field, cl.array.to_device(queue, current_obj)) + + if self.sources_have_extent: + d_tree.source_radii = cl.array.to_device(queue, d_tree.source_radii) + if self.targets_have_extent: + d_tree.target_radii = cl.array.to_device(queue, d_tree.target_radii) + return d_tree @@ -206,13 +212,13 @@ def gen_local_particles(queue, particles, nparticles, tree, % for dim in range(ndims): , __global const ${coord_t} *particles_${dim} % endfor + % for dim in range(ndims): + , __global ${coord_t} *local_particles_${dim} + % endfor % if particles_have_extent: , __global const ${coord_t} *particle_radii , __global ${coord_t} *local_particle_radii % endif - % for dim in range(ndims): - , __global ${coord_t} *local_particles_${dim} - % endfor """, strict_undefined=True).render( mask_t=dtype_to_ctype(tree.particle_id_dtype), coord_t=dtype_to_ctype(tree.coord_dtype), @@ -236,8 +242,16 @@ def gen_local_particles(queue, particles, nparticles, tree, ) ) - fetch_local_particles_knl(d_particle_mask, d_particle_scan, - *d_paticles_list, *d_local_particles_list) + if particle_radii is None: + fetch_local_particles_knl(d_particle_mask, d_particle_scan, + *d_paticles_list, *d_local_particles_list) + else: + d_particle_radii = cl.array.to_device(queue, particle_radii) + d_local_particle_radii = cl.array.empty(queue, (local_nparticles,), + dtype=tree.coord_dtype) + fetch_local_particles_knl(d_particle_mask, d_particle_scan, + *d_paticles_list, *d_local_particles_list, + d_particle_radii, d_local_particle_radii) # Generate "box_particle_starts" of the local tree local_box_particle_starts = cl.array.empty(queue, (tree.nboxes,), @@ 
-337,6 +351,9 @@ def gen_local_particles(queue, particles, nparticles, tree, local_box_particle_counts_nonchild, local_box_particle_counts_cumul) + if particle_radii is not None: + rtv = rtv + (d_local_particle_radii.get(),) + if particle_weights is not None: rtv = rtv + (local_particle_weights.get(),) @@ -457,6 +474,16 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): tree_req = np.empty((total_rank,), dtype=object) weight_req = np.empty((total_rank,), dtype=object) + if tree.sources_have_extent: + source_radii = tree.source_radii + else: + source_radii = None + + if tree.targets_have_extent: + target_radii = tree.target_radii + else: + target_radii = None + for rank in range(total_rank): local_tree[rank] = LocalTree.copy_from_global_tree( tree, responsible_boxes_list[rank].get(), @@ -472,12 +499,13 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): tree.box_source_starts, tree.box_source_counts_nonchild, tree.box_source_counts_cumul, - None, src_weights) + source_radii, src_weights) (local_tree[rank].targets, local_tree[rank].box_target_starts, local_tree[rank].box_target_counts_nonchild, local_tree[rank].box_target_counts_cumul, + local_tree[rank].target_radii, local_target_mask[rank], local_target_scan[rank], local_ntargets[rank]) = \ @@ -486,9 +514,8 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): tree.box_target_starts, tree.box_target_counts_nonchild, tree.box_target_counts_cumul, - None, None, return_mask_scan=True) - local_tree[rank].source_radii = None - local_tree[rank].target_radii = None + target_radii, None, return_mask_scan=True) + local_tree[rank].user_source_ids = None local_tree[rank].sorted_target_ids = None diff --git a/test/test_distributed.py b/test/test_distributed.py index 8c94fa7..dc6095d 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -4,14 +4,12 @@ from mpi4py import MPI from boxtree.distributed import generate_local_tree, generate_local_travs, drive_dfmm import numpy.linalg as la from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler - import time -print("program start") # Parameters dims = 2 -nsources = 1000000 -ntargets = 500000 +nsources = 50000 +ntargets = 50000 dtype = np.float64 # Get the current rank @@ -23,6 +21,7 @@ trav = None sources_weights = None wrangler = None + # Generate particles and run shared-memory parallelism on rank 0 if rank == 0: last_time = time.time() @@ -47,6 +46,10 @@ if rank == 0: rng = PhiloxGenerator(queue.context, seed=20) sources_weights = rng.uniform(queue, nsources, dtype=np.float64).get() + from pyopencl.clrandom import PhiloxGenerator + rng = PhiloxGenerator(queue.context, seed=22) + target_radii = rng.uniform(queue, ntargets, a=0, b=0.25, dtype=np.float64).get() + # Display sources and targets if "--display" in sys.argv: import matplotlib.pyplot as plt @@ -67,8 +70,8 @@ if rank == 0: # Build the tree and interaction lists from boxtree import TreeBuilder tb = TreeBuilder(ctx) - tree, _ = tb(queue, sources, targets=targets, max_particles_in_box=30, - debug=True) + tree, _ = tb(queue, sources, targets=targets, target_radii=target_radii, + stick_out_factor=0.25, max_particles_in_box=30, debug=True) now = time.time() print("Generate tree " + str(now - last_time)) @@ -127,10 +130,6 @@ if rank == 0: else: global_wrangler = None -print(trav_global.target_boxes.shape[0]) -print(trav_global.source_boxes.shape[0]) -print(local_tree.nboxes) - pot_dfmm = drive_dfmm( local_wrangler, trav_local, trav_global, local_src_weights, 
global_wrangler, local_target["mask"], local_target["scan"], local_target["size"] @@ -141,5 +140,5 @@ print("Distributed FMM " + str(now - last_time)) last_time = now if rank == 0: - print(la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=np.inf)) print("Total time " + str(time.time() - start_time)) + print(la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=np.inf)) -- GitLab From e3652cb0b404b35f123e1fcc72d6d8afe5dccd5a Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 3 Dec 2017 21:16:26 -0600 Subject: [PATCH 036/260] Support target with scale --- boxtree/distributed.py | 81 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 72 insertions(+), 9 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index b381e19..39fdcdc 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -153,6 +153,7 @@ def gen_local_particles(queue, particles, nparticles, tree, # Generate the particle mask array d_particle_mask = cl.array.zeros(queue, (nparticles,), dtype=tree.particle_id_dtype) + particle_mask_knl = cl.elementwise.ElementwiseKernel( queue.context, arguments=Template(""" @@ -160,7 +161,9 @@ def gen_local_particles(queue, particles, nparticles, tree, __global ${particle_id_t} *box_particle_starts, __global ${particle_id_t} *box_particle_counts_nonchild, __global ${particle_id_t} *particle_mask - """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype)), + """, strict_undefined=True).render( + particle_id_t=dtype_to_ctype(tree.particle_id_dtype) + ), operation=Template(""" if(responsible_boxes[i]) { for(${particle_id_t} pid = box_particle_starts[i]; @@ -180,7 +183,9 @@ def gen_local_particles(queue, particles, nparticles, tree, arguments=Template(""" __global ${mask_t} *ary, __global ${mask_t} *scan - """).render(mask_t=dtype_to_ctype(tree.particle_id_dtype)), + """, strict_undefined=True).render( + mask_t=dtype_to_ctype(tree.particle_id_dtype) + ), input_expr="ary[i]", scan_expr="a+b", neutral="0", output_statement="scan[i + 1] = item;" @@ -263,7 +268,9 @@ def gen_local_particles(queue, particles, nparticles, tree, __global ${particle_id_t} *old_starts, __global ${particle_id_t} *particle_scan, __global ${particle_id_t} *new_starts - """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype)), + """, strict_undefined=True).render( + particle_id_t=dtype_to_ctype(tree.particle_id_dtype) + ), "new_starts[i] = particle_scan[old_starts[i]]", name="generate_box_particle_starts" ) @@ -281,7 +288,9 @@ def gen_local_particles(queue, particles, nparticles, tree, __global char *res_boxes, __global ${particle_id_t} *old_counts_nonchild, __global ${particle_id_t} *new_counts_nonchild - """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype)), + """, strict_undefined=True).render( + particle_id_t=dtype_to_ctype(tree.particle_id_dtype) + ), "if(res_boxes[i]) new_counts_nonchild[i] = old_counts_nonchild[i];" ) @@ -300,7 +309,9 @@ def gen_local_particles(queue, particles, nparticles, tree, __global ${particle_id_t} *old_starts, __global ${particle_id_t} *new_counts_cumul, __global ${particle_id_t} *particle_scan - """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype)), + """, strict_undefined=True).render( + particle_id_t=dtype_to_ctype(tree.particle_id_dtype) + ), """ new_counts_cumul[i] = particle_scan[old_starts[i] + old_counts_cumul[i]] - @@ -331,7 +342,7 @@ def gen_local_particles(queue, particles, nparticles, tree, __global ${particle_id_t} *particle_mask, __global ${particle_id_t} *particle_scan, __global ${weight_t} *local_weights - """).render( + 
""", strict_undefined=True).render( weight_t=dtype_to_ctype(particle_weights.dtype), particle_id_t=dtype_to_ctype(tree.particle_id_dtype) ), @@ -423,7 +434,9 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): __global ${box_id_t} *interaction_boxes_starts, __global ${box_id_t} *interaction_boxes_lists, __global char *src_boxes_mask - """).render(box_id_t=dtype_to_ctype(tree.box_id_dtype)), + """, strict_undefined=True).render( + box_id_t=dtype_to_ctype(tree.box_id_dtype) + ), Template(r""" typedef ${box_id_t} box_id_t; box_id_t current_box = box_list[i]; @@ -433,7 +446,9 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): ++box_idx) src_boxes_mask[interaction_boxes_lists[box_idx]] = 1; } - """).render(box_id_t=dtype_to_ctype(tree.box_id_dtype)), + """, strict_undefined=True).render( + box_id_t=dtype_to_ctype(tree.box_id_dtype) + ), ) for rank in range(total_rank): @@ -463,6 +478,33 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): src_boxes_mask[rank], range=range(0, traversal.target_or_target_parent_boxes.shape[0])) + if tree.targets_have_extent: + d_from_sep_close_bigger_starts = cl.array.to_device( + queue, traversal.from_sep_close_bigger_starts) + d_from_sep_close_bigger_lists = cl.array.to_device( + queue, traversal.from_sep_close_bigger_lists) + add_interaction_list_boxes( + d_target_or_target_parent_boxes, + responsible_boxes_mask[rank] | ancestor_boxes[rank], + d_from_sep_close_bigger_starts, + d_from_sep_close_bigger_lists, + src_boxes_mask[rank] + ) + + # Add list 3 direct + d_from_sep_close_smaller_starts = cl.array.to_device( + queue, traversal.from_sep_close_smaller_starts) + d_from_sep_close_smaller_lists = cl.array.to_device( + queue, traversal.from_sep_close_smaller_lists) + + add_interaction_list_boxes( + d_target_boxes, + responsible_boxes_mask[rank], + d_from_sep_close_smaller_starts, + d_from_sep_close_smaller_lists, + src_boxes_mask[rank] + ) + # }}} # Convert src_weights to tree order @@ -568,7 +610,7 @@ def generate_local_travs(local_tree, local_src_weights, comm=MPI.COMM_WORLD): """).render(HAS_OWN_TARGETS=("(" + box_flag_t + ") " + str(box_flags_enum.HAS_OWN_TARGETS)), HAS_CHILD_TARGETS=("(" + box_flag_t + ") " + - str(box_flags_enum.HAS_CHILD_TARGETS))) + str(box_flags_enum.HAS_CHILD_TARGETS))) ) modify_target_flags_knl(d_tree.box_target_counts_nonchild, d_tree.box_target_counts_cumul, @@ -692,6 +734,17 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran trav_global.from_sep_smaller_by_level, mpole_exps) + # these potentials are called beta in [1] + + if trav_global.from_sep_close_smaller_starts is not None: + logger.debug("evaluate separated close smaller interactions directly " + "('list 3 close')") + potentials = potentials + wrangler.eval_direct( + trav_global.target_boxes, + trav_global.from_sep_close_smaller_starts, + trav_global.from_sep_close_smaller_lists, + local_src_weights) + # }}} # {{{ "Stage 6:" form locals for separated bigger source boxes ("list 4") @@ -705,6 +758,16 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran trav_global.from_sep_bigger_lists, local_src_weights) + if trav_global.from_sep_close_bigger_starts is not None: + logger.debug("evaluate separated close bigger interactions directly " + "('list 4 close')") + + potentials = potentials + wrangler.eval_direct( + trav_global.target_or_target_parent_boxes, + trav_global.from_sep_close_bigger_starts, + trav_global.from_sep_close_bigger_lists, + 
local_src_weights) + # }}} # {{{ "Stage 7:" propagate local_exps downward -- GitLab From a48a058fe02685bc11c3d6d1a1f43c168aa0d8bf Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 8 Dec 2017 10:03:58 -0600 Subject: [PATCH 037/260] Add an argument for traversal builder to use existing box extent --- boxtree/distributed.py | 46 ++++++++++-- boxtree/traversal.py | 154 +++++++++++++++++++++------------------ test/test_distributed.py | 5 +- 3 files changed, 125 insertions(+), 80 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 39fdcdc..abe3be6 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -334,7 +334,7 @@ def gen_local_particles(queue, particles, nparticles, tree, # {{{ Generate source weights if particle_weights is not None: local_particle_weights = cl.array.empty(queue, (local_nparticles,), - dtype=particle_weights.dtype) + dtype=particle_weights.dtype) gen_local_source_weights_knl = cl.elementwise.ElementwiseKernel( queue.context, arguments=Template(""" @@ -584,10 +584,35 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): else: local_src_weights = comm.recv(source=0, tag=MPITags.DIST_WEIGHT) - return local_tree, local_src_weights, local_target + rtv = (local_tree, local_src_weights, local_target) + + # Recieve box extent + if local_tree.targets_have_extent: + if current_rank == 0: + box_target_bounding_box_min = traversal.box_target_bounding_box_min + box_target_bounding_box_max = traversal.box_target_bounding_box_max + else: + box_target_bounding_box_min = np.empty( + (local_tree.dimensions, local_tree.aligned_nboxes), + dtype=local_tree.coord_dtype + ) + box_target_bounding_box_max = np.empty( + (local_tree.dimensions, local_tree.aligned_nboxes), + dtype=local_tree.coord_dtype + ) + comm.Bcast(box_target_bounding_box_min, root=0) + comm.Bcast(box_target_bounding_box_max, root=0) + box_bounding_box = { + "min": box_target_bounding_box_min, + "max": box_target_bounding_box_max + } + rtv += (box_bounding_box,) + + return rtv -def generate_local_travs(local_tree, local_src_weights, comm=MPI.COMM_WORLD): +def generate_local_travs(local_tree, local_src_weights, box_bounding_box=None, + comm=MPI.COMM_WORLD): d_tree = local_tree.to_device(queue) # Modify box flags for targets @@ -618,7 +643,8 @@ def generate_local_travs(local_tree, local_src_weights, comm=MPI.COMM_WORLD): from boxtree.traversal import FMMTraversalBuilder tg = FMMTraversalBuilder(queue.context) - d_trav_global, _ = tg(queue, d_tree, debug=True) + d_trav_global, _ = tg(queue, d_tree, debug=True, + box_bounding_box=box_bounding_box) trav_global = d_trav_global.get(queue=queue) # Source flags @@ -633,7 +659,7 @@ def generate_local_travs(local_tree, local_src_weights, comm=MPI.COMM_WORLD): Template(r""" box_flags[responsible_box_list[i]] |= ${HAS_OWN_SOURCES}; """).render(HAS_OWN_SOURCES=("(" + box_flag_t + ") " + - str(box_flags_enum.HAS_OWN_SOURCES))) + str(box_flags_enum.HAS_OWN_SOURCES))) ) modify_child_sources_knl = cl.elementwise.ElementwiseKernel( queue.context, @@ -649,7 +675,8 @@ def generate_local_travs(local_tree, local_src_weights, comm=MPI.COMM_WORLD): modify_own_sources_knl(d_tree.responsible_boxes_list, d_tree.box_flags) modify_child_sources_knl(d_tree.ancestor_mask, d_tree.box_flags) - d_trav_local, _ = tg(queue, d_tree, debug=True) + d_trav_local, _ = tg(queue, d_tree, debug=True, + box_bounding_box=box_bounding_box) trav_local = d_trav_local.get(queue=queue) return trav_local, trav_global @@ -664,6 +691,7 @@ def drive_dfmm(wrangler, trav_local, 
trav_global, local_src_weights, global_wran # {{{ "Step 2.1:" Construct local multipoles + import time logger.debug("construct local multipoles") mpole_exps = wrangler.form_multipoles( @@ -686,15 +714,18 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran # }}} # {{{ Communicate mpole + last_time = time.time() mpole_exps_all = np.zeros_like(mpole_exps) comm.Allreduce(mpole_exps, mpole_exps_all) mpole_exps = mpole_exps_all + print("Communication: " + str(time.time()-last_time)) # }}} # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") + last_time = time.time() logger.debug("direct evaluation from neighbor source boxes ('list 1')") potentials = wrangler.eval_direct( @@ -704,10 +735,12 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran local_src_weights) # these potentials are called alpha in [1] + print("Step 3: " + str(time.time()-last_time)) # }}} # {{{ "Stage 4:" translate separated siblings' ("list 2") mpoles to local + last_time = time.time() logger.debug("translate separated siblings' ('list 2') mpoles to local") local_exps = wrangler.multipole_to_local( @@ -718,6 +751,7 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran mpole_exps) # local_exps represents both Gamma and Delta in [1] + print("Step 4: " + str(time.time()-last_time)) # }}} diff --git a/boxtree/traversal.py b/boxtree/traversal.py index a35c60d..16e49f9 100644 --- a/boxtree/traversal.py +++ b/boxtree/traversal.py @@ -759,6 +759,7 @@ void generate(LIST_ARG_DECL USER_ARG_DECL box_id_t target_box_number) %elif from_sep_smaller_crit == "precise_linf": { + coord_t source_rad = LEVEL_TO_RAD(walk_level); // l^infty distance between source box and target box. @@ -774,6 +775,7 @@ void generate(LIST_ARG_DECL USER_ARG_DECL box_id_t target_box_number) meets_sep_crit = l_inf_dist >= (2 - 8 * COORD_T_MACH_EPS) * source_rad; + } %elif from_sep_smaller_crit == "static_l2": @@ -1742,7 +1744,8 @@ class FMMTraversalBuilder: # {{{ driver def __call__(self, queue, tree, wait_for=None, debug=False, - _from_sep_smaller_min_nsources_cumul=None): + _from_sep_smaller_min_nsources_cumul=None, + box_bounding_box=None): """ :arg queue: A :class:`pyopencl.CommandQueue` instance. :arg tree: A :class:`boxtree.Tree` instance. 
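A brief usage sketch of the new box_bounding_box argument (the helper name
below is hypothetical; the dict layout and the builder call mirror
generate_local_tree/generate_local_travs in this patch):

    from boxtree.traversal import FMMTraversalBuilder

    def build_local_traversal(queue, d_local_tree, bbox_min, bbox_max):
        # bbox_min/bbox_max: coord_t [dimensions, aligned_nboxes] host arrays
        # of per-box target bounding boxes, broadcast from rank 0.
        box_bounding_box = {"min": bbox_min, "max": bbox_max}
        tg = FMMTraversalBuilder(queue.context)
        d_trav, _ = tg(queue, d_local_tree, debug=True,
                       box_bounding_box=box_bounding_box)
        return d_trav.get(queue=queue)

Note that when a bounding box is supplied, the builder (per the next hunk)
skips recomputing box extents and leaves the source bounding boxes as None.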
@@ -1857,84 +1860,91 @@ class FMMTraversalBuilder: # {{{ box extents fin_debug("finding box extents") - - box_source_bounding_box_min = cl.array.empty( - queue, (tree.dimensions, tree.aligned_nboxes), - dtype=tree.coord_dtype) - box_source_bounding_box_max = cl.array.empty( - queue, (tree.dimensions, tree.aligned_nboxes), - dtype=tree.coord_dtype) - - if tree.sources_are_targets: - box_target_bounding_box_min = box_source_bounding_box_min - box_target_bounding_box_max = box_source_bounding_box_max + if box_bounding_box is not None: + box_target_bounding_box_min = cl.array.to_device( + queue, box_bounding_box["min"]) + box_target_bounding_box_max = cl.array.to_device( + queue, box_bounding_box["max"]) + box_source_bounding_box_min = None + box_source_bounding_box_max = None else: - box_target_bounding_box_min = cl.array.empty( + box_source_bounding_box_min = cl.array.empty( queue, (tree.dimensions, tree.aligned_nboxes), dtype=tree.coord_dtype) - box_target_bounding_box_max = cl.array.empty( + box_source_bounding_box_max = cl.array.empty( queue, (tree.dimensions, tree.aligned_nboxes), dtype=tree.coord_dtype) - bogus_radii_array = cl.array.empty(queue, 1, dtype=tree.coord_dtype) - - # nlevels-1 is the highest valid level index - for level in range(tree.nlevels-1, -1, -1): - start, stop = tree.level_start_box_nrs[level:level+2] - - for (skip, enable_radii, bbox_min, bbox_max, - pstarts, pcounts, radii_tree_attr, particles) in [ - ( - # never skip - False, - - tree.sources_have_extent, - box_source_bounding_box_min, - box_source_bounding_box_max, - tree.box_source_starts, - tree.box_source_counts_nonchild, - "source_radii", - tree.sources), - ( - # skip the 'target' round if sources and targets - # are the same. - tree.sources_are_targets, - - tree.targets_have_extent, - box_target_bounding_box_min, - box_target_bounding_box_max, - tree.box_target_starts, - tree.box_target_counts_nonchild, - "target_radii", - tree.targets), - ]: - - if skip: - continue - - args = ( + if tree.sources_are_targets: + box_target_bounding_box_min = box_source_bounding_box_min + box_target_bounding_box_max = box_source_bounding_box_max + else: + box_target_bounding_box_min = cl.array.empty( + queue, (tree.dimensions, tree.aligned_nboxes), + dtype=tree.coord_dtype) + box_target_bounding_box_max = cl.array.empty( + queue, (tree.dimensions, tree.aligned_nboxes), + dtype=tree.coord_dtype) + + bogus_radii_array = cl.array.empty(queue, 1, dtype=tree.coord_dtype) + + # nlevels-1 is the highest valid level index + for level in range(tree.nlevels-1, -1, -1): + start, stop = tree.level_start_box_nrs[level:level+2] + + for (skip, enable_radii, bbox_min, bbox_max, + pstarts, pcounts, radii_tree_attr, particles) in [ ( - tree.aligned_nboxes, - tree.box_child_ids, - tree.box_centers, - pstarts, pcounts,) - + tuple(particles) - + ( - getattr(tree, radii_tree_attr, bogus_radii_array), - enable_radii, - - bbox_min, - bbox_max)) - - evt = knl_info.box_extents_finder( - *args, - - range=slice(start, stop), - queue=queue, wait_for=wait_for) - - wait_for = [evt] - - del bogus_radii_array + # never skip + False, + + tree.sources_have_extent, + box_source_bounding_box_min, + box_source_bounding_box_max, + tree.box_source_starts, + tree.box_source_counts_nonchild, + "source_radii", + tree.sources), + ( + # skip the 'target' round if sources and targets + # are the same. 
+ tree.sources_are_targets, + + tree.targets_have_extent, + box_target_bounding_box_min, + box_target_bounding_box_max, + tree.box_target_starts, + tree.box_target_counts_nonchild, + "target_radii", + tree.targets), + ]: + + if skip: + continue + + args = ( + ( + tree.aligned_nboxes, + tree.box_child_ids, + tree.box_centers, + pstarts, pcounts,) + + tuple(particles) + + ( + getattr(tree, radii_tree_attr, bogus_radii_array), + enable_radii, + + bbox_min, + bbox_max)) + + evt = knl_info.box_extents_finder( + *args, + + range=slice(start, stop), + queue=queue, wait_for=wait_for) + + wait_for = [evt] + + del bogus_radii_array # }}} diff --git a/test/test_distributed.py b/test/test_distributed.py index dc6095d..064d663 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -104,14 +104,15 @@ comm.barrier() start_time = last_time = time.time() # Compute FMM using distributed memory parallelism -local_tree, local_src_weights, local_target = \ +local_tree, local_src_weights, local_target, box_bounding_box = \ generate_local_tree(trav, sources_weights) now = time.time() print("Generate local tree " + str(now - last_time)) last_time = now -trav_local, trav_global = generate_local_travs(local_tree, local_src_weights) +trav_local, trav_global = generate_local_travs(local_tree, local_src_weights, + box_bounding_box) now = time.time() print("Generate local trav " + str(now - last_time)) -- GitLab From cabadd95e393d53b82f686a69c42370fcfae60e1 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 8 Dec 2017 14:56:48 -0600 Subject: [PATCH 038/260] Improve load balancing by DFS order and load prediction --- boxtree/distributed.py | 86 +++++++++++++++++++++++++++++++----------- 1 file changed, 64 insertions(+), 22 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index abe3be6..1c46de2 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -100,30 +100,71 @@ class MPITags(): GATHER_POTENTIALS = 2 -def partition_work(tree, total_rank, queue): +def partition_work(traversal, total_rank, queue): """ This function returns a pyopencl array of size total_rank*nboxes, where the (i,j) entry is 1 iff rank i is responsible for box j. 
""" + tree = traversal.tree responsible_boxes_mask = cl.array.zeros(queue, (total_rank, tree.nboxes), dtype=np.int8) responsible_boxes_list = np.empty((total_rank,), dtype=object) - nboxes_per_rank = tree.nboxes // total_rank - extra_boxes = tree.nboxes - nboxes_per_rank * total_rank - start_idx = 0 - - for current_rank in range(extra_boxes): - end_idx = start_idx + nboxes_per_rank + 1 - responsible_boxes_mask[current_rank, start_idx:end_idx] = 1 - responsible_boxes_list[current_rank] = cl.array.arange( - queue, start_idx, end_idx, dtype=tree.box_id_dtype) - start_idx = end_idx - - for current_rank in range(extra_boxes, total_rank): - end_idx = start_idx + nboxes_per_rank - responsible_boxes_mask[current_rank, start_idx:end_idx] = 1 - responsible_boxes_list[current_rank] = cl.array.arange( - queue, start_idx, end_idx, dtype=tree.box_id_dtype) - start_idx = end_idx + + workload = np.zeros((tree.nboxes,), dtype=np.float64) + for i in range(traversal.target_boxes.shape[0]): + box_idx = traversal.target_boxes[i] + box_ntargets = tree.box_target_counts_nonchild[box_idx] + + # workload for list 1 + start = traversal.neighbor_source_boxes_starts[i] + end = traversal.neighbor_source_boxes_starts[i + 1] + list1 = traversal.neighbor_source_boxes_lists[start:end] + particle_count = 0 + for j in range(list1.shape[0]): + particle_count += tree.box_source_counts_nonchild[list1[j]] + workload[box_idx] += box_ntargets * particle_count + + # workload for list 3 near + start = traversal.from_sep_close_smaller_starts[i] + end = traversal.from_sep_close_smaller_starts[i + 1] + list3_near = traversal.from_sep_close_smaller_lists[start:end] + particle_count = 0 + for j in range(list3_near.shape[0]): + particle_count += tree.box_source_counts_nonchild[list3_near[j]] + workload[box_idx] += box_ntargets * particle_count + + for i in range(tree.nboxes): + # workload for multipole calculation + workload[i] += tree.box_source_counts_nonchild[i] * 5 + + total_workload = 0 + for i in range(tree.nboxes): + total_workload += workload[i] + + dfs_order = np.empty((tree.nboxes,), dtype=tree.box_id_dtype) + idx = 0 + stack = [0] + while len(stack) > 0: + box_id = stack.pop() + dfs_order[idx] = box_id + idx += 1 + for i in range(2**tree.dimensions): + child_box_id = tree.box_child_ids[i][box_id] + if child_box_id > 0: + stack.append(child_box_id) + + rank = 0 + start = 0 + workload_count = 0 + for i in range(tree.nboxes): + box_idx = dfs_order[i] + responsible_boxes_mask[rank][box_idx] = 1 + workload_count += workload[box_idx] + if (workload_count > (rank + 1)*total_workload/total_rank or + i == tree.nboxes - 1): + responsible_boxes_list[rank] = cl.array.to_device( + queue, dfs_order[start:i+1]) + start = i + 1 + rank += 1 return responsible_boxes_mask, responsible_boxes_list @@ -398,7 +439,7 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): # Each rank is responsible for calculating the multiple expansion as well as # evaluating target potentials in *responsible_boxes* responsible_boxes_mask, responsible_boxes_list = \ - partition_work(tree, total_rank, queue) + partition_work(traversal, total_rank, queue) # Calculate ancestors of responsible boxes ancestor_boxes = cl.array.zeros(queue, (total_rank, tree.nboxes), @@ -735,12 +776,11 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran local_src_weights) # these potentials are called alpha in [1] - print("Step 3: " + str(time.time()-last_time)) + print("List 1: " + str(time.time()-last_time)) # }}} # {{{ "Stage 4:" 
translate separated siblings' ("list 2") mpoles to local - last_time = time.time() logger.debug("translate separated siblings' ('list 2') mpoles to local") local_exps = wrangler.multipole_to_local( @@ -751,11 +791,11 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran mpole_exps) # local_exps represents both Gamma and Delta in [1] - print("Step 4: " + str(time.time()-last_time)) # }}} # {{{ "Stage 5:" evaluate sep. smaller mpoles ("list 3") at particles + last_time = time.time() logger.debug("evaluate sep. smaller mpoles at particles ('list 3 far')") @@ -779,6 +819,8 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran trav_global.from_sep_close_smaller_lists, local_src_weights) + print("List 3: " + str(time.time()-last_time)) + # }}} # {{{ "Stage 6:" form locals for separated bigger source boxes ("list 4") -- GitLab From ac80df8e455781abc28a1e7d97c16ee149463f57 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Sun, 10 Dec 2017 20:23:00 -0600 Subject: [PATCH 039/260] Implement an all-reduce communication pattern. --- boxtree/distributed.py | 78 ++++++++++++++++++++++++++++++++++++++++++ test/test_tree.py | 7 ++-- 2 files changed, 83 insertions(+), 2 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 39fdcdc..b57fa20 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -42,6 +42,84 @@ print("Process %d of %d on %s with ctx %s.\n" % ( queue.context.devices)) +class AllReduceCommPattern(object): + """Describes a butterfly communication pattern for allreduce. Supports efficient + allreduce between an arbitrary number of processes. + """ + + def __init__(self, rank, nprocs): + """ + :arg rank: My rank + :arg nprocs: Total number of processors + """ + assert nprocs > 0 + self.rank = rank + self.left = 0 + self.right = nprocs + self.midpoint = nprocs // 2 + + def sources(self): + """Return the set of source nodes at this communication stage. The current + process receives messages from these nodes. + """ + if self.rank < self.midpoint: + partner = self.midpoint + (self.rank - self.left) + if self.rank == self.midpoint - 1 and partner == self.right: + partners = set() + elif self.rank == self.midpoint - 1 and partner == self.right - 2: + partners = set([partner, partner + 1]) + else: + partners = set([partner]) + else: + partner = self.left + (self.rank - self.midpoint) + if self.rank == self.right - 1 and partner == self.midpoint: + partners = set() + elif self.rank == self.right - 1 and partner == self.midpoint - 2: + partners = set([partner, partner + 1]) + else: + partners = set([partner]) + + return partners + + def sinks(self): + """Return the set of sink nodes at this communication stage. The current process + sends a message to these nodes. + """ + if self.rank < self.midpoint: + partner = self.midpoint + (self.rank - self.left) + if partner == self.right: + partner -= 1 + else: + partner = self.left + (self.rank - self.midpoint) + if partner == self.midpoint: + partner -= 1 + + return set([partner]) + + def messages(self): + """Return the set of relevant messages to send to the sinks. + """ + if self.rank < self.midpoint: + return set(range(self.midpoint, self.right)) + else: + return set(range(self.left, self.midpoint)) + + def advance(self): + """Advance to the next stage in the communication pattern. 
+ """ + if self.rank < self.midpoint: + self.right = self.midpoint + self.midpoint = (self.midpoint + self.left) // 2 + else: + self.left = self.midpoint + self.midpoint = (self.midpoint + self.right) // 2 + + def done(self): + """Return whether this node is finished communicating. + """ + return self.left + 1 == self.right + + class LocalTree(Tree): @property diff --git a/test/test_tree.py b/test/test_tree.py index f68a885..d58c0cc 100644 --- a/test/test_tree.py +++ b/test/test_tree.py @@ -97,7 +97,9 @@ def run_build_test(builder, queue, dims, dtype, nparticles, do_plot, and dims == 2 and queue.device.platform.name == "Portable Computing Language"): # arg list lenghts disagree - pytest.xfail("2D float doesn't work on POCL") + # pytest.xfail("2D float doesn't work on POCL") + # pass + pass logger.info(75*"-") if max_particles_in_box is not None: @@ -1001,7 +1003,8 @@ def test_space_invader_query(ctx_getter, dims, dtype, do_plot=False): and dims == 2 and queue.device.platform.name == "Portable Computing Language"): # arg list lenghts disagree - pytest.xfail("2D float doesn't work on POCL") + # pytest.xfail("2D float doesn't work on POCL") + pass dtype = np.dtype(dtype) nparticles = 10**5 -- GitLab From 105f5989f7c0959cf392f9ee7aae1060f3711e25 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Sun, 10 Dec 2017 22:40:17 -0600 Subject: [PATCH 040/260] Minor updates to allreduce. --- boxtree/distributed.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index b57fa20..0834f91 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -41,6 +41,7 @@ print("Process %d of %d on %s with ctx %s.\n" % ( MPI.Get_processor_name(), queue.context.devices)) +# {{{ all-reduce class AllReduceCommPattern(object): """Describes a butterfly communication pattern for allreduce. Supports efficient @@ -60,7 +61,7 @@ class AllReduceCommPattern(object): def sources(self): """Return the set of source nodes at this communication stage. The current - process receives messages from these nodes. + process receives messages from these processes. """ if self.rank < self.midpoint: partner = self.midpoint + (self.rank - self.left) @@ -83,7 +84,7 @@ class AllReduceCommPattern(object): def sinks(self): """Return the set of sink nodes at this communication stage. The current process - sends a message to these nodes. + sends a message to these processes. """ if self.rank < self.midpoint: partner = self.midpoint + (self.rank - self.left) @@ -97,16 +98,20 @@ class AllReduceCommPattern(object): return set([partner]) def messages(self): - """Return the set of relevant messages to send to the sinks. + """Return the range of relevant messages to send to the sinks. This is returned + as a [start, end) pair. By design, it is a consecutive range. """ if self.rank < self.midpoint: - return set(range(self.midpoint, self.right)) + return (self.midpoint, self.right) else: - return set(range(self.left, self.midpoint)) + return (self.left, self.midpoint) def advance(self): """Advance to the next stage in the communication pattern. """ + if self.done(): + raise ValueError("finished communicating") + if self.rank < self.midpoint: self.right = self.midpoint self.midpoint = (self.midpoint + self.left) // 2 @@ -115,10 +120,12 @@ class AllReduceCommPattern(object): self.midpoint = (self.midpoint + self.right) // 2 def done(self): - """Return whether this node is finished communicating. + """Return whether this process is finished communicating. 
""" return self.left + 1 == self.right +# }}} + class LocalTree(Tree): -- GitLab From 9c084f24508830ae61fb8500e51ceb09e03edb57 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Sun, 10 Dec 2017 22:44:15 -0600 Subject: [PATCH 041/260] Add a test for the allreduce. --- test/test_distributed_tools.py | 85 ++++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 test/test_distributed_tools.py diff --git a/test/test_distributed_tools.py b/test/test_distributed_tools.py new file mode 100644 index 0000000..947aa16 --- /dev/null +++ b/test/test_distributed_tools.py @@ -0,0 +1,85 @@ +from __future__ import division, absolute_import, print_function + +__copyright__ = "Copyright (C) 2017 Matt Wala" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import logging +import pytest +import sys + +logger = logging.getLogger(__name__) + + +@pytest.mark.parametrize("p", [1, 2, 3, 4, 5, 6, 7, 8, 16, 17]) +def test_allreduce_comm_pattern(p): + from boxtree.distributed import AllReduceCommPattern + + # This models the parallel allreduce communication pattern. 
+ + # processor -> communication pattern of the processor + patterns = [AllReduceCommPattern(i, p) for i in range(p)] + # processor -> list of data items on the processor + data = [[i] for i in range(p)] + from copy import deepcopy + + while not all(pat.done() for pat in patterns): + new_data = deepcopy(data) + + for i in range(p): + if patterns[i].done(): + for pat in patterns: + if not pat.done(): + assert i not in pat.sources() | pat.sinks() + continue + + # Check sources / sinks match up + for s in patterns[i].sinks(): + assert i in patterns[s].sources() + + for s in patterns[i].sources(): + assert i in patterns[s].sinks() + + # Send / recv data + for s in patterns[i].sinks(): + new_data[s].extend(data[i]) + + for pat in patterns: + if not pat.done(): + pat.advance() + data = new_data + + for item in data: + assert len(item) == p + assert set(item) == set(range(p)) + + +# You can test individual routines by typing +# $ python test_tree.py 'test_routine(cl.create_some_context)' + +if __name__ == "__main__": + if len(sys.argv) > 1: + exec(sys.argv[1]) + else: + import py.test + py.test.cmdline.main([__file__]) + +# vim: fdm=marker -- GitLab From 1384f6e42083656ab0d30ae54eed48ad88cac228 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Sun, 10 Dec 2017 22:45:18 -0600 Subject: [PATCH 042/260] Undo changes to test_tree.py --- test/test_tree.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/test/test_tree.py b/test/test_tree.py index d58c0cc..f68a885 100644 --- a/test/test_tree.py +++ b/test/test_tree.py @@ -97,9 +97,7 @@ def run_build_test(builder, queue, dims, dtype, nparticles, do_plot, and dims == 2 and queue.device.platform.name == "Portable Computing Language"): # arg list lenghts disagree - # pytest.xfail("2D float doesn't work on POCL") - # pass - pass + pytest.xfail("2D float doesn't work on POCL") logger.info(75*"-") if max_particles_in_box is not None: @@ -1003,8 +1001,7 @@ def test_space_invader_query(ctx_getter, dims, dtype, do_plot=False): and dims == 2 and queue.device.platform.name == "Portable Computing Language"): # arg list lenghts disagree - # pytest.xfail("2D float doesn't work on POCL") - pass + pytest.xfail("2D float doesn't work on POCL") dtype = np.dtype(dtype) nparticles = 10**5 -- GitLab From 88a4c1405ee71d89f8ff13683512f63697a93d7a Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Sun, 10 Dec 2017 22:46:01 -0600 Subject: [PATCH 043/260] flake8 fix --- boxtree/distributed.py | 1 + 1 file changed, 1 insertion(+) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 859f96a..62ca5a9 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -41,6 +41,7 @@ print("Process %d of %d on %s with ctx %s.\n" % ( MPI.Get_processor_name(), queue.context.devices)) + # {{{ all-reduce class AllReduceCommPattern(object): -- GitLab From 06d04da0917df972df308dddd8dd2beceb7f96ab Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Sun, 10 Dec 2017 22:49:15 -0600 Subject: [PATCH 044/260] s/ValueError/RuntimeError --- boxtree/distributed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 62ca5a9..c214a32 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -111,7 +111,7 @@ class AllReduceCommPattern(object): """Advance to the next stage in the communication pattern. 
""" if self.done(): - raise ValueError("finished communicating") + raise RuntimeError("finished communicating") if self.rank < self.midpoint: self.right = self.midpoint -- GitLab From ffc4a35c3016489b3fff02bf1438bcd6e0929bed Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Mon, 11 Dec 2017 21:42:23 -0600 Subject: [PATCH 045/260] Implement a utility class to compress dense matrices into CSR lists. --- boxtree/tools.py | 60 ++++++++++++++++++++++++++++++++++ test/test_distributed_tools.py | 33 +++++++++++++++++++ 2 files changed, 93 insertions(+) diff --git a/boxtree/tools.py b/boxtree/tools.py index ece152d..1bff3c4 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -542,4 +542,64 @@ class InlineBinarySearch(object): # }}} + +# {{{ compress a matrix representation of a csr list + +MATRIX_COMPRESSOR_BODY = r""" +void generate(LIST_ARG_DECL USER_ARG_DECL index_type i) +{ + for (int j = 0; j < ncols; ++j) + { + if (matrix[ncols * i + j]) + { + APPEND_output(j); + } + } +} +""" + + +class MatrixCompressorKernel(object): + + def __init__(self, context): + self.context = context + + @memoize_method + def get_kernel(self, matrix_dtype, list_dtype): + from pyopencl.algorithm import ListOfListsBuilder + from pyopencl.tools import VectorArg, ScalarArg + + return ListOfListsBuilder( + self.context, + [("output", list_dtype)], + MATRIX_COMPRESSOR_BODY, + [ + ScalarArg(np.int32, "ncols"), + VectorArg(matrix_dtype, "matrix"), + ], + name_prefix="compress_matrix_to_csr") + + def __call__(self, queue, mat, list_dtype=None): + """Convert a dense matrix into a :ref:`csr` list. + + :arg mat: A matrix representation of a list of lists, so that mat[i,j] + is true if and only if j is in list i. + + :arg list_dtype: The dtype for the lists. Defaults to the matrix dtype. + + :returns: A tuple *(starts, lists, event)*. 
+ """ + if len(mat.shape) != 2: + raise ValueError("not a matrix") + + if list_dtype is None: + list_dtype = mat.dtype + + knl = self.get_kernel(mat.dtype, list_dtype) + + result, evt = knl(queue, mat.shape[0], mat.shape[1], mat.data) + return (result["output"].starts, result["output"].lists, evt) + +# }}} + # vim: foldmethod=marker:filetype=pyopencl diff --git a/test/test_distributed_tools.py b/test/test_distributed_tools.py index 947aa16..3fd2855 100644 --- a/test/test_distributed_tools.py +++ b/test/test_distributed_tools.py @@ -26,6 +26,14 @@ import logging import pytest import sys +import numpy as np + +import pyopencl as cl +import pyopencl.array # noqa +from pyopencl.tools import ( # noqa + pytest_generate_tests_for_pyopencl as pytest_generate_tests) + + logger = logging.getLogger(__name__) @@ -72,6 +80,31 @@ def test_allreduce_comm_pattern(p): assert set(item) == set(range(p)) +def test_matrix_compressor(ctx_getter): + cl_context = ctx_getter() + + from boxtree.tools import MatrixCompressorKernel + matcompr = MatrixCompressorKernel(cl_context) + + n = 40 + m = 10 + + np.random.seed(15) + + arr = (np.random.rand(n, m) > 0.5).astype(np.int8) + + with cl.CommandQueue(cl_context) as q: + d_arr = cl.array.to_device(q, arr) + arr_starts, arr_lists, evt = matcompr(q, d_arr) + cl.wait_for_events([evt]) + arr_starts = arr_starts.get(q) + arr_lists = arr_lists.get(q) + + for i in range(n): + items = arr_lists[arr_starts[i]:arr_starts[i+1]] + assert set(items) == set(arr[i].nonzero()[0]) + + # You can test individual routines by typing # $ python test_tree.py 'test_routine(cl.create_some_context)' -- GitLab From df00afbc1d692aebd7e852306fc9b47a743dcbc8 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Tue, 12 Dec 2017 18:47:44 -0600 Subject: [PATCH 046/260] Compute box_to_users. --- boxtree/distributed.py | 107 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 103 insertions(+), 4 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index c214a32..8d6a611 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -1,12 +1,15 @@ from __future__ import division from mpi4py import MPI import numpy as np +import loopy as lp import pyopencl as cl from mako.template import Template from pyopencl.tools import dtype_to_ctype from pyopencl.scan import GenericScanKernel +from pytools import memoize_method, memoize_in from boxtree import Tree + __copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ Copyright (C) 2017 Hao Gao" @@ -129,6 +132,19 @@ class AllReduceCommPattern(object): class LocalTree(Tree): + """ + .. attribute:: box_to_users_starts + + ``box_id_t [nboxes + 1]`` + + .. attribute:: box_to_users_lists + + ``int32 [*]`` + + A :ref:`csr` array. For each box, the list of processes which own + targets that *use* the multipole expansion at this box, via either List + 3 or via the downward (L2L) pass. 
+ """ @property def nboxes(self): @@ -144,10 +160,13 @@ class LocalTree(Tree): @classmethod def copy_from_global_tree(cls, global_tree, responsible_boxes_list, - ancestor_mask): + ancestor_mask, box_to_users_starts, + box_to_users_lists): local_tree = global_tree.copy( responsible_boxes_list=responsible_boxes_list, - ancestor_mask=ancestor_mask) + ancestor_mask=ancestor_mask, + box_to_users_starts=box_to_users_starts, + box_to_users_lists=box_to_users_lists) local_tree.__class__ = cls return local_tree @@ -158,7 +177,8 @@ class LocalTree(Tree): "box_source_counts_nonchild", "box_source_starts", "box_target_counts_cumul", "box_target_counts_nonchild", "box_target_starts", "level_start_box_nrs_dev", "sources", "targets", - "responsible_boxes_list", "ancestor_mask" + "responsible_boxes_list", "ancestor_mask", + "box_to_users_starts", "box_to_users_lists" ] d_tree = self.copy() for field in field_to_device: @@ -502,6 +522,42 @@ def gen_local_particles(queue, particles, nparticles, tree, def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): + # {{{ list 3 marker kernel + + @memoize_in(generate_local_tree, "loopy_cache") + def get_list_3_marker_kernel(): + knl = lp.make_kernel( + [ + "{[irank] : 0 <= irank < total_rank}", + "{[itgt_box] : 0 <= itgt_box < ntgt_boxes}", + "{[isrc_box] : isrc_box_start <= isrc_box < isrc_box_end}", + ], + """ + for irank, itgt_box + <> tgt_ibox = target_boxes[itgt_box] + <> is_responsible = responsible_boxes[irank, tgt_ibox] + if is_responsible + <> isrc_box_start = source_box_starts[itgt_box] + <> isrc_box_end = source_box_starts[itgt_box + 1] + for isrc_box + <> src_ibox = source_boxes[isrc_box] + box_mpole_is_used[irank, src_ibox] = 1 + end + end + end + """, + [ + lp.ValueArg("nboxes", np.int32), + lp.GlobalArg("responsible_boxes, box_mpole_is_used", + shape=("total_rank", "nboxes")), + lp.GlobalArg("source_boxes", shape=None), + "..." + ], + default_offset=lp.auto) + return knl + + # }}} + # Get MPI information current_rank = comm.Get_rank() total_rank = comm.Get_size() @@ -632,6 +688,47 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): src_boxes_mask[rank] ) + # {{{ compute box_to_users + + logger.debug("computing box_to_users: start") + + # Compute the set of processes that use the multipole of a box. + box_mpole_is_used = cl.array.zeros( + queue, (total_rank, tree.nboxes), dtype=np.int8) + + # An mpole is used by process p if it is an ancestor of a box owned by p. + box_mpole_is_used = box_mpole_is_used | ancestor_boxes + + # An mpole is used by process p if it is in the List 3 of a box owned by p. 
+ + list_3_marker_kernel = get_list_3_marker_kernel() + + for level in range(tree.nlevels): + source_box_starts = traversal.from_sep_smaller_by_level[level].starts + source_boxes = traversal.from_sep_smaller_by_level[level].lists + list_3_marker_kernel(queue, + total_rank=total_rank, + nboxes=tree.nboxes, + target_boxes=traversal.target_boxes, + responsible_boxes=responsible_boxes_mask, + source_box_starts=source_box_starts, + source_boxes=source_boxes, + box_mpole_is_used=box_mpole_is_used) + + from boxtree.tools import MatrixCompressorKernel + matcompr = MatrixCompressorKernel(ctx) + ( + box_to_users_starts, + box_to_users_lists, + evt) = matcompr(queue, box_mpole_is_used, list_dtype=np.int32) + + cl.wait_for_events([evt]) + del box_mpole_is_used + + logger.debug("computing box_to_users: done") + + # }}} + # }}} # Convert src_weights to tree order @@ -656,7 +753,9 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): for rank in range(total_rank): local_tree[rank] = LocalTree.copy_from_global_tree( tree, responsible_boxes_list[rank].get(), - ancestor_boxes[rank].get()) + ancestor_boxes[rank].get(), + box_to_users_starts.get(), + box_to_users_lists.get()) (local_tree[rank].sources, local_tree[rank].box_source_starts, -- GitLab From 7c5f986016b5006ed8f545da5d73907dfe06ac0c Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Tue, 12 Dec 2017 18:50:53 -0600 Subject: [PATCH 047/260] flake8 fix --- boxtree/distributed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 8d6a611..522198c 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -6,7 +6,7 @@ import pyopencl as cl from mako.template import Template from pyopencl.tools import dtype_to_ctype from pyopencl.scan import GenericScanKernel -from pytools import memoize_method, memoize_in +from pytools import memoize_in from boxtree import Tree -- GitLab From dc4e89047e823f9edb55a719b8f41c0eb64b5247 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Tue, 12 Dec 2017 18:59:31 -0600 Subject: [PATCH 048/260] Move AllReduceCommPattern to boxtree.tools --- boxtree/distributed.py | 86 ------------------- boxtree/tools.py | 86 +++++++++++++++++++ ...est_distributed_tools.py => test_tools.py} | 2 +- 3 files changed, 87 insertions(+), 87 deletions(-) rename test/{test_distributed_tools.py => test_tools.py} (98%) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 522198c..f3db441 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -45,92 +45,6 @@ print("Process %d of %d on %s with ctx %s.\n" % ( queue.context.devices)) -# {{{ all-reduce - -class AllReduceCommPattern(object): - """Describes a butterfly communication pattern for allreduce. Supports efficient - allreduce between an arbitrary number of processes. - """ - - def __init__(self, rank, nprocs): - """ - :arg rank: My rank - :arg nprocs: Total number of processors - """ - assert nprocs > 0 - self.rank = rank - self.left = 0 - self.right = nprocs - self.midpoint = nprocs // 2 - - def sources(self): - """Return the set of source nodes at this communication stage. The current - process receives messages from these processes. 
- """ - if self.rank < self.midpoint: - partner = self.midpoint + (self.rank - self.left) - if self.rank == self.midpoint - 1 and partner == self.right: - partners = set() - elif self.rank == self.midpoint - 1 and partner == self.right - 2: - partners = set([partner, partner + 1]) - else: - partners = set([partner]) - else: - partner = self.left + (self.rank - self.midpoint) - if self.rank == self.right - 1 and partner == self.midpoint: - partners = set() - elif self.rank == self.right - 1 and partner == self.midpoint - 2: - partners = set([partner, partner + 1]) - else: - partners = set([partner]) - - return partners - - def sinks(self): - """Return the set of sink nodes at this communication stage. The current process - sends a message to these processes. - """ - if self.rank < self.midpoint: - partner = self.midpoint + (self.rank - self.left) - if partner == self.right: - partner -= 1 - else: - partner = self.left + (self.rank - self.midpoint) - if partner == self.midpoint: - partner -= 1 - - return set([partner]) - - def messages(self): - """Return the range of relevant messages to send to the sinks. This is returned - as a [start, end) pair. By design, it is a consecutive range. - """ - if self.rank < self.midpoint: - return (self.midpoint, self.right) - else: - return (self.left, self.midpoint) - - def advance(self): - """Advance to the next stage in the communication pattern. - """ - if self.done(): - raise RuntimeError("finished communicating") - - if self.rank < self.midpoint: - self.right = self.midpoint - self.midpoint = (self.midpoint + self.left) // 2 - else: - self.left = self.midpoint - self.midpoint = (self.midpoint + self.right) // 2 - - def done(self): - """Return whether this process is finished communicating. - """ - return self.left + 1 == self.right - -# }}} - - class LocalTree(Tree): """ .. attribute:: box_to_users_starts diff --git a/boxtree/tools.py b/boxtree/tools.py index 1bff3c4..1e5aed1 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -602,4 +602,90 @@ class MatrixCompressorKernel(object): # }}} + +# {{{ all-reduce + +class AllReduceCommPattern(object): + """Describes a butterfly communication pattern for allreduce. Supports efficient + allreduce between an arbitrary number of processes. + """ + + def __init__(self, rank, nprocs): + """ + :arg rank: My rank + :arg nprocs: Total number of processors + """ + assert nprocs > 0 + self.rank = rank + self.left = 0 + self.right = nprocs + self.midpoint = nprocs // 2 + + def sources(self): + """Return the set of source nodes at this communication stage. The current + process receives messages from these processes. + """ + if self.rank < self.midpoint: + partner = self.midpoint + (self.rank - self.left) + if self.rank == self.midpoint - 1 and partner == self.right: + partners = set() + elif self.rank == self.midpoint - 1 and partner == self.right - 2: + partners = set([partner, partner + 1]) + else: + partners = set([partner]) + else: + partner = self.left + (self.rank - self.midpoint) + if self.rank == self.right - 1 and partner == self.midpoint: + partners = set() + elif self.rank == self.right - 1 and partner == self.midpoint - 2: + partners = set([partner, partner + 1]) + else: + partners = set([partner]) + + return partners + + def sinks(self): + """Return the set of sink nodes at this communication stage. The current process + sends a message to these processes. 
+ """ + if self.rank < self.midpoint: + partner = self.midpoint + (self.rank - self.left) + if partner == self.right: + partner -= 1 + else: + partner = self.left + (self.rank - self.midpoint) + if partner == self.midpoint: + partner -= 1 + + return set([partner]) + + def messages(self): + """Return the range of relevant messages to send to the sinks. This is returned + as a [start, end) pair. By design, it is a consecutive range. + """ + if self.rank < self.midpoint: + return (self.midpoint, self.right) + else: + return (self.left, self.midpoint) + + def advance(self): + """Advance to the next stage in the communication pattern. + """ + if self.done(): + raise RuntimeError("finished communicating") + + if self.rank < self.midpoint: + self.right = self.midpoint + self.midpoint = (self.midpoint + self.left) // 2 + else: + self.left = self.midpoint + self.midpoint = (self.midpoint + self.right) // 2 + + def done(self): + """Return whether this process is finished communicating. + """ + return self.left + 1 == self.right + +# }}} + # vim: foldmethod=marker:filetype=pyopencl diff --git a/test/test_distributed_tools.py b/test/test_tools.py similarity index 98% rename from test/test_distributed_tools.py rename to test/test_tools.py index 3fd2855..6a14f37 100644 --- a/test/test_distributed_tools.py +++ b/test/test_tools.py @@ -39,7 +39,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("p", [1, 2, 3, 4, 5, 6, 7, 8, 16, 17]) def test_allreduce_comm_pattern(p): - from boxtree.distributed import AllReduceCommPattern + from boxtree.tools import AllReduceCommPattern # This models the parallel allreduce communication pattern. -- GitLab From eb8930ef31a6d94de80fc2166b268e6061fa9a9b Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Thu, 14 Dec 2017 00:53:34 -0600 Subject: [PATCH 049/260] Generalize MatrixCompressor to compress both lists and matrices. 
--- boxtree/tools.py | 92 ++++++++++++++++++++++++++++++++-------------- test/test_tools.py | 27 ++++++++++++-- 2 files changed, 88 insertions(+), 31 deletions(-) diff --git a/boxtree/tools.py b/boxtree/tools.py index 1e5aed1..4f3bc67 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -543,14 +543,26 @@ class InlineBinarySearch(object): # }}} -# {{{ compress a matrix representation of a csr list +# {{{ compress a masked array into a list / list of lists -MATRIX_COMPRESSOR_BODY = r""" + +MASK_LIST_COMPRESSOR_BODY = r""" +void generate(LIST_ARG_DECL USER_ARG_DECL index_type i) +{ + if (mask[i]) + { + APPEND_output(i); + } +} +""" + + +MASK_MATRIX_COMPRESSOR_BODY = r""" void generate(LIST_ARG_DECL USER_ARG_DECL index_type i) { for (int j = 0; j < ncols; ++j) { - if (matrix[ncols * i + j]) + if (mask[ncols * i + j]) { APPEND_output(j); } @@ -559,46 +571,70 @@ void generate(LIST_ARG_DECL USER_ARG_DECL index_type i) """ -class MatrixCompressorKernel(object): +class MaskCompressorKernel(object): def __init__(self, context): self.context = context @memoize_method - def get_kernel(self, matrix_dtype, list_dtype): + def get_list_compressor_kernel(self, mask_dtype, list_dtype): + from pyopencl.algorithm import ListOfListsBuilder + from pyopencl.tools import VectorArg + + return ListOfListsBuilder( + self.context, + [("output", list_dtype)], + MASK_LIST_COMPRESSOR_BODY, + [ + VectorArg(mask_dtype, "mask"), + ], + name_prefix="compress_list") + + @memoize_method + def get_matrix_compressor_kernel(self, mask_dtype, list_dtype): from pyopencl.algorithm import ListOfListsBuilder from pyopencl.tools import VectorArg, ScalarArg return ListOfListsBuilder( self.context, [("output", list_dtype)], - MATRIX_COMPRESSOR_BODY, + MASK_MATRIX_COMPRESSOR_BODY, [ ScalarArg(np.int32, "ncols"), - VectorArg(matrix_dtype, "matrix"), + VectorArg(mask_dtype, "mask"), ], - name_prefix="compress_matrix_to_csr") + name_prefix="compress_matrix") - def __call__(self, queue, mat, list_dtype=None): - """Convert a dense matrix into a :ref:`csr` list. + def __call__(self, queue, mask, list_dtype=None): + """Convert a mask to a list in :ref:`csr` format. - :arg mat: A matrix representation of a list of lists, so that mat[i,j] - is true if and only if j is in list i. + :arg mask: Either a 1D or 2D array. + * If *mask* is 1D, it should represent a masked list, where + *mask[i]* is true if and only if *i* is in the list. + * If *mask* is 2D, it should represent a list of masked lists, + so that *mask[i,j]* is true if and only if *j* is in list *i*. - :arg list_dtype: The dtype for the lists. Defaults to the matrix dtype. + :arg list_dtype: The dtype for the output list(s). Defaults to the mask + dtype. - :returns: A tuple *(starts, lists, event)*. + :returns: The return value depends on the type of the input. + * If mask* is 1D, returns a tuple *(list, evt)*. + * If *mask* is 2D, returns a tuple *(starts, lists, event)*, as a + :ref:`csr` list. 
""" - if len(mat.shape) != 2: - raise ValueError("not a matrix") - if list_dtype is None: - list_dtype = mat.dtype - - knl = self.get_kernel(mat.dtype, list_dtype) - - result, evt = knl(queue, mat.shape[0], mat.shape[1], mat.data) - return (result["output"].starts, result["output"].lists, evt) + list_dtype = mask.dtype + + if len(mask.shape) == 1: + knl = self.get_list_compressor_kernel(mask.dtype, list_dtype) + result, evt = knl(queue, mask.shape[0], mask.data) + return (result["output"].lists, evt) + elif len(mask.shape) == 2: + knl = self.get_matrix_compressor_kernel(mask.dtype, list_dtype) + result, evt = knl(queue, mask.shape[0], mask.shape[1], mask.data) + return (result["output"].starts, result["output"].lists, evt) + else: + raise ValueError("unsupported dimensionality") # }}} @@ -610,16 +646,16 @@ class AllReduceCommPattern(object): allreduce between an arbitrary number of processes. """ - def __init__(self, rank, nprocs): + def __init__(self, rank, size): """ :arg rank: My rank - :arg nprocs: Total number of processors + :arg size: Total number of processors """ - assert nprocs > 0 + assert 0 <= rank < size self.rank = rank self.left = 0 - self.right = nprocs - self.midpoint = nprocs // 2 + self.right = size + self.midpoint = size // 2 def sources(self): """Return the set of source nodes at this communication stage. The current diff --git a/test/test_tools.py b/test/test_tools.py index 6a14f37..adad3fe 100644 --- a/test/test_tools.py +++ b/test/test_tools.py @@ -80,11 +80,11 @@ def test_allreduce_comm_pattern(p): assert set(item) == set(range(p)) -def test_matrix_compressor(ctx_getter): +def test_masked_matrix_compression(ctx_getter): cl_context = ctx_getter() - from boxtree.tools import MatrixCompressorKernel - matcompr = MatrixCompressorKernel(cl_context) + from boxtree.tools import MaskCompressorKernel + matcompr = MaskCompressorKernel(cl_context) n = 40 m = 10 @@ -105,6 +105,27 @@ def test_matrix_compressor(ctx_getter): assert set(items) == set(arr[i].nonzero()[0]) +def test_masked_list_compression(ctx_getter): + cl_context = ctx_getter() + + from boxtree.tools import MaskCompressorKernel + listcompr = MaskCompressorKernel(cl_context) + + n = 20 + + np.random.seed(15) + + arr = (np.random.rand(n) > 0.5).astype(np.int8) + + with cl.CommandQueue(cl_context) as q: + d_arr = cl.array.to_device(q, arr) + arr_list, evt = listcompr(q, d_arr) + cl.wait_for_events([evt]) + arr_list = arr_list.get(q) + + assert set(arr_list) == set(arr.nonzero()[0]) + + # You can test individual routines by typing # $ python test_tree.py 'test_routine(cl.create_some_context)' -- GitLab From 9465d684b52c5ec68c253b8978f30f5b73e2521e Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Fri, 15 Dec 2017 16:51:37 -0600 Subject: [PATCH 050/260] MaskCompressorKernel: Fix handling of transposed arrays. 
--- boxtree/tools.py | 11 +++++++++-- test/test_tools.py | 8 +++++--- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/boxtree/tools.py b/boxtree/tools.py index 4f3bc67..b174eb8 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -562,7 +562,7 @@ void generate(LIST_ARG_DECL USER_ARG_DECL index_type i) { for (int j = 0; j < ncols; ++j) { - if (mask[ncols * i + j]) + if (mask[outer_stride * i + j * inner_stride]) { APPEND_output(j); } @@ -601,6 +601,8 @@ class MaskCompressorKernel(object): MASK_MATRIX_COMPRESSOR_BODY, [ ScalarArg(np.int32, "ncols"), + ScalarArg(np.int32, "outer_stride"), + ScalarArg(np.int32, "inner_stride"), VectorArg(mask_dtype, "mask"), ], name_prefix="compress_matrix") @@ -631,7 +633,12 @@ class MaskCompressorKernel(object): return (result["output"].lists, evt) elif len(mask.shape) == 2: knl = self.get_matrix_compressor_kernel(mask.dtype, list_dtype) - result, evt = knl(queue, mask.shape[0], mask.shape[1], mask.data) + size = mask.dtype.itemsize + print("ROWS COLS", mask.shape) + print("STRIDES", mask.strides[0] // size, mask.strides[1] // size) + result, evt = knl(queue, mask.shape[0], mask.shape[1], + mask.strides[0] // size, mask.strides[1] // size, + mask.data) return (result["output"].starts, result["output"].lists, evt) else: raise ValueError("unsupported dimensionality") diff --git a/test/test_tools.py b/test/test_tools.py index adad3fe..ad98379 100644 --- a/test/test_tools.py +++ b/test/test_tools.py @@ -80,7 +80,8 @@ def test_allreduce_comm_pattern(p): assert set(item) == set(range(p)) -def test_masked_matrix_compression(ctx_getter): +@pytest.mark.parametrize("order", "CF") +def test_masked_matrix_compression(ctx_getter, order): cl_context = ctx_getter() from boxtree.tools import MaskCompressorKernel @@ -91,10 +92,11 @@ def test_masked_matrix_compression(ctx_getter): np.random.seed(15) - arr = (np.random.rand(n, m) > 0.5).astype(np.int8) + arr = (np.random.rand(n, m) > 0.5).astype(np.int8).copy(order=order) with cl.CommandQueue(cl_context) as q: - d_arr = cl.array.to_device(q, arr) + d_arr = cl.array.Array(q, (n, m), arr.dtype, order=order) + d_arr[:] = arr arr_starts, arr_lists, evt = matcompr(q, d_arr) cl.wait_for_events([evt]) arr_starts = arr_starts.get(q) -- GitLab From 801452527dcb4759f189e04de218614e9e9d66ec Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Fri, 15 Dec 2017 16:53:16 -0600 Subject: [PATCH 051/260] Remove print statements. --- boxtree/tools.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/boxtree/tools.py b/boxtree/tools.py index b174eb8..08a4716 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -634,8 +634,6 @@ class MaskCompressorKernel(object): elif len(mask.shape) == 2: knl = self.get_matrix_compressor_kernel(mask.dtype, list_dtype) size = mask.dtype.itemsize - print("ROWS COLS", mask.shape) - print("STRIDES", mask.strides[0] // size, mask.strides[1] // size) result, evt = knl(queue, mask.shape[0], mask.shape[1], mask.strides[0] // size, mask.strides[1] // size, mask.data) -- GitLab From 6efe9321e7b490dd4ed41b17d29e1bcdbc66cd86 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Mon, 18 Dec 2017 16:12:38 -0600 Subject: [PATCH 052/260] Implement communicate_mpoles(). 
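Rather than an Allreduce over the full multipole-coefficient array, the new
communicate_mpoles() walks the butterfly stages of AllReduceCommPattern (added earlier in
this series): at every stage a rank ships only the expansions of boxes that some process
in the destination subrange actually uses (looked up through box_to_user_starts/lists and
intersected with the boxes this rank has contributed to), and accumulates whatever it
receives before advancing. To show just the communication skeleton in isolation, here is
a minimal dense-data allreduce built on the same pattern, assuming mpi4py; it is a sketch
only, the real routine follows the same loop but restricts each send to the relevant box
subset and uses the wrangler to slice and update the coefficient array:

    import numpy as np
    from mpi4py import MPI

    from boxtree.tools import AllReduceCommPattern

    comm = MPI.COMM_WORLD
    pattern = AllReduceCommPattern(comm.Get_rank(), comm.Get_size())

    data = np.ones(8)               # this rank's contribution
    recv_buf = np.empty_like(data)

    while not pattern.done():
        # Copy before posting sends: the accumulator is updated below while
        # the nonblocking sends may still be in flight.
        send_buf = data.copy()
        send_reqs = [comm.Isend(send_buf, dest=sink) for sink in pattern.sinks()]

        for source in pattern.sources():
            comm.Recv(recv_buf, source=source)
            data += recv_buf

        MPI.Request.Waitall(send_reqs)
        pattern.advance()

    # Every rank now holds the elementwise sum of all contributions.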
--- boxtree/distributed.py | 316 +++++++++++++++++++++++++++++++++------ test/test_distributed.py | 28 +++- 2 files changed, 291 insertions(+), 53 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index f3db441..3e06fe9 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -6,7 +6,7 @@ import pyopencl as cl from mako.template import Template from pyopencl.tools import dtype_to_ctype from pyopencl.scan import GenericScanKernel -from pytools import memoize_in +from pytools import memoize_in, memoize_method from boxtree import Tree @@ -45,13 +45,16 @@ print("Process %d of %d on %s with ctx %s.\n" % ( queue.context.devices)) +COMMUNICATE_MPOLES_VIA_ALLREDUCE = False + + class LocalTree(Tree): """ - .. attribute:: box_to_users_starts + .. attribute:: box_to_user_starts ``box_id_t [nboxes + 1]`` - .. attribute:: box_to_users_lists + .. attribute:: box_to_user_lists ``int32 [*]`` @@ -74,13 +77,13 @@ class LocalTree(Tree): @classmethod def copy_from_global_tree(cls, global_tree, responsible_boxes_list, - ancestor_mask, box_to_users_starts, - box_to_users_lists): + ancestor_mask, box_to_user_starts, + box_to_user_lists): local_tree = global_tree.copy( responsible_boxes_list=responsible_boxes_list, ancestor_mask=ancestor_mask, - box_to_users_starts=box_to_users_starts, - box_to_users_lists=box_to_users_lists) + box_to_user_starts=box_to_user_starts, + box_to_user_lists=box_to_user_lists) local_tree.__class__ = cls return local_tree @@ -92,7 +95,7 @@ class LocalTree(Tree): "box_target_counts_cumul", "box_target_counts_nonchild", "box_target_starts", "level_start_box_nrs_dev", "sources", "targets", "responsible_boxes_list", "ancestor_mask", - "box_to_users_starts", "box_to_users_lists" + "box_to_user_starts", "box_to_user_lists" ] d_tree = self.copy() for field in field_to_device: @@ -114,10 +117,99 @@ class LocalTree(Tree): return d_tree +# {{{ parallel fmm wrangler + +class ParallelFMMLibExpansionWranglerCodeContainer(object): + + @memoize_method + def find_boxes_used_by_subrange_kernel(self): + knl = lp.make_kernel( + [ + "{[ibox]: 0 <= ibox < nboxes}", + "{[iuser]: iuser_start <= iuser < iuser_end}", + ], + """ + for ibox + <> iuser_start = box_to_user_starts[ibox] + <> iuser_end = box_to_user_starts[ibox + 1] + for iuser + <> useri = box_to_user_lists[iuser] + <> in_subrange = subrange_start <= useri and useri < subrange_end + if in_subrange + box_in_subrange[ibox] = 1 + end + end + end + """, + [ + lp.ValueArg("subrange_start, subrange_end", np.int32), + lp.GlobalArg("box_to_user_lists", shape=None), + "..." 
+ ]) + knl = lp.split_iname(knl, "ibox", 16, outer_tag="g.0", inner_tag="l.0") + return knl + + def get_wrangler(self, queue, tree, helmholtz_k, fmm_order): + return ParallelFMMLibExpansionWrangler(self, queue, tree, helmholtz_k, + fmm_order) + + +from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler + + +class ParallelFMMLibExpansionWrangler(FMMLibExpansionWrangler): + + def __init__(self, code_container, queue, tree, helmholtz_k, fmm_order): + """ + :arg fmm_order: Only supports single order for now + """ + def fmm_level_to_nterms(tree, level): + return fmm_order + + FMMLibExpansionWrangler.__init__(self, tree, helmholtz_k, + fmm_level_to_nterms) + self.queue = queue + self.fmm_order = fmm_order + self.code_container = code_container + + def slice_mpoles(self, mpoles, slice_indices): + mpoles = mpoles.reshape((-1,) + self.expansion_shape(self.fmm_order)) + return mpoles[slice_indices, :].reshape((-1,)) + + def update_mpoles(self, mpoles, mpole_updates, slice_indices): + """ + :arg mpole_updates: The first *len(slice_indices)* entries should contain + the values to add to *mpoles* + """ + mpoles = mpoles.reshape((-1,) + self.expansion_shape(self.fmm_order)) + mpole_updates = mpole_updates.reshape( + (-1,) + self.expansion_shape(self.fmm_order)) + mpoles[slice_indices, :] += mpole_updates[:len(slice_indices), :] + + def empty_box_in_subrange_mask(self): + return cl.array.empty(self.queue, self.tree.nboxes, dtype=np.int8) + + def find_boxes_used_by_subrange(self, box_in_subrange, subrange, + box_to_user_starts, box_to_user_lists): + knl = self.code_container.find_boxes_used_by_subrange_kernel() + knl(self.queue, + subrange_start=subrange[0], + subrange_end=subrange[1], + box_to_user_starts=box_to_user_starts, + box_to_user_lists=box_to_user_lists, + box_in_subrange=box_in_subrange) + + box_in_subrange.finish() + +# }}} + + class MPITags(): DIST_TREE = 0 DIST_WEIGHT = 1 GATHER_POTENTIALS = 2 + REDUCE_POTENTIALS = 3 + REDUCE_INDICES = 4 def partition_work(traversal, total_rank, queue): @@ -436,10 +528,10 @@ def gen_local_particles(queue, particles, nparticles, tree, def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): - # {{{ list 3 marker kernel + # {{{ kernel to mark if a box mpole is used by a process via an interaction list @memoize_in(generate_local_tree, "loopy_cache") - def get_list_3_marker_kernel(): + def get_box_mpole_is_used_marker_kernel(): knl = lp.make_kernel( [ "{[irank] : 0 <= irank < total_rank}", @@ -449,12 +541,12 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): """ for irank, itgt_box <> tgt_ibox = target_boxes[itgt_box] - <> is_responsible = responsible_boxes[irank, tgt_ibox] - if is_responsible + <> is_relevant = relevant_boxes_mask[irank, tgt_ibox] + if is_relevant <> isrc_box_start = source_box_starts[itgt_box] <> isrc_box_end = source_box_starts[itgt_box + 1] for isrc_box - <> src_ibox = source_boxes[isrc_box] + <> src_ibox = source_box_lists[isrc_box] box_mpole_is_used[irank, src_ibox] = 1 end end @@ -462,12 +554,14 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): """, [ lp.ValueArg("nboxes", np.int32), - lp.GlobalArg("responsible_boxes, box_mpole_is_used", - shape=("total_rank", "nboxes")), - lp.GlobalArg("source_boxes", shape=None), + lp.GlobalArg("relevant_boxes_mask, box_mpole_is_used", + shape=("total_rank", "nboxes")), + lp.GlobalArg("source_box_lists", shape=None), "..." 
], default_offset=lp.auto) + + knl = lp.split_iname(knl, "itgt_box", 16, outer_tag="g.0", inner_tag="l.0") return knl # }}} @@ -602,46 +696,56 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): src_boxes_mask[rank] ) - # {{{ compute box_to_users + # {{{ compute box_to_user - logger.debug("computing box_to_users: start") + logger.debug("computing box_to_user: start") - # Compute the set of processes that use the multipole of a box. - box_mpole_is_used = cl.array.zeros( - queue, (total_rank, tree.nboxes), dtype=np.int8) + box_mpole_is_used = cl.array.zeros(queue, (total_rank, tree.nboxes), + dtype=np.int8) # An mpole is used by process p if it is an ancestor of a box owned by p. - box_mpole_is_used = box_mpole_is_used | ancestor_boxes - - # An mpole is used by process p if it is in the List 3 of a box owned by p. - - list_3_marker_kernel = get_list_3_marker_kernel() - + knl = get_box_mpole_is_used_marker_kernel() + + # A mpole is used by process p if it is in the List 2 of either a box + # owned by p or one of its ancestors. + knl(queue, + total_rank=total_rank, + nboxes=tree.nboxes, + target_boxes=traversal.target_or_target_parent_boxes, + relevant_boxes_mask=responsible_boxes_mask | ancestor_boxes, + source_box_starts=traversal.from_sep_siblings_starts, + source_box_lists=traversal.from_sep_siblings_lists, + box_mpole_is_used=box_mpole_is_used) + + box_mpole_is_used.finish() + + # A mpole is used by process p if it is in the List 3 of a box owned by p. for level in range(tree.nlevels): source_box_starts = traversal.from_sep_smaller_by_level[level].starts - source_boxes = traversal.from_sep_smaller_by_level[level].lists - list_3_marker_kernel(queue, + source_box_lists = traversal.from_sep_smaller_by_level[level].lists + knl(queue, total_rank=total_rank, nboxes=tree.nboxes, target_boxes=traversal.target_boxes, - responsible_boxes=responsible_boxes_mask, + relevant_boxes_mask=responsible_boxes_mask, source_box_starts=source_box_starts, - source_boxes=source_boxes, + source_box_lists=source_box_lists, box_mpole_is_used=box_mpole_is_used) - from boxtree.tools import MatrixCompressorKernel - matcompr = MatrixCompressorKernel(ctx) + box_mpole_is_used.finish() + + from boxtree.tools import MaskCompressorKernel + matcompr = MaskCompressorKernel(ctx) ( - box_to_users_starts, - box_to_users_lists, - evt) = matcompr(queue, box_mpole_is_used, list_dtype=np.int32) + box_to_user_starts, + box_to_user_lists, + evt) = matcompr(queue, box_mpole_is_used.transpose(), + list_dtype=np.int32) cl.wait_for_events([evt]) del box_mpole_is_used - logger.debug("computing box_to_users: done") - - # }}} + logger.debug("computing box_to_user: done") # }}} @@ -668,8 +772,8 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): local_tree[rank] = LocalTree.copy_from_global_tree( tree, responsible_boxes_list[rank].get(), ancestor_boxes[rank].get(), - box_to_users_starts.get(), - box_to_users_lists.get()) + box_to_user_starts.get(), + box_to_user_lists.get()) (local_tree[rank].sources, local_tree[rank].box_source_starts, @@ -822,6 +926,124 @@ def generate_local_travs(local_tree, local_src_weights, box_bounding_box=None, return trav_local, trav_global +# {{{ communicate mpoles + +def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): + """Based on Algorithm 3: Reduce and Scatter in [1]. + + The main idea is to mimic a hypercube allreduce, but to reduce bandwidth by + sending only necessary information. + + .. 
[1] Lashuk, Ilya, Aparna Chandramowlishwaran, Harper Langston, + Tuan-Anh Nguyen, Rahul Sampath, Aashay Shringarpure, Richard Vuduc, Lexing + Ying, Denis Zorin, and George Biros. “A massively parallel adaptive fast + multipole method on heterogeneous architectures." Communications of the + ACM 55, no. 5 (2012): 101-109. + """ + rank = comm.Get_rank() + nprocs = comm.Get_size() + + stats = {} + + from time import time + t_start = time() + logger.debug("communicate multipoles: start") + + # contributing_boxes: + # + # A mask of the the set of boxes that the current process contributes + # to. This process contributes to a box when: + # + # (a) this process owns sources that contribute to the multipole expansion + # in the box (via the upward pass) or + # (b) this process has received a portion of the multipole expansion in this + # box from another process. + # + # Initially, this set consists of the boxes satisfying condition (a), which + # are precisely the boxes owned by this process and their ancestors. + contributing_boxes = trav.tree.ancestor_mask.copy() + contributing_boxes[trav.tree.responsible_boxes_list] = 1 + + from boxtree.tools import AllReduceCommPattern + comm_pattern = AllReduceCommPattern(rank, nprocs) + + # Temporary buffers for receiving data + mpole_exps_buf = np.empty(mpole_exps.shape, dtype=mpole_exps.dtype) + boxes_list_buf = np.empty(trav.tree.nboxes, dtype=trav.tree.box_id_dtype) + + # Temporary buffer for holding the mask + box_in_subrange = wrangler.empty_box_in_subrange_mask() + + stats["bytes_sent_by_stage"] = [] + stats["bytes_recvd_by_stage"] = [] + + while not comm_pattern.done(): + send_requests = [] + + # Send data to other processors. + if comm_pattern.sinks(): + # Compute the subset of boxes to be sent. + message_subrange = comm_pattern.messages() + + box_in_subrange.fill(0) + + wrangler.find_boxes_used_by_subrange( + box_in_subrange, message_subrange, + trav.tree.box_to_user_starts, trav.tree.box_to_user_lists) + + box_in_subrange_host = ( + box_in_subrange.map_to_host(flags=cl.map_flags.READ)) + + with box_in_subrange_host.data: + relevant_boxes_list = ( + np.nonzero(box_in_subrange_host & contributing_boxes) + [0] + .astype(trav.tree.box_id_dtype)) + + del box_in_subrange_host + + relevant_mpole_exps = wrangler.slice_mpoles(mpole_exps, + relevant_boxes_list) + + # Send the box subset to the other processors. + for sink in comm_pattern.sinks(): + req = comm.Isend(relevant_mpole_exps, dest=sink, + tag=MPITags.REDUCE_POTENTIALS) + send_requests.append(req) + + req = comm.Isend(relevant_boxes_list, dest=sink, + tag=MPITags.REDUCE_INDICES) + send_requests.append(req) + + # Receive data from other processors. + for source in comm_pattern.sources(): + comm.Recv(mpole_exps_buf, source=source, tag=MPITags.REDUCE_POTENTIALS) + + status = MPI.Status() + comm.Recv(boxes_list_buf, source=source, tag=MPITags.REDUCE_INDICES, + status=status) + nboxes = status.Get_count() // boxes_list_buf.dtype.itemsize + + # Update data structures. 
+ wrangler.update_mpoles(mpole_exps, mpole_exps_buf, + boxes_list_buf[:nboxes]) + + contributing_boxes[boxes_list_buf[:nboxes]] = 1 + + for req in send_requests: + req.wait() + + comm_pattern.advance() + + stats["total_time"] = time() - t_start + logger.debug("communicate multipoles: done in %.2f s" % stats["total_time"]) + + if return_stats: + return stats + +# }}} + + def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wrangler, local_target_mask, local_target_scan, local_ntargets, comm=MPI.COMM_WORLD): @@ -853,13 +1075,17 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran # }}} - # {{{ Communicate mpole + # {{{ Communicate mpoles + last_time = time.time() - mpole_exps_all = np.zeros_like(mpole_exps) - comm.Allreduce(mpole_exps, mpole_exps_all) + if COMMUNICATE_MPOLES_VIA_ALLREDUCE: + mpole_exps_all = np.zeros_like(mpole_exps) + comm.Allreduce(mpole_exps, mpole_exps_all) + mpole_exps = mpole_exps_all + else: + communicate_mpoles(wrangler, comm, trav_local, mpole_exps) - mpole_exps = mpole_exps_all print("Communication: " + str(time.time()-last_time)) # }}} diff --git a/test/test_distributed.py b/test/test_distributed.py index 064d663..212e623 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -6,10 +6,13 @@ import numpy.linalg as la from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler import time +import logging +logging.basicConfig(level=logging.INFO) + # Parameters dims = 2 -nsources = 50000 -ntargets = 50000 +nsources = 10000 +ntargets = 10000 dtype = np.float64 # Get the current rank @@ -22,6 +25,10 @@ sources_weights = None wrangler = None +ORDER = 3 +HELMHOLTZ_K = 0 + + # Generate particles and run shared-memory parallelism on rank 0 if rank == 0: last_time = time.time() @@ -88,9 +95,10 @@ if rank == 0: # Get pyfmmlib expansion wrangler def fmm_level_to_nterms(tree, level): - return 3 + return ORDER + wrangler = FMMLibExpansionWrangler( - trav.tree, 0, fmm_level_to_nterms=fmm_level_to_nterms) + trav.tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) # Compute FMM using shared memory parallelism from boxtree.fmm import drive_fmm @@ -120,14 +128,18 @@ last_time = now def fmm_level_to_nterms(tree, level): - return 3 + return ORDER + + +from boxtree.distributed import ParallelFMMLibExpansionWranglerCodeContainer, queue +local_wrangler = ( + ParallelFMMLibExpansionWranglerCodeContainer() + .get_wrangler(queue, local_tree, HELMHOLTZ_K, ORDER)) -local_wrangler = FMMLibExpansionWrangler( - local_tree, 0, fmm_level_to_nterms=fmm_level_to_nterms) if rank == 0: global_wrangler = FMMLibExpansionWrangler( - trav.tree, 0, fmm_level_to_nterms=fmm_level_to_nterms) + trav.tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) else: global_wrangler = None -- GitLab From 3fb7add84f2dc975338e1bf10179c087ef0552a1 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Wed, 20 Dec 2017 03:08:19 -0600 Subject: [PATCH 053/260] s/ParallelFMMLib/DistributedFMMLib/ --- boxtree/distributed.py | 6 +++--- test/test_distributed.py | 5 +++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 3e06fe9..fe4aed3 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -119,7 +119,7 @@ class LocalTree(Tree): # {{{ parallel fmm wrangler -class ParallelFMMLibExpansionWranglerCodeContainer(object): +class DistributedFMMLibExpansionWranglerCodeContainer(object): @memoize_method def find_boxes_used_by_subrange_kernel(self): @@ -150,14 +150,14 @@ class 
ParallelFMMLibExpansionWranglerCodeContainer(object): return knl def get_wrangler(self, queue, tree, helmholtz_k, fmm_order): - return ParallelFMMLibExpansionWrangler(self, queue, tree, helmholtz_k, + return DistributedFMMLibExpansionWrangler(self, queue, tree, helmholtz_k, fmm_order) from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler -class ParallelFMMLibExpansionWrangler(FMMLibExpansionWrangler): +class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): def __init__(self, code_container, queue, tree, helmholtz_k, fmm_order): """ diff --git a/test/test_distributed.py b/test/test_distributed.py index 212e623..e395168 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -131,10 +131,11 @@ def fmm_level_to_nterms(tree, level): return ORDER -from boxtree.distributed import ParallelFMMLibExpansionWranglerCodeContainer, queue +from boxtree.distributed import ( + DistributedFMMLibExpansionWranglerCodeContainer, queue) local_wrangler = ( - ParallelFMMLibExpansionWranglerCodeContainer() + DistributedFMMLibExpansionWranglerCodeContainer() .get_wrangler(queue, local_tree, HELMHOLTZ_K, ORDER)) if rank == 0: -- GitLab From e15f6f09a334671122cf9474c398b0128b4c7b59 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Wed, 20 Dec 2017 03:14:53 -0600 Subject: [PATCH 054/260] Fix comments. --- boxtree/distributed.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index fe4aed3..9715103 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -60,7 +60,7 @@ class LocalTree(Tree): A :ref:`csr` array. For each box, the list of processes which own targets that *use* the multipole expansion at this box, via either List - 3 or via the downward (L2L) pass. + 3 or (possibly downward propagated from an ancestor) List 2. """ @property @@ -702,8 +702,6 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): box_mpole_is_used = cl.array.zeros(queue, (total_rank, tree.nboxes), dtype=np.int8) - - # An mpole is used by process p if it is an ancestor of a box owned by p. knl = get_box_mpole_is_used_marker_kernel() # A mpole is used by process p if it is in the List 2 of either a box -- GitLab From 041dddf00d834b14db93b2891a089545d6a4eca2 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Wed, 20 Dec 2017 17:29:34 -0600 Subject: [PATCH 055/260] Make COMMUNICATE_MPOLES_VIA_ALLREDUCE a parameter. --- boxtree/distributed.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 9715103..61834df 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -45,9 +45,6 @@ print("Process %d of %d on %s with ctx %s.\n" % ( queue.context.devices)) -COMMUNICATE_MPOLES_VIA_ALLREDUCE = False - - class LocalTree(Tree): """ .. 
attribute:: box_to_user_starts @@ -1044,7 +1041,7 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wrangler, local_target_mask, local_target_scan, local_ntargets, - comm=MPI.COMM_WORLD): + comm=MPI.COMM_WORLD, _communicate_mpoles_via_allreduce=False): # Get MPI information current_rank = comm.Get_rank() total_rank = comm.Get_size() @@ -1077,7 +1074,7 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran last_time = time.time() - if COMMUNICATE_MPOLES_VIA_ALLREDUCE: + if _communicate_mpoles_via_allreduce: mpole_exps_all = np.zeros_like(mpole_exps) comm.Allreduce(mpole_exps, mpole_exps_all) mpole_exps = mpole_exps_all -- GitLab From 95dde15e1c948dba65e35001e88aeaf2c9183294 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 22 Dec 2017 00:06:04 -0600 Subject: [PATCH 056/260] Refactoring local tree build --- boxtree/distributed.py | 542 ++++++++++++++++++++++------------------- 1 file changed, 287 insertions(+), 255 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 9715103..ade3dfd 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -45,7 +45,35 @@ print("Process %d of %d on %s with ctx %s.\n" % ( queue.context.devices)) -COMMUNICATE_MPOLES_VIA_ALLREDUCE = False +COMMUNICATE_MPOLES_VIA_ALLREDUCE = True + + +def tree_to_device(queue, tree, additional_fields_to_device=[]): + field_to_device = [ + "box_centers", "box_child_ids", "box_flags", "box_levels", + "box_parent_ids", "box_source_counts_cumul", + "box_source_counts_nonchild", "box_source_starts", + "box_target_counts_cumul", "box_target_counts_nonchild", + "box_target_starts", "level_start_box_nrs_dev", "sources", "targets", + ] + additional_fields_to_device + d_tree = tree.copy() + for field in field_to_device: + current_obj = d_tree.__getattribute__(field) + if current_obj.dtype == object: + new_obj = np.empty_like(current_obj) + for i in range(current_obj.shape[0]): + new_obj[i] = cl.array.to_device(queue, current_obj[i]) + d_tree.__setattr__(field, new_obj) + else: + d_tree.__setattr__( + field, cl.array.to_device(queue, current_obj)) + + if tree.sources_have_extent: + d_tree.source_radii = cl.array.to_device(queue, d_tree.source_radii) + if tree.targets_have_extent: + d_tree.target_radii = cl.array.to_device(queue, d_tree.target_radii) + + return d_tree class LocalTree(Tree): @@ -88,34 +116,10 @@ class LocalTree(Tree): return local_tree def to_device(self, queue): - field_to_device = [ - "box_centers", "box_child_ids", "box_flags", "box_levels", - "box_parent_ids", "box_source_counts_cumul", - "box_source_counts_nonchild", "box_source_starts", - "box_target_counts_cumul", "box_target_counts_nonchild", - "box_target_starts", "level_start_box_nrs_dev", "sources", "targets", - "responsible_boxes_list", "ancestor_mask", - "box_to_user_starts", "box_to_user_lists" - ] - d_tree = self.copy() - for field in field_to_device: - current_obj = d_tree.__getattribute__(field) - if current_obj.dtype == object: - new_obj = np.empty_like(current_obj) - for i in range(current_obj.shape[0]): - new_obj[i] = cl.array.to_device(queue, current_obj[i]) - d_tree.__setattr__(field, new_obj) - else: - d_tree.__setattr__( - field, cl.array.to_device(queue, current_obj)) - - if self.sources_have_extent: - d_tree.source_radii = cl.array.to_device(queue, d_tree.source_radii) - if self.targets_have_extent: - d_tree.target_radii = cl.array.to_device(queue, d_tree.target_radii) - - 
return d_tree + additional_fields_to_device = ["responsible_boxes_list", "ancestor_mask", + "box_to_user_starts", "box_to_user_lists"] + return tree_to_device(queue, self, additional_fields_to_device) # {{{ parallel fmm wrangler @@ -236,13 +240,14 @@ def partition_work(traversal, total_rank, queue): workload[box_idx] += box_ntargets * particle_count # workload for list 3 near - start = traversal.from_sep_close_smaller_starts[i] - end = traversal.from_sep_close_smaller_starts[i + 1] - list3_near = traversal.from_sep_close_smaller_lists[start:end] - particle_count = 0 - for j in range(list3_near.shape[0]): - particle_count += tree.box_source_counts_nonchild[list3_near[j]] - workload[box_idx] += box_ntargets * particle_count + if tree.targets_have_extent: + start = traversal.from_sep_close_smaller_starts[i] + end = traversal.from_sep_close_smaller_starts[i + 1] + list3_near = traversal.from_sep_close_smaller_lists[start:end] + particle_count = 0 + for j in range(list3_near.shape[0]): + particle_count += tree.box_source_counts_nonchild[list3_near[j]] + workload[box_idx] += box_ntargets * particle_count for i in range(tree.nboxes): # workload for multipole calculation @@ -281,31 +286,8 @@ def partition_work(traversal, total_rank, queue): return responsible_boxes_mask, responsible_boxes_list -def gen_local_particles(queue, particles, nparticles, tree, - responsible_boxes, - box_particle_starts, - box_particle_counts_nonchild, - box_particle_counts_cumul, - particle_radii=None, - particle_weights=None, - return_mask_scan=False): - """ - This helper function generates the sources/targets related fields for - a local tree - """ - # Put particle structures to device memory - d_box_particle_starts = cl.array.to_device(queue, box_particle_starts) - d_box_particle_counts_nonchild = cl.array.to_device( - queue, box_particle_counts_nonchild) - d_box_particle_counts_cumul = cl.array.to_device( - queue, box_particle_counts_cumul) - d_particles = np.empty((tree.dimensions,), dtype=object) - for i in range(tree.dimensions): - d_particles[i] = cl.array.to_device(queue, particles[i]) - - # Generate the particle mask array - d_particle_mask = cl.array.zeros(queue, (nparticles,), - dtype=tree.particle_id_dtype) +def get_gen_local_tree_helper(queue, tree): + d_tree = tree_to_device(queue, tree) particle_mask_knl = cl.elementwise.ElementwiseKernel( queue.context, @@ -327,10 +309,7 @@ def gen_local_particles(queue, particles, nparticles, tree, } """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype)) ) - particle_mask_knl(responsible_boxes, d_box_particle_starts, - d_box_particle_counts_nonchild, d_particle_mask) - # Generate the scan of the particle mask array mask_scan_knl = GenericScanKernel( queue.context, tree.particle_id_dtype, arguments=Template(""" @@ -343,77 +322,63 @@ def gen_local_particles(queue, particles, nparticles, tree, scan_expr="a+b", neutral="0", output_statement="scan[i + 1] = item;" ) - d_particle_scan = cl.array.empty(queue, (nparticles + 1,), - dtype=tree.particle_id_dtype) - d_particle_scan[0] = 0 - mask_scan_knl(d_particle_mask, d_particle_scan) - - # Generate particles for rank's local tree - local_nparticles = d_particle_scan[-1].get(queue) - d_local_particles = np.empty((tree.dimensions,), dtype=object) - for i in range(tree.dimensions): - d_local_particles[i] = cl.array.empty(queue, (local_nparticles,), - dtype=tree.coord_dtype) - d_paticles_list = d_particles.tolist() - for i in range(tree.dimensions): - d_paticles_list[i] = d_paticles_list[i] - d_local_particles_list = 
d_local_particles.tolist() - for i in range(tree.dimensions): - d_local_particles_list[i] = d_local_particles_list[i] - - fetch_local_particles_knl = cl.elementwise.ElementwiseKernel( - ctx, - Template(""" - __global const ${mask_t} *particle_mask, - __global const ${mask_t} *particle_scan + FECTCH_LOCAL_PARTICLES_ARGUMENTS = Template(""" + __global const ${mask_t} *particle_mask, + __global const ${mask_t} *particle_scan + % for dim in range(ndims): + , __global const ${coord_t} *particles_${dim} + % endfor + % for dim in range(ndims): + , __global ${coord_t} *local_particles_${dim} + % endfor + % if particles_have_extent: + , __global const ${coord_t} *particle_radii + , __global ${coord_t} *local_particle_radii + % endif + """, strict_undefined=True) + + FETCH_LOCAL_PARTICLES_PRG = Template(""" + if(particle_mask[i]) { + ${particle_id_t} des = particle_scan[i]; % for dim in range(ndims): - , __global const ${coord_t} *particles_${dim} - % endfor - % for dim in range(ndims): - , __global ${coord_t} *local_particles_${dim} + local_particles_${dim}[des] = particles_${dim}[i]; % endfor % if particles_have_extent: - , __global const ${coord_t} *particle_radii - , __global ${coord_t} *local_particle_radii + local_particle_radii[des] = particle_radii[i]; % endif - """, strict_undefined=True).render( + } + """, strict_undefined=True) + + fetch_local_src_knl = cl.elementwise.ElementwiseKernel( + queue.context, + FECTCH_LOCAL_PARTICLES_ARGUMENTS.render( mask_t=dtype_to_ctype(tree.particle_id_dtype), coord_t=dtype_to_ctype(tree.coord_dtype), ndims=tree.dimensions, - particles_have_extent=(particle_radii is not None) + particles_have_extent=tree.sources_have_extent ), - Template(""" - if(particle_mask[i]) { - ${particle_id_t} des = particle_scan[i]; - % for dim in range(ndims): - local_particles_${dim}[des] = particles_${dim}[i]; - % endfor - % if particles_have_extent: - local_particle_radii[des] = particle_radii[i]; - % endif - } - """, strict_undefined=True).render( + FETCH_LOCAL_PARTICLES_PRG.render( particle_id_t=dtype_to_ctype(tree.particle_id_dtype), ndims=tree.dimensions, - particles_have_extent=(particle_radii is not None) + particles_have_extent=tree.sources_have_extent ) ) - if particle_radii is None: - fetch_local_particles_knl(d_particle_mask, d_particle_scan, - *d_paticles_list, *d_local_particles_list) - else: - d_particle_radii = cl.array.to_device(queue, particle_radii) - d_local_particle_radii = cl.array.empty(queue, (local_nparticles,), - dtype=tree.coord_dtype) - fetch_local_particles_knl(d_particle_mask, d_particle_scan, - *d_paticles_list, *d_local_particles_list, - d_particle_radii, d_local_particle_radii) - - # Generate "box_particle_starts" of the local tree - local_box_particle_starts = cl.array.empty(queue, (tree.nboxes,), - dtype=tree.particle_id_dtype) + fetch_local_tgt_knl = cl.elementwise.ElementwiseKernel( + queue.context, + FECTCH_LOCAL_PARTICLES_ARGUMENTS.render( + mask_t=dtype_to_ctype(tree.particle_id_dtype), + coord_t=dtype_to_ctype(tree.coord_dtype), + ndims=tree.dimensions, + particles_have_extent=tree.targets_have_extent + ), + FETCH_LOCAL_PARTICLES_PRG.render( + particle_id_t=dtype_to_ctype(tree.particle_id_dtype), + ndims=tree.dimensions, + particles_have_extent=tree.targets_have_extent + ) + ) generate_box_particle_starts = cl.elementwise.ElementwiseKernel( queue.context, @@ -428,13 +393,6 @@ def gen_local_particles(queue, particles, nparticles, tree, name="generate_box_particle_starts" ) - generate_box_particle_starts(d_box_particle_starts, 
d_particle_scan, - local_box_particle_starts) - - # Generate "box_particle_counts_nonchild" of the local tree - local_box_particle_counts_nonchild = cl.array.zeros( - queue, (tree.nboxes,), dtype=tree.particle_id_dtype) - generate_box_particle_counts_nonchild = cl.elementwise.ElementwiseKernel( queue.context, Template(""" @@ -447,14 +405,6 @@ def gen_local_particles(queue, particles, nparticles, tree, "if(res_boxes[i]) new_counts_nonchild[i] = old_counts_nonchild[i];" ) - generate_box_particle_counts_nonchild(responsible_boxes, - d_box_particle_counts_nonchild, - local_box_particle_counts_nonchild) - - # Generate "box_particle_counts_cumul" - local_box_particle_counts_cumul = cl.array.empty( - queue, (tree.nboxes,), dtype=tree.particle_id_dtype) - generate_box_particle_counts_cumul = cl.elementwise.ElementwiseKernel( queue.context, Template(""" @@ -472,59 +422,164 @@ def gen_local_particles(queue, particles, nparticles, tree, """ ) - generate_box_particle_counts_cumul(d_box_particle_counts_cumul, - d_box_particle_starts, - local_box_particle_counts_cumul, - d_particle_scan) - - local_particles = np.empty((tree.dimensions,), dtype=object) - for i in range(tree.dimensions): - local_particles[i] = d_local_particles[i].get() - local_box_particle_starts = local_box_particle_starts.get() - local_box_particle_counts_nonchild = local_box_particle_counts_nonchild.get() - local_box_particle_counts_cumul = local_box_particle_counts_cumul.get() - - # {{{ Generate source weights - if particle_weights is not None: - local_particle_weights = cl.array.empty(queue, (local_nparticles,), - dtype=particle_weights.dtype) - gen_local_source_weights_knl = cl.elementwise.ElementwiseKernel( - queue.context, - arguments=Template(""" - __global ${weight_t} *src_weights, - __global ${particle_id_t} *particle_mask, - __global ${particle_id_t} *particle_scan, - __global ${weight_t} *local_weights - """, strict_undefined=True).render( - weight_t=dtype_to_ctype(particle_weights.dtype), - particle_id_t=dtype_to_ctype(tree.particle_id_dtype) - ), - operation=""" - if(particle_mask[i]) { - local_weights[particle_scan[i]] = src_weights[i]; - } - """ - ) - gen_local_source_weights_knl(particle_weights, d_particle_mask, - d_particle_scan, local_particle_weights) - - # }}} - - rtv = (local_particles, - local_box_particle_starts, - local_box_particle_counts_nonchild, - local_box_particle_counts_cumul) - - if particle_radii is not None: - rtv = rtv + (d_local_particle_radii.get(),) - - if particle_weights is not None: - rtv = rtv + (local_particle_weights.get(),) + def gen_local_tree_helper(src_box_mask, tgt_box_mask, local_tree): + """ This helper function generates a copy of the tree but with subset of + particles, and fetch the generated fields to *local_tree*. 
+ """ + nsources = tree.nsources + + # source particle mask + src_particle_mask = cl.array.zeros(queue, (nsources,), + dtype=tree.particle_id_dtype) + particle_mask_knl(src_box_mask, + d_tree.box_source_starts, + d_tree.box_source_counts_nonchild, + src_particle_mask) + + # scan of source particle mask + src_particle_scan = cl.array.empty(queue, (nsources + 1,), + dtype=tree.particle_id_dtype) + src_particle_scan[0] = 0 + mask_scan_knl(src_particle_mask, src_particle_scan) + + # local sources + local_nsources = src_particle_scan[-1].get(queue) + local_sources = np.empty((tree.dimensions,), dtype=object) + for i in range(tree.dimensions): + local_sources[i] = cl.array.empty(queue, (local_nsources,), + dtype=tree.coord_dtype) + assert(tree.sources_have_extent is False) + fetch_local_src_knl(src_particle_mask, src_particle_scan, + *d_tree.sources.tolist(), + *local_sources.tolist()) + + # box_source_starts + local_box_source_starts = cl.array.empty(queue, (tree.nboxes,), + dtype=tree.particle_id_dtype) + generate_box_particle_starts(d_tree.box_source_starts, src_particle_scan, + local_box_source_starts) + + # box_source_counts_nonchild + local_box_source_counts_nonchild = cl.array.zeros( + queue, (tree.nboxes,), dtype=tree.particle_id_dtype) + generate_box_particle_counts_nonchild(src_box_mask, + d_tree.box_source_counts_nonchild, + local_box_source_counts_nonchild) + + # box_source_counts_cumul + local_box_source_counts_cumul = cl.array.empty( + queue, (tree.nboxes,), dtype=tree.particle_id_dtype) + generate_box_particle_counts_cumul(d_tree.box_source_counts_cumul, + d_tree.box_source_starts, + local_box_source_counts_cumul, + src_particle_scan) + + ntargets = tree.ntargets + # target particle mask + tgt_particle_mask = cl.array.zeros(queue, (ntargets,), + dtype=tree.particle_id_dtype) + particle_mask_knl(tgt_box_mask, + d_tree.box_target_starts, + d_tree.box_target_counts_nonchild, + tgt_particle_mask) + + # scan of target particle mask + tgt_particle_scan = cl.array.empty(queue, (ntargets + 1,), + dtype=tree.particle_id_dtype) + tgt_particle_scan[0] = 0 + mask_scan_knl(tgt_particle_mask, tgt_particle_scan) + + # local targets + local_ntargets = tgt_particle_scan[-1].get(queue) + local_targets = np.empty((tree.dimensions,), dtype=object) + for i in range(tree.dimensions): + local_targets[i] = cl.array.empty(queue, (local_ntargets,), + dtype=tree.coord_dtype) + if tree.targets_have_extent: + local_target_radii = cl.array.empty(queue, (local_ntargets,), + dtype=tree.coord_dtype) + fetch_local_tgt_knl(tgt_particle_mask, tgt_particle_scan, + *d_tree.targets.tolist(), *local_targets.tolist(), + d_tree.target_radii, local_target_radii) + else: + fetch_local_tgt_knl(tgt_particle_mask, tgt_particle_scan, + *d_tree.targets.tolist(), + *local_targets.tolist()) + + # box_target_starts + local_box_target_starts = cl.array.empty(queue, (tree.nboxes,), + dtype=tree.particle_id_dtype) + generate_box_particle_starts(d_tree.box_target_starts, tgt_particle_scan, + local_box_target_starts) + + # box_target_counts_nonchild + local_box_target_counts_nonchild = cl.array.zeros( + queue, (tree.nboxes,), dtype=tree.particle_id_dtype) + generate_box_particle_counts_nonchild(tgt_box_mask, + d_tree.box_target_counts_nonchild, + local_box_target_counts_nonchild) + + # box_target_counts_cumul + local_box_target_counts_cumul = cl.array.empty( + queue, (tree.nboxes,), dtype=tree.particle_id_dtype) + generate_box_particle_counts_cumul(d_tree.box_target_counts_cumul, + d_tree.box_target_starts, + 
local_box_target_counts_cumul, + tgt_particle_scan) + + # Fetch fields to local_tree + for i in range(tree.dimensions): + local_sources[i] = local_sources[i].get(queue=queue) + local_tree.sources = local_sources + for i in range(tree.dimensions): + local_targets[i] = local_targets[i].get(queue=queue) + local_tree.targets = local_targets + if tree.targets_have_extent: + local_tree.target_radii = local_target_radii.get(queue=queue) + local_tree.box_source_starts = local_box_source_starts.get(queue=queue) + local_tree.box_source_counts_nonchild = \ + local_box_source_counts_nonchild.get(queue=queue) + local_tree.box_source_counts_cumul = \ + local_box_source_counts_cumul.get(queue=queue) + local_tree.box_target_starts = local_box_target_starts.get(queue=queue) + local_tree.box_target_counts_nonchild = \ + local_box_target_counts_nonchild.get(queue=queue) + local_tree.box_target_counts_cumul = \ + local_box_target_counts_cumul.get(queue=queue) + return (src_particle_mask, src_particle_scan, tgt_particle_mask, + tgt_particle_scan) + + return gen_local_tree_helper + + +def get_gen_local_weights_helper(queue, particle_dtype, weight_dtype): + gen_local_source_weights_knl = cl.elementwise.ElementwiseKernel( + queue.context, + arguments=Template(""" + __global ${weight_t} *src_weights, + __global ${particle_id_t} *particle_mask, + __global ${particle_id_t} *particle_scan, + __global ${weight_t} *local_weights + """, strict_undefined=True).render( + weight_t=dtype_to_ctype(weight_dtype), + particle_id_t=dtype_to_ctype(particle_dtype) + ), + operation=""" + if(particle_mask[i]) { + local_weights[particle_scan[i]] = src_weights[i]; + } + """ + ) - if return_mask_scan: - rtv = rtv + (d_particle_mask, d_particle_scan, local_nparticles) + def gen_local_weights(global_weights, source_mask, source_scan): + local_nsources = source_scan[-1].get(queue) + local_weights = cl.array.empty(queue, (local_nsources,), + dtype=weight_dtype) + gen_local_source_weights_knl(global_weights, source_mask, source_scan, + local_weights) + return local_weights.get(queue) - return rtv + return gen_local_weights def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): @@ -682,19 +737,19 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): src_boxes_mask[rank] ) - # Add list 3 direct - d_from_sep_close_smaller_starts = cl.array.to_device( - queue, traversal.from_sep_close_smaller_starts) - d_from_sep_close_smaller_lists = cl.array.to_device( - queue, traversal.from_sep_close_smaller_lists) + # Add list 3 direct + d_from_sep_close_smaller_starts = cl.array.to_device( + queue, traversal.from_sep_close_smaller_starts) + d_from_sep_close_smaller_lists = cl.array.to_device( + queue, traversal.from_sep_close_smaller_lists) - add_interaction_list_boxes( - d_target_boxes, - responsible_boxes_mask[rank], - d_from_sep_close_smaller_starts, - d_from_sep_close_smaller_lists, - src_boxes_mask[rank] - ) + add_interaction_list_boxes( + d_target_boxes, + responsible_boxes_mask[rank], + d_from_sep_close_smaller_starts, + d_from_sep_close_smaller_lists, + src_boxes_mask[rank] + ) # {{{ compute box_to_user @@ -756,16 +811,9 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): tree_req = np.empty((total_rank,), dtype=object) weight_req = np.empty((total_rank,), dtype=object) - if tree.sources_have_extent: - source_radii = tree.source_radii - else: - source_radii = None - - if tree.targets_have_extent: - target_radii = tree.target_radii - else: - target_radii = None - + gen_local_tree_helper = 
get_gen_local_tree_helper(queue, tree) + gen_local_weights_helper = get_gen_local_weights_helper(queue, + tree.particle_id_dtype, src_weights.dtype) for rank in range(total_rank): local_tree[rank] = LocalTree.copy_from_global_tree( tree, responsible_boxes_list[rank].get(), @@ -773,36 +821,21 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): box_to_user_starts.get(), box_to_user_lists.get()) - (local_tree[rank].sources, - local_tree[rank].box_source_starts, - local_tree[rank].box_source_counts_nonchild, - local_tree[rank].box_source_counts_cumul, - local_src_weights[rank]) = \ - gen_local_particles(queue, tree.sources, tree.nsources, tree, - src_boxes_mask[rank], - tree.box_source_starts, - tree.box_source_counts_nonchild, - tree.box_source_counts_cumul, - source_radii, src_weights) - - (local_tree[rank].targets, - local_tree[rank].box_target_starts, - local_tree[rank].box_target_counts_nonchild, - local_tree[rank].box_target_counts_cumul, - local_tree[rank].target_radii, - local_target_mask[rank], - local_target_scan[rank], - local_ntargets[rank]) = \ - gen_local_particles(queue, tree.targets, tree.ntargets, tree, - responsible_boxes_mask[rank], - tree.box_target_starts, - tree.box_target_counts_nonchild, - tree.box_target_counts_cumul, - target_radii, None, return_mask_scan=True) - local_tree[rank].user_source_ids = None local_tree[rank].sorted_target_ids = None + (src_mask, src_scan, tgt_mask, tgt_scan) = \ + gen_local_tree_helper(src_boxes_mask[rank], + responsible_boxes_mask[rank], + local_tree[rank]) + + local_src_weights[rank] = gen_local_weights_helper( + src_weights, src_mask, src_scan) + + local_target_mask[rank] = tgt_mask + local_target_scan[rank] = tgt_scan + local_ntargets[rank] = tgt_scan[-1].get(queue) + tree_req[rank] = comm.isend(local_tree[rank], dest=rank, tag=MPITags.DIST_TREE) weight_req[rank] = comm.isend(local_src_weights[rank], dest=rank, @@ -829,26 +862,25 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): rtv = (local_tree, local_src_weights, local_target) # Recieve box extent - if local_tree.targets_have_extent: - if current_rank == 0: - box_target_bounding_box_min = traversal.box_target_bounding_box_min - box_target_bounding_box_max = traversal.box_target_bounding_box_max - else: - box_target_bounding_box_min = np.empty( - (local_tree.dimensions, local_tree.aligned_nboxes), - dtype=local_tree.coord_dtype - ) - box_target_bounding_box_max = np.empty( - (local_tree.dimensions, local_tree.aligned_nboxes), - dtype=local_tree.coord_dtype - ) - comm.Bcast(box_target_bounding_box_min, root=0) - comm.Bcast(box_target_bounding_box_max, root=0) - box_bounding_box = { - "min": box_target_bounding_box_min, - "max": box_target_bounding_box_max - } - rtv += (box_bounding_box,) + if current_rank == 0: + box_target_bounding_box_min = traversal.box_target_bounding_box_min + box_target_bounding_box_max = traversal.box_target_bounding_box_max + else: + box_target_bounding_box_min = np.empty( + (local_tree.dimensions, local_tree.aligned_nboxes), + dtype=local_tree.coord_dtype + ) + box_target_bounding_box_max = np.empty( + (local_tree.dimensions, local_tree.aligned_nboxes), + dtype=local_tree.coord_dtype + ) + comm.Bcast(box_target_bounding_box_min, root=0) + comm.Bcast(box_target_bounding_box_max, root=0) + box_bounding_box = { + "min": box_target_bounding_box_min, + "max": box_target_bounding_box_max + } + rtv += (box_bounding_box,) return rtv -- GitLab From 5362dd219c0624c1e829bfed7addee7f9f027f4e Mon Sep 17 00:00:00 2001 From: Hao 
Gao Date: Fri, 22 Dec 2017 00:37:42 -0600 Subject: [PATCH 057/260] Fix Flake8 --- boxtree/distributed.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index c89dd76..231ea21 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -321,7 +321,7 @@ def get_gen_local_tree_helper(queue, tree): output_statement="scan[i + 1] = item;" ) - FECTCH_LOCAL_PARTICLES_ARGUMENTS = Template(""" + fetch_local_paticles_arguments = Template(""" __global const ${mask_t} *particle_mask, __global const ${mask_t} *particle_scan % for dim in range(ndims): @@ -336,7 +336,7 @@ def get_gen_local_tree_helper(queue, tree): % endif """, strict_undefined=True) - FETCH_LOCAL_PARTICLES_PRG = Template(""" + fetch_local_particles_prg = Template(""" if(particle_mask[i]) { ${particle_id_t} des = particle_scan[i]; % for dim in range(ndims): @@ -350,13 +350,13 @@ def get_gen_local_tree_helper(queue, tree): fetch_local_src_knl = cl.elementwise.ElementwiseKernel( queue.context, - FECTCH_LOCAL_PARTICLES_ARGUMENTS.render( + fetch_local_paticles_arguments.render( mask_t=dtype_to_ctype(tree.particle_id_dtype), coord_t=dtype_to_ctype(tree.coord_dtype), ndims=tree.dimensions, particles_have_extent=tree.sources_have_extent ), - FETCH_LOCAL_PARTICLES_PRG.render( + fetch_local_particles_prg.render( particle_id_t=dtype_to_ctype(tree.particle_id_dtype), ndims=tree.dimensions, particles_have_extent=tree.sources_have_extent @@ -365,13 +365,13 @@ def get_gen_local_tree_helper(queue, tree): fetch_local_tgt_knl = cl.elementwise.ElementwiseKernel( queue.context, - FECTCH_LOCAL_PARTICLES_ARGUMENTS.render( + fetch_local_paticles_arguments.render( mask_t=dtype_to_ctype(tree.particle_id_dtype), coord_t=dtype_to_ctype(tree.coord_dtype), ndims=tree.dimensions, particles_have_extent=tree.targets_have_extent ), - FETCH_LOCAL_PARTICLES_PRG.render( + fetch_local_particles_prg.render( particle_id_t=dtype_to_ctype(tree.particle_id_dtype), ndims=tree.dimensions, particles_have_extent=tree.targets_have_extent -- GitLab From 514c527f00dd8b1422d7c3ab6d3fa1bc1dc6fd59 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Fri, 22 Dec 2017 16:08:28 -0600 Subject: [PATCH 058/260] Print absolute and relative errors. --- test/test_distributed.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/test_distributed.py b/test/test_distributed.py index e395168..646a55d 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -155,4 +155,6 @@ last_time = now if rank == 0: print("Total time " + str(time.time() - start_time)) - print(la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=np.inf)) + print("rel error", la.norm((pot_fmm - pot_dfmm * 2 * np.pi) / pot_fmm, + ord=np.inf)) + print("abs error", la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=np.inf)) -- GitLab From 501db499df642a5071e8f7654efeba0f9ab3b4da Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Fri, 22 Dec 2017 18:30:16 -0600 Subject: [PATCH 059/260] Add stats collection code. 
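communicate_mpoles() and drive_dfmm() now accept an optional _stats dictionary and
record, per butterfly round, how many multipole expansions this rank sent and received
("mpoles_sent_per_round" / "mpoles_recvd_per_round"). A hypothetical post-processing
sketch, assuming each rank already holds a stats dict filled in by
drive_dfmm(..., _stats=stats); the placeholder value only makes the snippet runnable on
its own:

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD

    # Normally filled in by drive_dfmm(..., _stats=stats).
    stats = {"mpoles_sent_per_round": [0], "mpoles_recvd_per_round": [0]}

    my_sent = np.array([sum(stats["mpoles_sent_per_round"])], dtype=np.int64)
    max_sent = np.empty_like(my_sent)
    comm.Reduce(my_sent, max_sent, op=MPI.MAX, root=0)

    if comm.Get_rank() == 0:
        nrounds = len(stats["mpoles_sent_per_round"])
        print("rounds:", nrounds, "max expansions sent by any rank:", max_sent[0])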
--- boxtree/distributed.py | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 231ea21..e2d61fd 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -956,7 +956,7 @@ def generate_local_travs(local_tree, local_src_weights, box_bounding_box=None, # {{{ communicate mpoles -def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): +def communicate_mpoles(wrangler, comm, trav, mpole_exps, _stats=None): """Based on Algorithm 3: Reduce and Scatter in [1]. The main idea is to mimic a hypercube allreduce, but to reduce bandwidth by @@ -1002,13 +1002,14 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): # Temporary buffer for holding the mask box_in_subrange = wrangler.empty_box_in_subrange_mask() - stats["bytes_sent_by_stage"] = [] - stats["bytes_recvd_by_stage"] = [] + stats["mpoles_sent_per_round"] = [] + stats["mpoles_recvd_per_round"] = [] while not comm_pattern.done(): send_requests = [] # Send data to other processors. + nmpoles_sent = 0 if comm_pattern.sinks(): # Compute the subset of boxes to be sent. message_subrange = comm_pattern.messages() @@ -1043,7 +1044,12 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): tag=MPITags.REDUCE_INDICES) send_requests.append(req) + nmpoles_sent += len(relevant_boxes_list) + + stats["mpoles_sent_per_round"].append(nmpoles_sent) + # Receive data from other processors. + nmpoles_recvd = 0 for source in comm_pattern.sources(): comm.Recv(mpole_exps_buf, source=source, tag=MPITags.REDUCE_POTENTIALS) @@ -1052,32 +1058,37 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): status=status) nboxes = status.Get_count() // boxes_list_buf.dtype.itemsize + nmpoles_recvd += nboxes + # Update data structures. 
wrangler.update_mpoles(mpole_exps, mpole_exps_buf, boxes_list_buf[:nboxes]) contributing_boxes[boxes_list_buf[:nboxes]] = 1 + stats["mpoles_recvd_per_round"].append(nmpoles_recvd) + for req in send_requests: req.wait() comm_pattern.advance() - stats["total_time"] = time() - t_start - logger.debug("communicate multipoles: done in %.2f s" % stats["total_time"]) + logger.debug("communicate multipoles: done in %.2f s" % (time() - t_start)) - if return_stats: - return stats + if _stats is not None: + _stats.update(stats) # }}} def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wrangler, local_target_mask, local_target_scan, local_ntargets, - comm=MPI.COMM_WORLD, _communicate_mpoles_via_allreduce=False): + comm=MPI.COMM_WORLD, _communicate_mpoles_via_allreduce=False, + _stats=None): # Get MPI information current_rank = comm.Get_rank() total_rank = comm.Get_size() + stats = {} # {{{ "Step 2.1:" Construct local multipoles @@ -1112,7 +1123,7 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran comm.Allreduce(mpole_exps, mpole_exps_all) mpole_exps = mpole_exps_all else: - communicate_mpoles(wrangler, comm, trav_local, mpole_exps) + communicate_mpoles(wrangler, comm, trav_local, mpole_exps, stats) print("Communication: " + str(time.time()-last_time)) @@ -1220,6 +1231,9 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran # }}} + if _stats is not None: + _stats.update(stats) + potentials_mpi_type = MPI._typedict[potentials.dtype.char] if current_rank == 0: potentials_all_ranks = np.empty((total_rank,), dtype=object) -- GitLab From 3210a175c93ba596083bdc46b763dfed0e90c05c Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Fri, 22 Dec 2017 18:30:25 -0600 Subject: [PATCH 060/260] Add scaling test. 
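The test builds a uniform 2D particle distribution, runs the distributed driver with the
_stats hook from the previous commit, and prints one LaTeX table row per run: the number
of ranks, the number of butterfly rounds (roughly ceil(log2 p), e.g. 4 rounds for 16
ranks), and the maximum number of multipole expansions sent by any single rank. It is
meant as a quick check of how much the reduce-and-scatter saves compared with an
Allreduce over the whole multipole array.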
--- test/test_distributed_scaling.py | 110 +++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 test/test_distributed_scaling.py diff --git a/test/test_distributed_scaling.py b/test/test_distributed_scaling.py new file mode 100644 index 0000000..60a7845 --- /dev/null +++ b/test/test_distributed_scaling.py @@ -0,0 +1,110 @@ +import numpy as np +import sys +from mpi4py import MPI +from boxtree.distributed import generate_local_tree, generate_local_travs, drive_dfmm +import numpy.linalg as la +from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler +import time +import pyopencl as cl +import pyopencl.array + +import logging +logging.basicConfig(level=logging.INFO) + +# Global parameters +DIMS = 2 +DTYPE = np.float64 + +ORDER = 3 +HELMHOLTZ_K = 0 + + +def build_global_traversal_and_weights(ctx, nsources, ntargets): + queue = cl.CommandQueue(ctx) + + # Generate random particles and source weights + from boxtree.tools import make_uniform_particle_array as p_uniform + sources = p_uniform(queue, nsources, DIMS, DTYPE, seed=15) + targets = p_uniform(queue, ntargets, DIMS, DTYPE, seed=18) + + from boxtree.tools import particle_array_to_host + sources_host = particle_array_to_host(sources) + targets_host = particle_array_to_host(targets) + + sources_weights = np.ones(nsources, DTYPE) + + # Build the tree and interaction lists + from boxtree import TreeBuilder + tb = TreeBuilder(ctx) + tree, _ = tb(queue, sources, targets=targets, + max_particles_in_box=30, debug=True) + + from boxtree.traversal import FMMTraversalBuilder + tg = FMMTraversalBuilder(ctx) + d_trav, _ = tg(queue, tree, debug=True) + trav = d_trav.get(queue=queue) + + return (trav, sources_weights) + + +def get_dfmm_stats(ctx, comm, nsources, ntargets): + if comm.Get_rank() == 0: + trav, sources_weights = ( + build_global_traversal_and_weights(ctx, nsources, ntargets)) + else: + trav, sources_weights = 2 * (None,) + + comm.barrier() + + # Compute FMM using distributed memory parallelism + local_tree, local_src_weights, local_target, box_bounding_box = ( + generate_local_tree(trav, sources_weights)) + + trav_local, trav_global = ( + generate_local_travs(local_tree, local_src_weights, box_bounding_box)) + + def fmm_level_to_nterms(tree, level): + return ORDER + + from boxtree.distributed import ( + DistributedFMMLibExpansionWranglerCodeContainer, queue) + + local_wrangler = ( + DistributedFMMLibExpansionWranglerCodeContainer() + .get_wrangler(queue, local_tree, HELMHOLTZ_K, ORDER)) + + if comm.Get_rank() == 0: + global_wrangler = FMMLibExpansionWrangler( + trav.tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) + else: + global_wrangler = None + + stats = {} + + _ = drive_dfmm( + local_wrangler, trav_local, trav_global, local_src_weights, global_wrangler, + local_target["mask"], local_target["scan"], local_target["size"], + _stats=stats) + + return stats + + +def get_mpole_communication_data(ctx, comm, nsources, ntargets): + stats = get_dfmm_stats(ctx, comm, nsources, ntargets) + + my_mpoles_sent = np.zeros(1, int) + max_mpoles_sent = np.zeros(1, int) + + my_mpoles_sent[0] = sum(stats["mpoles_sent_per_round"]) + + comm.barrier() + comm.Reduce(my_mpoles_sent, max_mpoles_sent, op=MPI.MAX, root=0) + + if comm.Get_rank() == 0: + nrounds = len(stats["mpoles_sent_per_round"]) + print(f"{comm.Get_size()} & {nrounds} & {max_mpoles_sent[0]} \\\\") + + +if __name__ == "__main__": + ctx = cl._csc(interactive=False) + get_mpole_communication_data(ctx, MPI.COMM_WORLD, 100 ** 2, 100 ** 2) -- GitLab From 
8d15b5a57e448c03e467edc8c4b70731afe1866d Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Fri, 22 Dec 2017 20:03:47 -0600 Subject: [PATCH 061/260] Generate responsible_boxes_mask on the host. This saves a lot of time compared with keeping it on the device. --- boxtree/distributed.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index e2d61fd..d8e539a 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -219,9 +219,6 @@ def partition_work(traversal, total_rank, queue): the (i,j) entry is 1 iff rank i is responsible for box j. """ tree = traversal.tree - responsible_boxes_mask = cl.array.zeros(queue, (total_rank, tree.nboxes), - dtype=np.int8) - responsible_boxes_list = np.empty((total_rank,), dtype=object) workload = np.zeros((tree.nboxes,), dtype=np.float64) for i in range(traversal.target_boxes.shape[0]): @@ -267,6 +264,9 @@ def partition_work(traversal, total_rank, queue): if child_box_id > 0: stack.append(child_box_id) + responsible_boxes_mask = np.zeros((total_rank, tree.nboxes), dtype=np.int8) + responsible_boxes_list = np.empty((total_rank,), dtype=object) + rank = 0 start = 0 workload_count = 0 @@ -281,6 +281,7 @@ def partition_work(traversal, total_rank, queue): start = i + 1 rank += 1 + responsible_boxes_mask = cl.array.to_device(queue, responsible_boxes_mask) return responsible_boxes_mask, responsible_boxes_list -- GitLab From 903585b9f7e9ba4de577e7429e04de8c742f76f5 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Fri, 22 Dec 2017 20:05:33 -0600 Subject: [PATCH 062/260] Revert "Add scaling test." This reverts commit 3210a175c93ba596083bdc46b763dfed0e90c05c. --- test/test_distributed_scaling.py | 110 ------------------------------- 1 file changed, 110 deletions(-) delete mode 100644 test/test_distributed_scaling.py diff --git a/test/test_distributed_scaling.py b/test/test_distributed_scaling.py deleted file mode 100644 index 60a7845..0000000 --- a/test/test_distributed_scaling.py +++ /dev/null @@ -1,110 +0,0 @@ -import numpy as np -import sys -from mpi4py import MPI -from boxtree.distributed import generate_local_tree, generate_local_travs, drive_dfmm -import numpy.linalg as la -from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler -import time -import pyopencl as cl -import pyopencl.array - -import logging -logging.basicConfig(level=logging.INFO) - -# Global parameters -DIMS = 2 -DTYPE = np.float64 - -ORDER = 3 -HELMHOLTZ_K = 0 - - -def build_global_traversal_and_weights(ctx, nsources, ntargets): - queue = cl.CommandQueue(ctx) - - # Generate random particles and source weights - from boxtree.tools import make_uniform_particle_array as p_uniform - sources = p_uniform(queue, nsources, DIMS, DTYPE, seed=15) - targets = p_uniform(queue, ntargets, DIMS, DTYPE, seed=18) - - from boxtree.tools import particle_array_to_host - sources_host = particle_array_to_host(sources) - targets_host = particle_array_to_host(targets) - - sources_weights = np.ones(nsources, DTYPE) - - # Build the tree and interaction lists - from boxtree import TreeBuilder - tb = TreeBuilder(ctx) - tree, _ = tb(queue, sources, targets=targets, - max_particles_in_box=30, debug=True) - - from boxtree.traversal import FMMTraversalBuilder - tg = FMMTraversalBuilder(ctx) - d_trav, _ = tg(queue, tree, debug=True) - trav = d_trav.get(queue=queue) - - return (trav, sources_weights) - - -def get_dfmm_stats(ctx, comm, nsources, ntargets): - if comm.Get_rank() == 0: - trav, sources_weights = ( - 
build_global_traversal_and_weights(ctx, nsources, ntargets)) - else: - trav, sources_weights = 2 * (None,) - - comm.barrier() - - # Compute FMM using distributed memory parallelism - local_tree, local_src_weights, local_target, box_bounding_box = ( - generate_local_tree(trav, sources_weights)) - - trav_local, trav_global = ( - generate_local_travs(local_tree, local_src_weights, box_bounding_box)) - - def fmm_level_to_nterms(tree, level): - return ORDER - - from boxtree.distributed import ( - DistributedFMMLibExpansionWranglerCodeContainer, queue) - - local_wrangler = ( - DistributedFMMLibExpansionWranglerCodeContainer() - .get_wrangler(queue, local_tree, HELMHOLTZ_K, ORDER)) - - if comm.Get_rank() == 0: - global_wrangler = FMMLibExpansionWrangler( - trav.tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) - else: - global_wrangler = None - - stats = {} - - _ = drive_dfmm( - local_wrangler, trav_local, trav_global, local_src_weights, global_wrangler, - local_target["mask"], local_target["scan"], local_target["size"], - _stats=stats) - - return stats - - -def get_mpole_communication_data(ctx, comm, nsources, ntargets): - stats = get_dfmm_stats(ctx, comm, nsources, ntargets) - - my_mpoles_sent = np.zeros(1, int) - max_mpoles_sent = np.zeros(1, int) - - my_mpoles_sent[0] = sum(stats["mpoles_sent_per_round"]) - - comm.barrier() - comm.Reduce(my_mpoles_sent, max_mpoles_sent, op=MPI.MAX, root=0) - - if comm.Get_rank() == 0: - nrounds = len(stats["mpoles_sent_per_round"]) - print(f"{comm.Get_size()} & {nrounds} & {max_mpoles_sent[0]} \\\\") - - -if __name__ == "__main__": - ctx = cl._csc(interactive=False) - get_mpole_communication_data(ctx, MPI.COMM_WORLD, 100 ** 2, 100 ** 2) -- GitLab From 2f037e281b9607d5a4890adeb61d890beaede768 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Fri, 22 Dec 2017 20:05:38 -0600 Subject: [PATCH 063/260] Revert "Add stats collection code." This reverts commit 501db499df642a5071e8f7654efeba0f9ab3b4da. --- boxtree/distributed.py | 32 +++++++++----------------------- 1 file changed, 9 insertions(+), 23 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index d8e539a..be98dba 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -957,7 +957,7 @@ def generate_local_travs(local_tree, local_src_weights, box_bounding_box=None, # {{{ communicate mpoles -def communicate_mpoles(wrangler, comm, trav, mpole_exps, _stats=None): +def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): """Based on Algorithm 3: Reduce and Scatter in [1]. The main idea is to mimic a hypercube allreduce, but to reduce bandwidth by @@ -1003,14 +1003,13 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, _stats=None): # Temporary buffer for holding the mask box_in_subrange = wrangler.empty_box_in_subrange_mask() - stats["mpoles_sent_per_round"] = [] - stats["mpoles_recvd_per_round"] = [] + stats["bytes_sent_by_stage"] = [] + stats["bytes_recvd_by_stage"] = [] while not comm_pattern.done(): send_requests = [] # Send data to other processors. - nmpoles_sent = 0 if comm_pattern.sinks(): # Compute the subset of boxes to be sent. message_subrange = comm_pattern.messages() @@ -1045,12 +1044,7 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, _stats=None): tag=MPITags.REDUCE_INDICES) send_requests.append(req) - nmpoles_sent += len(relevant_boxes_list) - - stats["mpoles_sent_per_round"].append(nmpoles_sent) - # Receive data from other processors. 
- nmpoles_recvd = 0 for source in comm_pattern.sources(): comm.Recv(mpole_exps_buf, source=source, tag=MPITags.REDUCE_POTENTIALS) @@ -1059,37 +1053,32 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, _stats=None): status=status) nboxes = status.Get_count() // boxes_list_buf.dtype.itemsize - nmpoles_recvd += nboxes - # Update data structures. wrangler.update_mpoles(mpole_exps, mpole_exps_buf, boxes_list_buf[:nboxes]) contributing_boxes[boxes_list_buf[:nboxes]] = 1 - stats["mpoles_recvd_per_round"].append(nmpoles_recvd) - for req in send_requests: req.wait() comm_pattern.advance() - logger.debug("communicate multipoles: done in %.2f s" % (time() - t_start)) + stats["total_time"] = time() - t_start + logger.debug("communicate multipoles: done in %.2f s" % stats["total_time"]) - if _stats is not None: - _stats.update(stats) + if return_stats: + return stats # }}} def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wrangler, local_target_mask, local_target_scan, local_ntargets, - comm=MPI.COMM_WORLD, _communicate_mpoles_via_allreduce=False, - _stats=None): + comm=MPI.COMM_WORLD, _communicate_mpoles_via_allreduce=False): # Get MPI information current_rank = comm.Get_rank() total_rank = comm.Get_size() - stats = {} # {{{ "Step 2.1:" Construct local multipoles @@ -1124,7 +1113,7 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran comm.Allreduce(mpole_exps, mpole_exps_all) mpole_exps = mpole_exps_all else: - communicate_mpoles(wrangler, comm, trav_local, mpole_exps, stats) + communicate_mpoles(wrangler, comm, trav_local, mpole_exps) print("Communication: " + str(time.time()-last_time)) @@ -1232,9 +1221,6 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran # }}} - if _stats is not None: - _stats.update(stats) - potentials_mpi_type = MPI._typedict[potentials.dtype.char] if current_rank == 0: potentials_all_ranks = np.empty((total_rank,), dtype=object) -- GitLab From 36b1a1523f31e326b27eb696c6ad8253e1171081 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 22 Dec 2017 21:06:47 -0600 Subject: [PATCH 064/260] Add test script using ConstantOneExpansionWrangler --- test/test_constantone.py | 240 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 240 insertions(+) create mode 100644 test/test_constantone.py diff --git a/test/test_constantone.py b/test/test_constantone.py new file mode 100644 index 0000000..8eca7c7 --- /dev/null +++ b/test/test_constantone.py @@ -0,0 +1,240 @@ +import numpy as np +from mpi4py import MPI +from boxtree.distributed import generate_local_tree, generate_local_travs, drive_dfmm + + +class ConstantOneExpansionWrangler(object): + """This implements the 'analytical routines' for a Green's function that is + constant 1 everywhere. For 'charges' of 'ones', this should get every particle + a copy of the particle count. 
+ """ + + def __init__(self, tree): + self.tree = tree + + def multipole_expansion_zeros(self): + return np.zeros(self.tree.nboxes, dtype=np.float64) + + local_expansion_zeros = multipole_expansion_zeros + + def potential_zeros(self): + return np.zeros(self.tree.ntargets, dtype=np.float64) + + def _get_source_slice(self, ibox): + pstart = self.tree.box_source_starts[ibox] + return slice( + pstart, pstart + self.tree.box_source_counts_nonchild[ibox]) + + def _get_target_slice(self, ibox): + pstart = self.tree.box_target_starts[ibox] + return slice( + pstart, pstart + self.tree.box_target_counts_nonchild[ibox]) + + def reorder_sources(self, source_array): + return source_array[self.tree.user_source_ids] + + def reorder_potentials(self, potentials): + return potentials[self.tree.sorted_target_ids] + + def form_multipoles(self, level_start_source_box_nrs, source_boxes, src_weights): + mpoles = self.multipole_expansion_zeros() + for ibox in source_boxes: + pslice = self._get_source_slice(ibox) + mpoles[ibox] += np.sum(src_weights[pslice]) + + return mpoles + + def coarsen_multipoles(self, level_start_source_parent_box_nrs, + source_parent_boxes, mpoles): + tree = self.tree + + # nlevels-1 is the last valid level index + # nlevels-2 is the last valid level that could have children + # + # 3 is the last relevant source_level. + # 2 is the last relevant target_level. + # (because no level 1 box will be well-separated from another) + for source_level in range(tree.nlevels-1, 2, -1): + target_level = source_level - 1 + start, stop = level_start_source_parent_box_nrs[ + target_level:target_level+2] + for ibox in source_parent_boxes[start:stop]: + for child in tree.box_child_ids[:, ibox]: + if child: + mpoles[ibox] += mpoles[child] + + def eval_direct(self, target_boxes, neighbor_sources_starts, + neighbor_sources_lists, src_weights): + pot = self.potential_zeros() + + for itgt_box, tgt_ibox in enumerate(target_boxes): + tgt_pslice = self._get_target_slice(tgt_ibox) + + src_sum = 0 + start, end = neighbor_sources_starts[itgt_box:itgt_box+2] + #print "DIR: %s <- %s" % (tgt_ibox, neighbor_sources_lists[start:end]) + for src_ibox in neighbor_sources_lists[start:end]: + src_pslice = self._get_source_slice(src_ibox) + + src_sum += np.sum(src_weights[src_pslice]) + + pot[tgt_pslice] = src_sum + + return pot + + def multipole_to_local(self, + level_start_target_or_target_parent_box_nrs, + target_or_target_parent_boxes, + starts, lists, mpole_exps): + local_exps = self.local_expansion_zeros() + + for itgt_box, tgt_ibox in enumerate(target_or_target_parent_boxes): + start, end = starts[itgt_box:itgt_box+2] + + contrib = 0 + #print tgt_ibox, "<-", lists[start:end] + for src_ibox in lists[start:end]: + contrib += mpole_exps[src_ibox] + + local_exps[tgt_ibox] += contrib + + return local_exps + + def eval_multipoles(self, level_start_target_box_nrs, target_boxes, + from_sep_smaller_nonsiblings_by_level, mpole_exps): + pot = self.potential_zeros() + + for ssn in from_sep_smaller_nonsiblings_by_level: + for itgt_box, tgt_ibox in enumerate(target_boxes): + tgt_pslice = self._get_target_slice(tgt_ibox) + + contrib = 0 + + start, end = ssn.starts[itgt_box:itgt_box+2] + for src_ibox in ssn.lists[start:end]: + contrib += mpole_exps[src_ibox] + + pot[tgt_pslice] += contrib + + return pot + + def form_locals(self, + level_start_target_or_target_parent_box_nrs, + target_or_target_parent_boxes, starts, lists, src_weights): + local_exps = self.local_expansion_zeros() + + for itgt_box, tgt_ibox in 
enumerate(target_or_target_parent_boxes): + start, end = starts[itgt_box:itgt_box+2] + + #print "LIST 4", tgt_ibox, "<-", lists[start:end] + contrib = 0 + for src_ibox in lists[start:end]: + src_pslice = self._get_source_slice(src_ibox) + + contrib += np.sum(src_weights[src_pslice]) + + local_exps[tgt_ibox] += contrib + + return local_exps + + def refine_locals(self, level_start_target_or_target_parent_box_nrs, + target_or_target_parent_boxes, local_exps): + + for target_lev in range(1, self.tree.nlevels): + start, stop = level_start_target_or_target_parent_box_nrs[ + target_lev:target_lev+2] + for ibox in target_or_target_parent_boxes[start:stop]: + local_exps[ibox] += local_exps[self.tree.box_parent_ids[ibox]] + + return local_exps + + def eval_locals(self, level_start_target_box_nrs, target_boxes, local_exps): + pot = self.potential_zeros() + + for ibox in target_boxes: + tgt_pslice = self._get_target_slice(ibox) + pot[tgt_pslice] += local_exps[ibox] + + return pot + + def finalize_potentials(self, potentials): + return potentials + + +# Parameters +dims = 2 +nsources = 100000 +ntargets = 100000 +dtype = np.float64 + +# Get the current rank +comm = MPI.COMM_WORLD +rank = comm.Get_rank() + +# Initialization +trav = None +sources_weights = None +wrangler = None + +# Generate particles and run shared-memory parallelism on rank 0 +if rank == 0: + # Configure PyOpenCL + import pyopencl as cl + ctx = cl.create_some_context() + queue = cl.CommandQueue(ctx) + print(queue.context.devices) + + # Generate random particles and source weights + from boxtree.tools import make_normal_particle_array as p_normal + sources = p_normal(queue, nsources, dims, dtype, seed=15) + targets = (p_normal(queue, ntargets, dims, dtype, seed=18) + + np.array([2, 0, 0])[:dims]) + + from boxtree.tools import particle_array_to_host + sources_host = particle_array_to_host(sources) + targets_host = particle_array_to_host(targets) + + from pyopencl.clrandom import PhiloxGenerator + rng = PhiloxGenerator(queue.context, seed=20) + # sources_weights = rng.uniform(queue, nsources, dtype=np.float64).get() + sources_weights = np.ones((nsources,)) + + # Build the tree and interaction lists + from boxtree import TreeBuilder + tb = TreeBuilder(ctx) + tree, _ = tb(queue, sources, targets=targets, max_particles_in_box=30, + debug=True) + + from boxtree.traversal import FMMTraversalBuilder + tg = FMMTraversalBuilder(ctx) + d_trav, _ = tg(queue, tree, debug=True) + trav = d_trav.get(queue=queue) + + wrangler = ConstantOneExpansionWrangler(trav.tree) + + # Compute FMM using shared memory parallelism + from boxtree.fmm import drive_fmm + pot_fmm = drive_fmm(trav, wrangler, sources_weights) + +local_tree, local_src_weights, local_target, box_bounding_box = \ + generate_local_tree(trav, sources_weights) + +trav_local, trav_global = generate_local_travs(local_tree, local_src_weights, + box_bounding_box) + +local_wrangler = ConstantOneExpansionWrangler(local_tree) + +if rank == 0: + global_wrangler = ConstantOneExpansionWrangler(trav.tree) +else: + global_wrangler = None + +pot_dfmm = drive_dfmm( + local_wrangler, trav_local, trav_global, local_src_weights, global_wrangler, + local_target["mask"], local_target["scan"], local_target["size"], + _communicate_mpoles_via_allreduce=True +) + +if rank == 0: + assert(np.all(pot_fmm == nsources)) + assert(np.all(pot_dfmm == nsources)) -- GitLab From 3545bc748a5766b94806a8b01bd84e2b63c31d7c Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Fri, 22 Dec 2017 21:21:59 -0600 Subject: [PATCH 065/260] Revert 
"Revert "Add scaling test."" This reverts commit 903585b9f7e9ba4de577e7429e04de8c742f76f5. --- test/test_distributed_scaling.py | 110 +++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 test/test_distributed_scaling.py diff --git a/test/test_distributed_scaling.py b/test/test_distributed_scaling.py new file mode 100644 index 0000000..60a7845 --- /dev/null +++ b/test/test_distributed_scaling.py @@ -0,0 +1,110 @@ +import numpy as np +import sys +from mpi4py import MPI +from boxtree.distributed import generate_local_tree, generate_local_travs, drive_dfmm +import numpy.linalg as la +from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler +import time +import pyopencl as cl +import pyopencl.array + +import logging +logging.basicConfig(level=logging.INFO) + +# Global parameters +DIMS = 2 +DTYPE = np.float64 + +ORDER = 3 +HELMHOLTZ_K = 0 + + +def build_global_traversal_and_weights(ctx, nsources, ntargets): + queue = cl.CommandQueue(ctx) + + # Generate random particles and source weights + from boxtree.tools import make_uniform_particle_array as p_uniform + sources = p_uniform(queue, nsources, DIMS, DTYPE, seed=15) + targets = p_uniform(queue, ntargets, DIMS, DTYPE, seed=18) + + from boxtree.tools import particle_array_to_host + sources_host = particle_array_to_host(sources) + targets_host = particle_array_to_host(targets) + + sources_weights = np.ones(nsources, DTYPE) + + # Build the tree and interaction lists + from boxtree import TreeBuilder + tb = TreeBuilder(ctx) + tree, _ = tb(queue, sources, targets=targets, + max_particles_in_box=30, debug=True) + + from boxtree.traversal import FMMTraversalBuilder + tg = FMMTraversalBuilder(ctx) + d_trav, _ = tg(queue, tree, debug=True) + trav = d_trav.get(queue=queue) + + return (trav, sources_weights) + + +def get_dfmm_stats(ctx, comm, nsources, ntargets): + if comm.Get_rank() == 0: + trav, sources_weights = ( + build_global_traversal_and_weights(ctx, nsources, ntargets)) + else: + trav, sources_weights = 2 * (None,) + + comm.barrier() + + # Compute FMM using distributed memory parallelism + local_tree, local_src_weights, local_target, box_bounding_box = ( + generate_local_tree(trav, sources_weights)) + + trav_local, trav_global = ( + generate_local_travs(local_tree, local_src_weights, box_bounding_box)) + + def fmm_level_to_nterms(tree, level): + return ORDER + + from boxtree.distributed import ( + DistributedFMMLibExpansionWranglerCodeContainer, queue) + + local_wrangler = ( + DistributedFMMLibExpansionWranglerCodeContainer() + .get_wrangler(queue, local_tree, HELMHOLTZ_K, ORDER)) + + if comm.Get_rank() == 0: + global_wrangler = FMMLibExpansionWrangler( + trav.tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) + else: + global_wrangler = None + + stats = {} + + _ = drive_dfmm( + local_wrangler, trav_local, trav_global, local_src_weights, global_wrangler, + local_target["mask"], local_target["scan"], local_target["size"], + _stats=stats) + + return stats + + +def get_mpole_communication_data(ctx, comm, nsources, ntargets): + stats = get_dfmm_stats(ctx, comm, nsources, ntargets) + + my_mpoles_sent = np.zeros(1, int) + max_mpoles_sent = np.zeros(1, int) + + my_mpoles_sent[0] = sum(stats["mpoles_sent_per_round"]) + + comm.barrier() + comm.Reduce(my_mpoles_sent, max_mpoles_sent, op=MPI.MAX, root=0) + + if comm.Get_rank() == 0: + nrounds = len(stats["mpoles_sent_per_round"]) + print(f"{comm.Get_size()} & {nrounds} & {max_mpoles_sent[0]} \\\\") + + +if __name__ == "__main__": + ctx = 
cl._csc(interactive=False) + get_mpole_communication_data(ctx, MPI.COMM_WORLD, 100 ** 2, 100 ** 2) -- GitLab From 257220af6c306d4fbb22a1e48d0f6e9e89662b44 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Fri, 22 Dec 2017 21:22:04 -0600 Subject: [PATCH 066/260] Revert "Print absolute and relative errors." This reverts commit 514c527f00dd8b1422d7c3ab6d3fa1bc1dc6fd59. --- test/test_distributed.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/test_distributed.py b/test/test_distributed.py index 646a55d..e395168 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -155,6 +155,4 @@ last_time = now if rank == 0: print("Total time " + str(time.time() - start_time)) - print("rel error", la.norm((pot_fmm - pot_dfmm * 2 * np.pi) / pot_fmm, - ord=np.inf)) - print("abs error", la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=np.inf)) + print(la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=np.inf)) -- GitLab From dcf25693cc6e6db1b88d9aef0fdc663549ff3a98 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 22 Dec 2017 22:13:57 -0600 Subject: [PATCH 067/260] Collect local mask and scan --- boxtree/distributed.py | 59 ++++++++++++++++++++++------------------ test/test_distributed.py | 10 +++---- 2 files changed, 38 insertions(+), 31 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index be98dba..85ead26 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -421,7 +421,7 @@ def get_gen_local_tree_helper(queue, tree): """ ) - def gen_local_tree_helper(src_box_mask, tgt_box_mask, local_tree): + def gen_local_tree_helper(src_box_mask, tgt_box_mask, local_tree, local_data): """ This helper function generates a copy of the tree but with subset of particles, and fetch the generated fields to *local_tree*. 
""" @@ -545,8 +545,14 @@ def get_gen_local_tree_helper(queue, tree): local_box_target_counts_nonchild.get(queue=queue) local_tree.box_target_counts_cumul = \ local_box_target_counts_cumul.get(queue=queue) - return (src_particle_mask, src_particle_scan, tgt_particle_mask, - tgt_particle_scan) + + # Fetch fields to local_data + local_data["src_mask"] = src_particle_mask + local_data["src_scan"] = src_particle_scan + local_data["nsources"] = local_nsources + local_data["tgt_mask"] = tgt_particle_mask + local_data["tgt_scan"] = tgt_particle_scan + local_data["ntargets"] = local_ntargets return gen_local_tree_helper @@ -625,20 +631,22 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): total_rank = comm.Get_size() # {{{ Construct local tree for each rank on root - local_target = {"mask": None, "scan": None, "size": None} + if current_rank == 0: + local_data = np.empty((total_rank,), dtype=object) + for i in range(total_rank): + local_data[i] = { + "src_mask": None, "src_scan": None, "nsources": None, + "tgt_mask": None, "tgt_scan": None, "ntargets": None + } + else: + local_data = None + if current_rank == 0: tree = traversal.tree local_tree = np.empty((total_rank,), dtype=object) - local_target_mask = np.empty((total_rank,), dtype=object) - local_target_scan = np.empty((total_rank,), dtype=object) - local_ntargets = np.empty((total_rank,), dtype=tree.particle_id_dtype) - local_target["mask"] = local_target_mask - local_target["scan"] = local_target_scan - local_target["size"] = local_ntargets - - d_box_parent_ids = cl.array.to_device(queue, tree.box_parent_ids) # {{{ Partition the work + d_box_parent_ids = cl.array.to_device(queue, tree.box_parent_ids) # Each rank is responsible for calculating the multiple expansion as well as # evaluating target potentials in *responsible_boxes* @@ -823,17 +831,16 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): local_tree[rank].user_source_ids = None local_tree[rank].sorted_target_ids = None - (src_mask, src_scan, tgt_mask, tgt_scan) = \ - gen_local_tree_helper(src_boxes_mask[rank], - responsible_boxes_mask[rank], - local_tree[rank]) + gen_local_tree_helper(src_boxes_mask[rank], + responsible_boxes_mask[rank], + local_tree[rank], + local_data[rank]) local_src_weights[rank] = gen_local_weights_helper( - src_weights, src_mask, src_scan) - - local_target_mask[rank] = tgt_mask - local_target_scan[rank] = tgt_scan - local_ntargets[rank] = tgt_scan[-1].get(queue) + src_weights, + local_data[rank]["src_mask"], + local_data[rank]["src_scan"] + ) tree_req[rank] = comm.isend(local_tree[rank], dest=rank, tag=MPITags.DIST_TREE) @@ -858,7 +865,7 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): else: local_src_weights = comm.recv(source=0, tag=MPITags.DIST_WEIGHT) - rtv = (local_tree, local_src_weights, local_target) + rtv = (local_tree, local_src_weights, local_data) # Recieve box extent if current_rank == 0: @@ -1074,8 +1081,8 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wrangler, - local_target_mask, local_target_scan, local_ntargets, - comm=MPI.COMM_WORLD, _communicate_mpoles_via_allreduce=False): + local_data, comm=MPI.COMM_WORLD, + _communicate_mpoles_via_allreduce=False): # Get MPI information current_rank = comm.Get_rank() total_rank = comm.Get_size() @@ -1227,7 +1234,7 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran potentials_all_ranks[0] = 
potentials for i in range(1, total_rank): potentials_all_ranks[i] = np.empty( - (local_ntargets[i],), dtype=potentials.dtype) + (local_data[i]["ntargets"],), dtype=potentials.dtype) comm.Recv([potentials_all_ranks[i], potentials_mpi_type], source=i, tag=MPITags.GATHER_POTENTIALS) else: @@ -1257,7 +1264,7 @@ def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wran for i in range(total_rank): local_potentials = cl.array.to_device(queue, potentials_all_ranks[i]) fill_potentials_knl( - local_target_mask[i], local_target_scan[i], + local_data[i]["tgt_mask"], local_data[i]["tgt_scan"], local_potentials, d_potentials) potentials = d_potentials.get() diff --git a/test/test_distributed.py b/test/test_distributed.py index 646a55d..d870a7e 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -11,8 +11,8 @@ logging.basicConfig(level=logging.INFO) # Parameters dims = 2 -nsources = 10000 -ntargets = 10000 +nsources = 100000 +ntargets = 100000 dtype = np.float64 # Get the current rank @@ -55,7 +55,7 @@ if rank == 0: from pyopencl.clrandom import PhiloxGenerator rng = PhiloxGenerator(queue.context, seed=22) - target_radii = rng.uniform(queue, ntargets, a=0, b=0.25, dtype=np.float64).get() + target_radii = rng.uniform(queue, ntargets, a=0, b=0.05, dtype=np.float64).get() # Display sources and targets if "--display" in sys.argv: @@ -112,7 +112,7 @@ comm.barrier() start_time = last_time = time.time() # Compute FMM using distributed memory parallelism -local_tree, local_src_weights, local_target, box_bounding_box = \ +local_tree, local_src_weights, local_data, box_bounding_box = \ generate_local_tree(trav, sources_weights) now = time.time() @@ -146,7 +146,7 @@ else: pot_dfmm = drive_dfmm( local_wrangler, trav_local, trav_global, local_src_weights, global_wrangler, - local_target["mask"], local_target["scan"], local_target["size"] + local_data ) now = time.time() -- GitLab From 89df3523c432a7d1ea58acb0ff95328d03b1a75e Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 22 Dec 2017 22:20:12 -0600 Subject: [PATCH 068/260] Remove unnecessary argument --- boxtree/distributed.py | 3 +-- test/test_distributed.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 85ead26..c5416a2 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -891,8 +891,7 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): return rtv -def generate_local_travs(local_tree, local_src_weights, box_bounding_box=None, - comm=MPI.COMM_WORLD): +def generate_local_travs(local_tree, box_bounding_box=None, comm=MPI.COMM_WORLD): d_tree = local_tree.to_device(queue) # Modify box flags for targets diff --git a/test/test_distributed.py b/test/test_distributed.py index d870a7e..86de75d 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -119,8 +119,7 @@ now = time.time() print("Generate local tree " + str(now - last_time)) last_time = now -trav_local, trav_global = generate_local_travs(local_tree, local_src_weights, - box_bounding_box) +trav_local, trav_global = generate_local_travs(local_tree, box_bounding_box) now = time.time() print("Generate local trav " + str(now - last_time)) -- GitLab From cd4203c79a438b2c770beb79ec69410a656c1fc6 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 22 Dec 2017 22:51:53 -0600 Subject: [PATCH 069/260] Move local weights construction to drive_dfmm --- boxtree/distributed.py | 125 ++++++++++++++++++++------------------- test/test_distributed.py | 5 +- 2 files 
changed, 67 insertions(+), 63 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index c5416a2..07ccca0 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -557,37 +557,7 @@ def get_gen_local_tree_helper(queue, tree): return gen_local_tree_helper -def get_gen_local_weights_helper(queue, particle_dtype, weight_dtype): - gen_local_source_weights_knl = cl.elementwise.ElementwiseKernel( - queue.context, - arguments=Template(""" - __global ${weight_t} *src_weights, - __global ${particle_id_t} *particle_mask, - __global ${particle_id_t} *particle_scan, - __global ${weight_t} *local_weights - """, strict_undefined=True).render( - weight_t=dtype_to_ctype(weight_dtype), - particle_id_t=dtype_to_ctype(particle_dtype) - ), - operation=""" - if(particle_mask[i]) { - local_weights[particle_scan[i]] = src_weights[i]; - } - """ - ) - - def gen_local_weights(global_weights, source_mask, source_scan): - local_nsources = source_scan[-1].get(queue) - local_weights = cl.array.empty(queue, (local_nsources,), - dtype=weight_dtype) - gen_local_source_weights_knl(global_weights, source_mask, source_scan, - local_weights) - return local_weights.get(queue) - - return gen_local_weights - - -def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): +def generate_local_tree(traversal, comm=MPI.COMM_WORLD): # {{{ kernel to mark if a box mpole is used by a process via an interaction list @memoize_in(generate_local_tree, "loopy_cache") @@ -809,18 +779,10 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): # }}} - # Convert src_weights to tree order - src_weights = src_weights[tree.user_source_ids] - src_weights = cl.array.to_device(queue, src_weights) - local_src_weights = np.empty((total_rank,), dtype=object) - # request objects for non-blocking communication tree_req = np.empty((total_rank,), dtype=object) - weight_req = np.empty((total_rank,), dtype=object) gen_local_tree_helper = get_gen_local_tree_helper(queue, tree) - gen_local_weights_helper = get_gen_local_weights_helper(queue, - tree.particle_id_dtype, src_weights.dtype) for rank in range(total_rank): local_tree[rank] = LocalTree.copy_from_global_tree( tree, responsible_boxes_list[rank].get(), @@ -836,16 +798,8 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): local_tree[rank], local_data[rank]) - local_src_weights[rank] = gen_local_weights_helper( - src_weights, - local_data[rank]["src_mask"], - local_data[rank]["src_scan"] - ) - tree_req[rank] = comm.isend(local_tree[rank], dest=rank, tag=MPITags.DIST_TREE) - weight_req[rank] = comm.isend(local_src_weights[rank], dest=rank, - tag=MPITags.DIST_WEIGHT) # }}} @@ -857,16 +811,6 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): else: local_tree = comm.recv(source=0, tag=MPITags.DIST_TREE) - # Recieve source weights from root - if current_rank == 0: - for rank in range(1, total_rank): - weight_req[rank].wait() - local_src_weights = local_src_weights[0] - else: - local_src_weights = comm.recv(source=0, tag=MPITags.DIST_WEIGHT) - - rtv = (local_tree, local_src_weights, local_data) - # Recieve box extent if current_rank == 0: box_target_bounding_box_min = traversal.box_target_bounding_box_min @@ -886,9 +830,8 @@ def generate_local_tree(traversal, src_weights, comm=MPI.COMM_WORLD): "min": box_target_bounding_box_min, "max": box_target_bounding_box_max } - rtv += (box_bounding_box,) - return rtv + return local_tree, local_data, box_bounding_box def generate_local_travs(local_tree, box_bounding_box=None, 
comm=MPI.COMM_WORLD): @@ -1079,13 +1022,75 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): # }}} -def drive_dfmm(wrangler, trav_local, trav_global, local_src_weights, global_wrangler, +def get_gen_local_weights_helper(queue, particle_dtype, weight_dtype): + gen_local_source_weights_knl = cl.elementwise.ElementwiseKernel( + queue.context, + arguments=Template(""" + __global ${weight_t} *src_weights, + __global ${particle_id_t} *particle_mask, + __global ${particle_id_t} *particle_scan, + __global ${weight_t} *local_weights + """, strict_undefined=True).render( + weight_t=dtype_to_ctype(weight_dtype), + particle_id_t=dtype_to_ctype(particle_dtype) + ), + operation=""" + if(particle_mask[i]) { + local_weights[particle_scan[i]] = src_weights[i]; + } + """ + ) + + def gen_local_weights(global_weights, source_mask, source_scan): + local_nsources = source_scan[-1].get(queue) + local_weights = cl.array.empty(queue, (local_nsources,), + dtype=weight_dtype) + gen_local_source_weights_knl(global_weights, source_mask, source_scan, + local_weights) + return local_weights.get(queue) + + return gen_local_weights + + +def drive_dfmm(wrangler, trav_local, global_wrangler, trav_global, source_weights, local_data, comm=MPI.COMM_WORLD, _communicate_mpoles_via_allreduce=False): # Get MPI information current_rank = comm.Get_rank() total_rank = comm.Get_size() + # {{{ Distribute source weights + + if current_rank == 0: + weight_req = np.empty((total_rank,), dtype=object) + + # Convert src_weights to tree order + src_weights = source_weights[global_wrangler.tree.user_source_ids] + src_weights = cl.array.to_device(queue, src_weights) + local_src_weights = np.empty((total_rank,), dtype=object) + + # Generate local_weights + gen_local_weights_helper = get_gen_local_weights_helper( + queue, global_wrangler.tree.particle_id_dtype, src_weights.dtype) + for rank in range(total_rank): + local_src_weights[rank] = gen_local_weights_helper( + src_weights, + local_data[rank]["src_mask"], + local_data[rank]["src_scan"] + ) + weight_req[rank] = comm.isend(local_src_weights[rank], dest=rank, + tag=MPITags.DIST_WEIGHT) + + # Recieve source weights from root + if current_rank == 0: + for rank in range(1, total_rank): + weight_req[rank].wait() + local_src_weights = local_src_weights[0] + else: + local_src_weights = comm.recv(source=0, tag=MPITags.DIST_WEIGHT) + + # }}} + # {{{ "Step 2.1:" Construct local multipoles import time diff --git a/test/test_distributed.py b/test/test_distributed.py index 86de75d..c9cd4ca 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -112,8 +112,7 @@ comm.barrier() start_time = last_time = time.time() # Compute FMM using distributed memory parallelism -local_tree, local_src_weights, local_data, box_bounding_box = \ - generate_local_tree(trav, sources_weights) +local_tree, local_data, box_bounding_box = generate_local_tree(trav) now = time.time() print("Generate local tree " + str(now - last_time)) @@ -144,7 +143,7 @@ else: global_wrangler = None pot_dfmm = drive_dfmm( - local_wrangler, trav_local, trav_global, local_src_weights, global_wrangler, + local_wrangler, trav_local, global_wrangler, trav_global, sources_weights, local_data ) -- GitLab From 37b5808937df4d9512b9f204441c8fb8a257505b Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Sat, 23 Dec 2017 00:29:36 -0600 Subject: [PATCH 070/260] Improve comment. 
--- boxtree/distributed.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 07ccca0..6c0a20f 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -909,8 +909,9 @@ def generate_local_travs(local_tree, box_bounding_box=None, comm=MPI.COMM_WORLD) def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): """Based on Algorithm 3: Reduce and Scatter in [1]. - The main idea is to mimic a hypercube allreduce, but to reduce bandwidth by - sending only necessary information. + The main idea is to mimic a allreduce as done on a hypercube network, but to + decrease the bandwidth cost by sending only information that is relevant to + the processes receiving the message. .. [1] Lashuk, Ilya, Aparna Chandramowlishwaran, Harper Langston, Tuan-Anh Nguyen, Rahul Sampath, Aashay Shringarpure, Richard Vuduc, Lexing -- GitLab From c1126181ee5c6e60f8c12faf2b453a32336e8477 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Wed, 3 Jan 2018 01:32:32 -0600 Subject: [PATCH 071/260] s/butterfly/tree-like/ --- boxtree/tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/boxtree/tools.py b/boxtree/tools.py index 08a4716..fdcefc9 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -647,7 +647,7 @@ class MaskCompressorKernel(object): # {{{ all-reduce class AllReduceCommPattern(object): - """Describes a butterfly communication pattern for allreduce. Supports efficient + """Describes a tree-like communication pattern for allreduce. Supports efficient allreduce between an arbitrary number of processes. """ -- GitLab From 5e617b0216d398bb2ccb2395424edd23b78aaa35 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Wed, 3 Jan 2018 01:34:44 -0600 Subject: [PATCH 072/260] Revert "Revert "Revert "Add scaling test.""" This reverts commit 3545bc748a5766b94806a8b01bd84e2b63c31d7c. 
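To make the reduce-and-scatter comments patched above concrete: on an actual hypercube network with a power-of-two number of ranks, round k pairs each rank with the rank whose id differs in bit k, and only the multipole data relevant to the receiving side is shipped in that round. The sketch below illustrates the pairing only; it assumes a power-of-two communicator size, whereas AllReduceCommPattern in boxtree.tools generalizes the pattern to an arbitrary number of processes:

    import math

    def hypercube_partners(rank, nranks):
        # One partner per round; requires nranks to be a power of two.
        assert nranks > 0 and nranks & (nranks - 1) == 0
        return [rank ^ (1 << k) for k in range(int(math.log2(nranks)))]

    # With 8 ranks, rank 3 exchanges messages with ranks 2, 1 and 7,
    # one per round, for a total of log2(8) = 3 rounds.
    assert hypercube_partners(3, 8) == [2, 1, 7]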
--- test/test_distributed_scaling.py | 110 ------------------------------- 1 file changed, 110 deletions(-) delete mode 100644 test/test_distributed_scaling.py diff --git a/test/test_distributed_scaling.py b/test/test_distributed_scaling.py deleted file mode 100644 index 60a7845..0000000 --- a/test/test_distributed_scaling.py +++ /dev/null @@ -1,110 +0,0 @@ -import numpy as np -import sys -from mpi4py import MPI -from boxtree.distributed import generate_local_tree, generate_local_travs, drive_dfmm -import numpy.linalg as la -from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler -import time -import pyopencl as cl -import pyopencl.array - -import logging -logging.basicConfig(level=logging.INFO) - -# Global parameters -DIMS = 2 -DTYPE = np.float64 - -ORDER = 3 -HELMHOLTZ_K = 0 - - -def build_global_traversal_and_weights(ctx, nsources, ntargets): - queue = cl.CommandQueue(ctx) - - # Generate random particles and source weights - from boxtree.tools import make_uniform_particle_array as p_uniform - sources = p_uniform(queue, nsources, DIMS, DTYPE, seed=15) - targets = p_uniform(queue, ntargets, DIMS, DTYPE, seed=18) - - from boxtree.tools import particle_array_to_host - sources_host = particle_array_to_host(sources) - targets_host = particle_array_to_host(targets) - - sources_weights = np.ones(nsources, DTYPE) - - # Build the tree and interaction lists - from boxtree import TreeBuilder - tb = TreeBuilder(ctx) - tree, _ = tb(queue, sources, targets=targets, - max_particles_in_box=30, debug=True) - - from boxtree.traversal import FMMTraversalBuilder - tg = FMMTraversalBuilder(ctx) - d_trav, _ = tg(queue, tree, debug=True) - trav = d_trav.get(queue=queue) - - return (trav, sources_weights) - - -def get_dfmm_stats(ctx, comm, nsources, ntargets): - if comm.Get_rank() == 0: - trav, sources_weights = ( - build_global_traversal_and_weights(ctx, nsources, ntargets)) - else: - trav, sources_weights = 2 * (None,) - - comm.barrier() - - # Compute FMM using distributed memory parallelism - local_tree, local_src_weights, local_target, box_bounding_box = ( - generate_local_tree(trav, sources_weights)) - - trav_local, trav_global = ( - generate_local_travs(local_tree, local_src_weights, box_bounding_box)) - - def fmm_level_to_nterms(tree, level): - return ORDER - - from boxtree.distributed import ( - DistributedFMMLibExpansionWranglerCodeContainer, queue) - - local_wrangler = ( - DistributedFMMLibExpansionWranglerCodeContainer() - .get_wrangler(queue, local_tree, HELMHOLTZ_K, ORDER)) - - if comm.Get_rank() == 0: - global_wrangler = FMMLibExpansionWrangler( - trav.tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) - else: - global_wrangler = None - - stats = {} - - _ = drive_dfmm( - local_wrangler, trav_local, trav_global, local_src_weights, global_wrangler, - local_target["mask"], local_target["scan"], local_target["size"], - _stats=stats) - - return stats - - -def get_mpole_communication_data(ctx, comm, nsources, ntargets): - stats = get_dfmm_stats(ctx, comm, nsources, ntargets) - - my_mpoles_sent = np.zeros(1, int) - max_mpoles_sent = np.zeros(1, int) - - my_mpoles_sent[0] = sum(stats["mpoles_sent_per_round"]) - - comm.barrier() - comm.Reduce(my_mpoles_sent, max_mpoles_sent, op=MPI.MAX, root=0) - - if comm.Get_rank() == 0: - nrounds = len(stats["mpoles_sent_per_round"]) - print(f"{comm.Get_size()} & {nrounds} & {max_mpoles_sent[0]} \\\\") - - -if __name__ == "__main__": - ctx = cl._csc(interactive=False) - get_mpole_communication_data(ctx, MPI.COMM_WORLD, 100 ** 2, 100 ** 2) -- GitLab 
From f99f48c6a1e59f15658dc73d9844bc8252b34f3b Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Wed, 3 Jan 2018 05:51:22 -0500 Subject: [PATCH 073/260] Add a comment about efficiency of the mask compressor kernel --- boxtree/tools.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/boxtree/tools.py b/boxtree/tools.py index fdcefc9..b2dc9ea 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -632,6 +632,8 @@ class MaskCompressorKernel(object): result, evt = knl(queue, mask.shape[0], mask.data) return (result["output"].lists, evt) elif len(mask.shape) == 2: + # FIXME: This is efficient for small column sizes but may not be + # for larger ones since the work is partitioned by row. knl = self.get_matrix_compressor_kernel(mask.dtype, list_dtype) size = mask.dtype.itemsize result, evt = knl(queue, mask.shape[0], mask.shape[1], -- GitLab From 7318ff20c07ee59482f9ba669850e638ecbea977 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 13 Mar 2018 20:26:51 -0500 Subject: [PATCH 074/260] Integrate distributed FMM with compressed list 3 --- boxtree/distributed.py | 8 ++++---- test/test_distributed.py | 3 ++- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 6c0a20f..bfbd59e 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -591,7 +591,7 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD): ], default_offset=lp.auto) - knl = lp.split_iname(knl, "itgt_box", 16, outer_tag="g.0", inner_tag="l.0") + # knl = lp.split_iname(knl, "itgt_box", 16, outer_tag="g.0", inner_tag="l.0") return knl # }}} @@ -756,7 +756,8 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD): knl(queue, total_rank=total_rank, nboxes=tree.nboxes, - target_boxes=traversal.target_boxes, + target_boxes=( + traversal.target_boxes_sep_smaller_by_source_level[level]), relevant_boxes_mask=responsible_boxes_mask, source_box_starts=source_box_starts, source_box_lists=source_box_lists, @@ -1169,8 +1170,7 @@ def drive_dfmm(wrangler, trav_local, global_wrangler, trav_global, source_weight # contribution *out* of the downward-propagating local expansions) potentials = potentials + wrangler.eval_multipoles( - trav_global.level_start_target_box_nrs, - trav_global.target_boxes, + trav_global.target_boxes_sep_smaller_by_source_level, trav_global.from_sep_smaller_by_level, mpole_exps) diff --git a/test/test_distributed.py b/test/test_distributed.py index ac7ad6c..6838dc6 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -153,4 +153,5 @@ last_time = now if rank == 0: print("Total time " + str(time.time() - start_time)) - print(la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=np.inf)) + print((la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=np.inf) / + la.norm(pot_fmm, ord=np.inf))) -- GitLab From f21f4e6f0b1567849e5bd38100830698a48a98a4 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 19 Mar 2018 19:15:13 -0400 Subject: [PATCH 075/260] Dict is more proper than class without methods --- boxtree/distributed.py | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index bfbd59e..23466ec 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -206,12 +206,13 @@ class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): # }}} -class MPITags(): - DIST_TREE = 0 - DIST_WEIGHT = 1 - GATHER_POTENTIALS = 2 - REDUCE_POTENTIALS = 3 - REDUCE_INDICES = 4 +MPITags = dict( + DIST_TREE=0, + DIST_WEIGHT=1, + GATHER_POTENTIALS=2, + 
REDUCE_POTENTIALS=3, + REDUCE_INDICES=4 +) def partition_work(traversal, total_rank, queue): @@ -800,7 +801,7 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD): local_data[rank]) tree_req[rank] = comm.isend(local_tree[rank], dest=rank, - tag=MPITags.DIST_TREE) + tag=MPITags["DIST_TREE"]) # }}} @@ -810,7 +811,7 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD): tree_req[rank].wait() local_tree = local_tree[0] else: - local_tree = comm.recv(source=0, tag=MPITags.DIST_TREE) + local_tree = comm.recv(source=0, tag=MPITags["DIST_TREE"]) # Recieve box extent if current_rank == 0: @@ -988,19 +989,20 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): # Send the box subset to the other processors. for sink in comm_pattern.sinks(): req = comm.Isend(relevant_mpole_exps, dest=sink, - tag=MPITags.REDUCE_POTENTIALS) + tag=MPITags["REDUCE_POTENTIALS"]) send_requests.append(req) req = comm.Isend(relevant_boxes_list, dest=sink, - tag=MPITags.REDUCE_INDICES) + tag=MPITags["REDUCE_INDICES"]) send_requests.append(req) # Receive data from other processors. for source in comm_pattern.sources(): - comm.Recv(mpole_exps_buf, source=source, tag=MPITags.REDUCE_POTENTIALS) + comm.Recv(mpole_exps_buf, source=source, + tag=MPITags["REDUCE_POTENTIALS"]) status = MPI.Status() - comm.Recv(boxes_list_buf, source=source, tag=MPITags.REDUCE_INDICES, + comm.Recv(boxes_list_buf, source=source, tag=MPITags["REDUCE_INDICES"], status=status) nboxes = status.Get_count() // boxes_list_buf.dtype.itemsize @@ -1081,7 +1083,7 @@ def drive_dfmm(wrangler, trav_local, global_wrangler, trav_global, source_weight local_data[rank]["src_scan"] ) weight_req[rank] = comm.isend(local_src_weights[rank], dest=rank, - tag=MPITags.DIST_WEIGHT) + tag=MPITags["DIST_WEIGHT"]) # Recieve source weights from root if current_rank == 0: @@ -1089,7 +1091,7 @@ def drive_dfmm(wrangler, trav_local, global_wrangler, trav_global, source_weight weight_req[rank].wait() local_src_weights = local_src_weights[0] else: - local_src_weights = comm.recv(source=0, tag=MPITags.DIST_WEIGHT) + local_src_weights = comm.recv(source=0, tag=MPITags["DIST_WEIGHT"]) # }}} @@ -1241,10 +1243,10 @@ def drive_dfmm(wrangler, trav_local, global_wrangler, trav_global, source_weight potentials_all_ranks[i] = np.empty( (local_data[i]["ntargets"],), dtype=potentials.dtype) comm.Recv([potentials_all_ranks[i], potentials_mpi_type], - source=i, tag=MPITags.GATHER_POTENTIALS) + source=i, tag=MPITags["GATHER_POTENTIALS"]) else: comm.Send([potentials, potentials_mpi_type], - dest=0, tag=MPITags.GATHER_POTENTIALS) + dest=0, tag=MPITags["GATHER_POTENTIALS"]) if current_rank == 0: d_potentials = cl.array.empty(queue, (global_wrangler.tree.ntargets,), -- GitLab From fb9fb36f4bb6c550bd937e6adbcfbdc3f17263c4 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 19 Mar 2018 20:16:00 -0400 Subject: [PATCH 076/260] Add workload_weight interface --- boxtree/distributed.py | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 23466ec..634f703 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -8,6 +8,7 @@ from pyopencl.tools import dtype_to_ctype from pyopencl.scan import GenericScanKernel from pytools import memoize_in, memoize_method from boxtree import Tree +from collections import namedtuple __copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ @@ -214,8 +215,10 @@ MPITags = dict( REDUCE_INDICES=4 ) +WorkloadWeight = namedtuple('Workload', ['direct', 
'm2l', 'm2p', 'p2l', 'multipole']) -def partition_work(traversal, total_rank, queue): + +def partition_work(traversal, total_rank, workload_weight): """ This function returns a pyopencl array of size total_rank*nboxes, where the (i,j) entry is 1 iff rank i is responsible for box j. """ @@ -233,7 +236,7 @@ def partition_work(traversal, total_rank, queue): particle_count = 0 for j in range(list1.shape[0]): particle_count += tree.box_source_counts_nonchild[list1[j]] - workload[box_idx] += box_ntargets * particle_count + workload[box_idx] += box_ntargets * particle_count * workload_weight.direct # workload for list 3 near if tree.targets_have_extent: @@ -243,11 +246,12 @@ def partition_work(traversal, total_rank, queue): particle_count = 0 for j in range(list3_near.shape[0]): particle_count += tree.box_source_counts_nonchild[list3_near[j]] - workload[box_idx] += box_ntargets * particle_count + workload[box_idx] += ( + box_ntargets * particle_count * workload_weight.direct) for i in range(tree.nboxes): # workload for multipole calculation - workload[i] += tree.box_source_counts_nonchild[i] * 5 + workload[i] += tree.box_source_counts_nonchild[i] * workload_weight.multipole total_workload = 0 for i in range(tree.nboxes): @@ -558,7 +562,7 @@ def get_gen_local_tree_helper(queue, tree): return gen_local_tree_helper -def generate_local_tree(traversal, comm=MPI.COMM_WORLD): +def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): # {{{ kernel to mark if a box mpole is used by a process via an interaction list @memoize_in(generate_local_tree, "loopy_cache") @@ -621,8 +625,16 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD): # Each rank is responsible for calculating the multiple expansion as well as # evaluating target potentials in *responsible_boxes* - responsible_boxes_mask, responsible_boxes_list = \ - partition_work(traversal, total_rank, queue) + if workload_weight is None: + workload_weight = WorkloadWeight( + direct=1, + m2l=1, + m2p=1, + p2l=1, + multipole=5 + ) + responsible_boxes_mask, responsible_boxes_list = partition_work( + traversal, total_rank, workload_weight) # Calculate ancestors of responsible boxes ancestor_boxes = cl.array.zeros(queue, (total_rank, tree.nboxes), -- GitLab From edf6b16f6830776bdcdcb2bfbdb0fd0bc4103e9e Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 22 Mar 2018 19:35:36 -0400 Subject: [PATCH 077/260] Improve work partition to consider m2l, m2p and p2l --- boxtree/distributed.py | 74 +++++++++++++++++++++++++++++----------- test/test_distributed.py | 38 ++++++++++----------- 2 files changed, 73 insertions(+), 39 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 634f703..4013582 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -2,6 +2,7 @@ from __future__ import division from mpi4py import MPI import numpy as np import loopy as lp +from loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_1 import pyopencl as cl from mako.template import Template from pyopencl.tools import dtype_to_ctype @@ -223,31 +224,69 @@ def partition_work(traversal, total_rank, workload_weight): the (i,j) entry is 1 iff rank i is responsible for box j. 
""" tree = traversal.tree - workload = np.zeros((tree.nboxes,), dtype=np.float64) - for i in range(traversal.target_boxes.shape[0]): - box_idx = traversal.target_boxes[i] - box_ntargets = tree.box_target_counts_nonchild[box_idx] - # workload for list 1 - start = traversal.neighbor_source_boxes_starts[i] - end = traversal.neighbor_source_boxes_starts[i + 1] + # workload for list 1 + for itarget_box, box_idx in enumerate(traversal.target_boxes): + box_ntargets = tree.box_target_counts_nonchild[box_idx] + start = traversal.neighbor_source_boxes_starts[itarget_box] + end = traversal.neighbor_source_boxes_starts[itarget_box + 1] list1 = traversal.neighbor_source_boxes_lists[start:end] particle_count = 0 - for j in range(list1.shape[0]): - particle_count += tree.box_source_counts_nonchild[list1[j]] + for ibox in list1: + particle_count += tree.box_source_counts_nonchild[ibox] workload[box_idx] += box_ntargets * particle_count * workload_weight.direct + # workload for list 2 + for itarget_or_target_parent_boxes, box_idx in enumerate( + traversal.target_or_target_parent_boxes): + start = traversal.from_sep_siblings_starts[itarget_or_target_parent_boxes] + end = traversal.from_sep_siblings_starts[itarget_or_target_parent_boxes + 1] + workload[box_idx] += (end - start) * workload_weight.m2l + + for ilevel in range(tree.nlevels): + # workload for list 3 far + for itarget_box, box_idx in enumerate( + traversal.target_boxes_sep_smaller_by_source_level[ilevel]): + box_ntargets = tree.box_target_counts_nonchild[box_idx] + start = traversal.from_sep_smaller_by_level[ilevel].starts[itarget_box] + end = traversal.from_sep_smaller_by_level[ilevel].starts[ + itarget_box + 1] + workload[box_idx] += (end - start) * box_ntargets + # workload for list 3 near if tree.targets_have_extent: - start = traversal.from_sep_close_smaller_starts[i] - end = traversal.from_sep_close_smaller_starts[i + 1] - list3_near = traversal.from_sep_close_smaller_lists[start:end] + for itarget_box, box_idx in enumerate(traversal.target_boxes): + box_ntargets = tree.box_target_counts_nonchild[box_idx] + start = traversal.from_sep_close_smaller_starts[itarget_box] + end = traversal.from_sep_close_smaller_starts[itarget_box + 1] + particle_count = 0 + for near_box_id in traversal.from_sep_close_smaller_lists[start:end]: + particle_count += tree.box_source_counts_nonchild[near_box_id] + workload[box_idx] += ( + box_ntargets * particle_count * workload_weight.direct) + + # workload for list 4 + for itarget_or_target_parent_boxes, box_idx in enumerate( + traversal.target_or_target_parent_boxes): + start = traversal.from_sep_bigger_starts[itarget_or_target_parent_boxes] + end = traversal.from_sep_bigger_starts[itarget_or_target_parent_boxes + 1] + particle_count = 0 + for far_box_id in traversal.from_sep_bigger_lists[start:end]: + particle_count += tree.box_source_counts_nonchild[far_box_id] + workload[box_idx] += particle_count * workload_weight.p2l + + if tree.targets_have_extent: + box_ntargets = tree.box_target_counts_nonchild[box_idx] + start = traversal.from_sep_close_bigger_starts[ + itarget_or_target_parent_boxes] + end = traversal.from_sep_close_bigger_starts[ + itarget_or_target_parent_boxes + 1] particle_count = 0 - for j in range(list3_near.shape[0]): - particle_count += tree.box_source_counts_nonchild[list3_near[j]] + for direct_box_id in traversal.from_sep_close_bigger_lists[start:end]: + particle_count += tree.box_source_counts_nonchild[direct_box_id] workload[box_idx] += ( - box_ntargets * particle_count * 
workload_weight.direct) + box_ntargets * particle_count * workload_weight.direct) for i in range(tree.nboxes): # workload for multipole calculation @@ -1147,7 +1186,6 @@ def drive_dfmm(wrangler, trav_local, global_wrangler, trav_global, source_weight # }}} # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") - last_time = time.time() logger.debug("direct evaluation from neighbor source boxes ('list 1')") potentials = wrangler.eval_direct( @@ -1157,7 +1195,6 @@ def drive_dfmm(wrangler, trav_local, global_wrangler, trav_global, source_weight local_src_weights) # these potentials are called alpha in [1] - print("List 1: " + str(time.time()-last_time)) # }}} @@ -1176,7 +1213,6 @@ def drive_dfmm(wrangler, trav_local, global_wrangler, trav_global, source_weight # }}} # {{{ "Stage 5:" evaluate sep. smaller mpoles ("list 3") at particles - last_time = time.time() logger.debug("evaluate sep. smaller mpoles at particles ('list 3 far')") @@ -1199,8 +1235,6 @@ def drive_dfmm(wrangler, trav_local, global_wrangler, trav_global, source_weight trav_global.from_sep_close_smaller_lists, local_src_weights) - print("List 3: " + str(time.time()-last_time)) - # }}} # {{{ "Stage 6:" form locals for separated bigger source boxes ("list 4") diff --git a/test/test_distributed.py b/test/test_distributed.py index 6838dc6..fac1da0 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -1,18 +1,20 @@ import numpy as np import sys from mpi4py import MPI -from boxtree.distributed import generate_local_tree, generate_local_travs, drive_dfmm +from boxtree.distributed import (generate_local_tree, generate_local_travs, + drive_dfmm, WorkloadWeight, + DistributedFMMLibExpansionWranglerCodeContainer) import numpy.linalg as la from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler import time -import logging -logging.basicConfig(level=logging.INFO) +# import logging +# logging.basicConfig(level=logging.INFO) # Parameters dims = 2 -nsources = 100000 -ntargets = 100000 +nsources = 10000 +ntargets = 10000 dtype = np.float64 # Get the current rank @@ -112,26 +114,26 @@ comm.barrier() start_time = last_time = time.time() # Compute FMM using distributed memory parallelism -local_tree, local_data, box_bounding_box = generate_local_tree(trav) - -now = time.time() -print("Generate local tree " + str(now - last_time)) -last_time = now +workload_weight = WorkloadWeight( + direct=15, + m2l=ORDER*ORDER, + m2p=ORDER*ORDER, + p2l=ORDER*ORDER, + multipole=ORDER*ORDER*5 +) +local_tree, local_data, box_bounding_box = generate_local_tree(trav) trav_local, trav_global = generate_local_travs(local_tree, box_bounding_box) -now = time.time() -print("Generate local trav " + str(now - last_time)) -last_time = now +comm.barrier() +last_time = time.time() def fmm_level_to_nterms(tree, level): return ORDER -from boxtree.distributed import ( - DistributedFMMLibExpansionWranglerCodeContainer, queue) - +from boxtree.distributed import queue local_wrangler = ( DistributedFMMLibExpansionWranglerCodeContainer() .get_wrangler(queue, local_tree, HELMHOLTZ_K, ORDER)) @@ -147,9 +149,7 @@ pot_dfmm = drive_dfmm( local_data ) -now = time.time() -print("Distributed FMM " + str(now - last_time)) -last_time = now +print("Distributed FMM " + str(time.time() - last_time)) if rank == 0: print("Total time " + str(time.time() - start_time)) -- GitLab From ce99393ea13c068b03d355d5acba9bda203705ae Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 27 Mar 2018 20:45:36 -0500 Subject: [PATCH 078/260] Handle different FMM order by 
level --- boxtree/distributed.py | 74 ++++++++++++++++++++++++++-------------- test/test_distributed.py | 61 +++++++-------------------------- 2 files changed, 62 insertions(+), 73 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 4013582..3322dfe 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -10,7 +10,7 @@ from pyopencl.scan import GenericScanKernel from pytools import memoize_in, memoize_method from boxtree import Tree from collections import namedtuple - +from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler __copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ Copyright (C) 2017 Hao Gao" @@ -116,7 +116,7 @@ class LocalTree(Tree): def to_device(self, queue): additional_fields_to_device = ["responsible_boxes_list", "ancestor_mask", - "box_to_user_starts", "box_to_user_lists"] + "box_to_user_starts", "box_to_user_lists"] return tree_to_device(queue, self, additional_fields_to_device) @@ -153,42 +153,66 @@ class DistributedFMMLibExpansionWranglerCodeContainer(object): knl = lp.split_iname(knl, "ibox", 16, outer_tag="g.0", inner_tag="l.0") return knl - def get_wrangler(self, queue, tree, helmholtz_k, fmm_order): + def get_wrangler(self, queue, tree, helmholtz_k, fmm_level_to_nterms): return DistributedFMMLibExpansionWrangler(self, queue, tree, helmholtz_k, - fmm_order) - - -from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler + fmm_level_to_nterms) class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): - def __init__(self, code_container, queue, tree, helmholtz_k, fmm_order): - """ - :arg fmm_order: Only supports single order for now - """ - def fmm_level_to_nterms(tree, level): - return fmm_order - + def __init__(self, code_container, queue, tree, helmholtz_k, + fmm_level_to_nterms=None): FMMLibExpansionWrangler.__init__(self, tree, helmholtz_k, fmm_level_to_nterms) self.queue = queue - self.fmm_order = fmm_order self.code_container = code_container def slice_mpoles(self, mpoles, slice_indices): - mpoles = mpoles.reshape((-1,) + self.expansion_shape(self.fmm_order)) - return mpoles[slice_indices, :].reshape((-1,)) + if len(slice_indices) == 0: + return np.empty((0,), dtype=mpoles.dtype) + + level_start_slice_indices = np.searchsorted( + slice_indices, self.tree.level_start_box_nrs) + mpoles_list = [] + + for ilevel in range(self.tree.nlevels): + start, stop = level_start_slice_indices[ilevel:ilevel+2] + if stop > start: + level_start_box_idx, mpoles_current_level = \ + self.multipole_expansions_view(mpoles, ilevel) + mpoles_list.append( + mpoles_current_level[ + slice_indices[start:stop] - level_start_box_idx + ].reshape(-1) + ) + + return np.concatenate(mpoles_list) def update_mpoles(self, mpoles, mpole_updates, slice_indices): - """ - :arg mpole_updates: The first *len(slice_indices)* entries should contain - the values to add to *mpoles* - """ - mpoles = mpoles.reshape((-1,) + self.expansion_shape(self.fmm_order)) - mpole_updates = mpole_updates.reshape( - (-1,) + self.expansion_shape(self.fmm_order)) - mpoles[slice_indices, :] += mpole_updates[:len(slice_indices), :] + if len(slice_indices) == 0: + return + + level_start_slice_indices = np.searchsorted( + slice_indices, self.tree.level_start_box_nrs) + mpole_updates_start = 0 + + for ilevel in range(self.tree.nlevels): + start, stop = level_start_slice_indices[ilevel:ilevel+2] + if stop > start: + level_start_box_idx, mpoles_current_level = \ + self.multipole_expansions_view(mpoles, ilevel) + mpoles_shape = (stop - start,) + 
mpoles_current_level.shape[1:] + + from pytools import product + mpole_updates_end = mpole_updates_start + product(mpoles_shape) + + mpoles_current_level[ + slice_indices[start:stop] - level_start_box_idx + ] += mpole_updates[ + mpole_updates_start:mpole_updates_end + ].reshape(mpoles_shape) + + mpole_updates_start = mpole_updates_end def empty_box_in_subrange_mask(self): return cl.array.empty(self.queue, self.tree.nboxes, dtype=np.int8) diff --git a/test/test_distributed.py b/test/test_distributed.py index fac1da0..10591df 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -1,18 +1,15 @@ import numpy as np import sys from mpi4py import MPI -from boxtree.distributed import (generate_local_tree, generate_local_travs, - drive_dfmm, WorkloadWeight, - DistributedFMMLibExpansionWranglerCodeContainer) +from boxtree.distributed import ( + generate_local_tree, generate_local_travs, drive_dfmm, WorkloadWeight, + DistributedFMMLibExpansionWranglerCodeContainer +) import numpy.linalg as la from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler -import time - -# import logging -# logging.basicConfig(level=logging.INFO) # Parameters -dims = 2 +dims = 3 nsources = 10000 ntargets = 10000 dtype = np.float64 @@ -25,16 +22,15 @@ rank = comm.Get_rank() trav = None sources_weights = None wrangler = None +HELMHOLTZ_K = 0 -ORDER = 3 -HELMHOLTZ_K = 0 +def fmm_level_to_nterms(tree, level): + return max(level, 3) # Generate particles and run shared-memory parallelism on rank 0 if rank == 0: - last_time = time.time() - # Configure PyOpenCL import pyopencl as cl ctx = cl.create_some_context() @@ -66,10 +62,6 @@ if rank == 0: plt.plot(targets_host[:, 0], targets_host[:, 1], "ro") plt.show() - now = time.time() - print("Generate particles " + str(now - last_time)) - last_time = now - # Calculate potentials using direct evaluation # distances = la.norm(sources_host.reshape(1, nsources, 2) - \ # targets_host.reshape(ntargets, 1, 2), @@ -82,23 +74,12 @@ if rank == 0: tree, _ = tb(queue, sources, targets=targets, target_radii=target_radii, stick_out_factor=0.25, max_particles_in_box=30, debug=True) - now = time.time() - print("Generate tree " + str(now - last_time)) - last_time = now - from boxtree.traversal import FMMTraversalBuilder tg = FMMTraversalBuilder(ctx) d_trav, _ = tg(queue, tree, debug=True) trav = d_trav.get(queue=queue) - now = time.time() - print("Generate traversal " + str(now - last_time)) - last_time = now - # Get pyfmmlib expansion wrangler - def fmm_level_to_nterms(tree, level): - return ORDER - wrangler = FMMLibExpansionWrangler( trav.tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) @@ -106,37 +87,24 @@ if rank == 0: from boxtree.fmm import drive_fmm pot_fmm = drive_fmm(trav, wrangler, sources_weights) * 2 * np.pi - now = time.time() - print("Shared memory FMM " + str(now - last_time)) # print(la.norm(pot_fmm - pot_naive, ord=2)) -comm.barrier() -start_time = last_time = time.time() - # Compute FMM using distributed memory parallelism workload_weight = WorkloadWeight( direct=15, - m2l=ORDER*ORDER, - m2p=ORDER*ORDER, - p2l=ORDER*ORDER, - multipole=ORDER*ORDER*5 + m2l=25, + m2p=25, + p2l=25, + multipole=25*5 ) local_tree, local_data, box_bounding_box = generate_local_tree(trav) trav_local, trav_global = generate_local_travs(local_tree, box_bounding_box) -comm.barrier() -last_time = time.time() - - -def fmm_level_to_nterms(tree, level): - return ORDER - - from boxtree.distributed import queue local_wrangler = ( DistributedFMMLibExpansionWranglerCodeContainer() - 
.get_wrangler(queue, local_tree, HELMHOLTZ_K, ORDER)) + .get_wrangler(queue, local_tree, HELMHOLTZ_K, fmm_level_to_nterms)) if rank == 0: global_wrangler = FMMLibExpansionWrangler( @@ -149,9 +117,6 @@ pot_dfmm = drive_dfmm( local_data ) -print("Distributed FMM " + str(time.time() - last_time)) - if rank == 0: - print("Total time " + str(time.time() - start_time)) print((la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=np.inf) / la.norm(pot_fmm, ord=np.inf))) -- GitLab From 6e748b3b902a2f6a280ec8eb6ce5ec09b963546a Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 2 Apr 2018 21:27:05 -0500 Subject: [PATCH 079/260] Memorize local tree and traversal build --- boxtree/distributed.py | 123 +++++++++++++++++++++++++-------------- test/test_distributed.py | 35 +++-------- 2 files changed, 87 insertions(+), 71 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 3322dfe..8d41691 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -121,51 +121,15 @@ class LocalTree(Tree): return tree_to_device(queue, self, additional_fields_to_device) -# {{{ parallel fmm wrangler - -class DistributedFMMLibExpansionWranglerCodeContainer(object): - - @memoize_method - def find_boxes_used_by_subrange_kernel(self): - knl = lp.make_kernel( - [ - "{[ibox]: 0 <= ibox < nboxes}", - "{[iuser]: iuser_start <= iuser < iuser_end}", - ], - """ - for ibox - <> iuser_start = box_to_user_starts[ibox] - <> iuser_end = box_to_user_starts[ibox + 1] - for iuser - <> useri = box_to_user_lists[iuser] - <> in_subrange = subrange_start <= useri and useri < subrange_end - if in_subrange - box_in_subrange[ibox] = 1 - end - end - end - """, - [ - lp.ValueArg("subrange_start, subrange_end", np.int32), - lp.GlobalArg("box_to_user_lists", shape=None), - "..." - ]) - knl = lp.split_iname(knl, "ibox", 16, outer_tag="g.0", inner_tag="l.0") - return knl - - def get_wrangler(self, queue, tree, helmholtz_k, fmm_level_to_nterms): - return DistributedFMMLibExpansionWrangler(self, queue, tree, helmholtz_k, - fmm_level_to_nterms) - +# {{{ distributed fmm wrangler class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): - def __init__(self, code_container, queue, tree, helmholtz_k, - fmm_level_to_nterms=None): - FMMLibExpansionWrangler.__init__(self, tree, helmholtz_k, - fmm_level_to_nterms) + def __init__(self, queue, tree, helmholtz_k, fmm_level_to_nterms=None): + super(DistributedFMMLibExpansionWrangler, self).__init__( + tree, helmholtz_k, fmm_level_to_nterms + ) self.queue = queue - self.code_container = code_container def slice_mpoles(self, mpoles, slice_indices): if len(slice_indices) == 0: @@ -217,9 +181,37 @@ class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): def empty_box_in_subrange_mask(self): return cl.array.empty(self.queue, self.tree.nboxes, dtype=np.int8) + @memoize_method + def find_boxes_used_by_subrange_kernel(self): + knl = lp.make_kernel( + [ + "{[ibox]: 0 <= ibox < nboxes}", + "{[iuser]: iuser_start <= iuser < iuser_end}", + ], + """ + for ibox + <> iuser_start = box_to_user_starts[ibox] + <> iuser_end = box_to_user_starts[ibox + 1] + for iuser + <> useri = box_to_user_lists[iuser] + <> in_subrange = subrange_start <= useri and useri < subrange_end + if in_subrange + box_in_subrange[ibox] = 1 + end + end + end + """, + [ + lp.ValueArg("subrange_start, subrange_end", np.int32), + lp.GlobalArg("box_to_user_lists", shape=None), + "..." 
+ ]) + knl = lp.split_iname(knl, "ibox", 16, outer_tag="g.0", inner_tag="l.0") + return knl + def find_boxes_used_by_subrange(self, box_in_subrange, subrange, box_to_user_starts, box_to_user_lists): - knl = self.code_container.find_boxes_used_by_subrange_kernel() + knl = self.find_boxes_used_by_subrange_kernel() knl(self.queue, subrange_start=subrange[0], subrange_end=subrange[1], @@ -1131,9 +1123,9 @@ def get_gen_local_weights_helper(queue, particle_dtype, weight_dtype): return gen_local_weights -def drive_dfmm(wrangler, trav_local, global_wrangler, trav_global, source_weights, - local_data, comm=MPI.COMM_WORLD, - _communicate_mpoles_via_allreduce=False): +def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_weights, + local_data, comm=MPI.COMM_WORLD, + _communicate_mpoles_via_allreduce=False): # Get MPI information current_rank = comm.Get_rank() total_rank = comm.Get_size() @@ -1355,3 +1347,44 @@ def drive_dfmm(wrangler, trav_local, global_wrangler, trav_global, source_weight logger.info("fmm complete") return result + + +class DistributedFMMInfo(object): + + def __init__(self, global_trav, distributed_expansion_wrangler_factory, + comm=MPI.COMM_WORLD): + self.global_trav = global_trav + self.distributed_expansion_wrangler_factory = \ + distributed_expansion_wrangler_factory + self.comm = comm + + @memoize_method + def get_local_tree(self): + return generate_local_tree(self.global_trav) + + @memoize_method + def get_local_trav(self): + local_tree, _, box_bounding_box = self.get_local_tree() + return generate_local_travs(local_tree, box_bounding_box) + + @memoize_method + def get_local_expansion_wrangler(self): + local_tree, _, _ = self.get_local_tree() + return self.distributed_expansion_wrangler_factory(local_tree) + + @memoize_method + def get_global_expansion_wrangler(self): + rank = self.comm.Get_rank() + if rank == 0: + return self.distributed_expansion_wrangler_factory(self.global_trav.tree) + else: + return None + + def drive_dfmm(self, source_weights): + _, local_data, _ = self.get_local_tree() + trav_local, trav_global = self.get_local_trav() + local_wrangler = self.get_local_expansion_wrangler() + global_wrangler = self.get_global_expansion_wrangler() + pot = calculate_pot(local_wrangler, trav_local, global_wrangler, trav_global, + source_weights, local_data) + return pot diff --git a/test/test_distributed.py b/test/test_distributed.py index 10591df..c4f1cd5 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -1,10 +1,8 @@ import numpy as np import sys from mpi4py import MPI -from boxtree.distributed import ( - generate_local_tree, generate_local_travs, drive_dfmm, WorkloadWeight, - DistributedFMMLibExpansionWranglerCodeContainer -) +from boxtree.distributed import (DistributedFMMInfo, + DistributedFMMLibExpansionWrangler) import numpy.linalg as la from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler @@ -90,32 +88,17 @@ if rank == 0: # print(la.norm(pot_fmm - pot_naive, ord=2)) # Compute FMM using distributed memory parallelism -workload_weight = WorkloadWeight( - direct=15, - m2l=25, - m2p=25, - p2l=25, - multipole=25*5 -) +from boxtree.distributed import queue -local_tree, local_data, box_bounding_box = generate_local_tree(trav) -trav_local, trav_global = generate_local_travs(local_tree, box_bounding_box) -from boxtree.distributed import queue -local_wrangler = ( - DistributedFMMLibExpansionWranglerCodeContainer() - .get_wrangler(queue, local_tree, HELMHOLTZ_K, fmm_level_to_nterms)) +def 
distributed_expansion_wrangler_factory(tree): + return DistributedFMMLibExpansionWrangler( + queue, tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) -if rank == 0: - global_wrangler = FMMLibExpansionWrangler( - trav.tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) -else: - global_wrangler = None -pot_dfmm = drive_dfmm( - local_wrangler, trav_local, global_wrangler, trav_global, sources_weights, - local_data -) +distribued_fmm_info = DistributedFMMInfo( + trav, distributed_expansion_wrangler_factory, comm=comm) +pot_dfmm = distribued_fmm_info.drive_dfmm(sources_weights) if rank == 0: print((la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=np.inf) / -- GitLab From de99b949c0cd1c26ecc326730fd159e562560d78 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 2 Apr 2018 22:21:31 -0500 Subject: [PATCH 080/260] Make test script more concise --- test/test_distributed.py | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) diff --git a/test/test_distributed.py b/test/test_distributed.py index c4f1cd5..4ef4c21 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -1,8 +1,7 @@ import numpy as np -import sys from mpi4py import MPI -from boxtree.distributed import (DistributedFMMInfo, - DistributedFMMLibExpansionWrangler) +from boxtree.distributed import ( + DistributedFMMInfo, DistributedFMMLibExpansionWrangler) import numpy.linalg as la from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler @@ -19,7 +18,6 @@ rank = comm.Get_rank() # Initialization trav = None sources_weights = None -wrangler = None HELMHOLTZ_K = 0 @@ -38,8 +36,7 @@ if rank == 0: # Generate random particles and source weights from boxtree.tools import make_normal_particle_array as p_normal sources = p_normal(queue, nsources, dims, dtype, seed=15) - targets = (p_normal(queue, ntargets, dims, dtype, seed=18) + - np.array([2, 0, 0])[:dims]) + targets = p_normal(queue, ntargets, dims, dtype, seed=18) from boxtree.tools import particle_array_to_host sources_host = particle_array_to_host(sources) @@ -53,19 +50,6 @@ if rank == 0: rng = PhiloxGenerator(queue.context, seed=22) target_radii = rng.uniform(queue, ntargets, a=0, b=0.05, dtype=np.float64).get() - # Display sources and targets - if "--display" in sys.argv: - import matplotlib.pyplot as plt - plt.plot(sources_host[:, 0], sources_host[:, 1], "bo") - plt.plot(targets_host[:, 0], targets_host[:, 1], "ro") - plt.show() - - # Calculate potentials using direct evaluation - # distances = la.norm(sources_host.reshape(1, nsources, 2) - \ - # targets_host.reshape(ntargets, 1, 2), - # ord=2, axis=2) - # pot_naive = np.sum(-np.log(distances)*sources_weights, axis=1) - # Build the tree and interaction lists from boxtree import TreeBuilder tb = TreeBuilder(ctx) @@ -85,8 +69,6 @@ if rank == 0: from boxtree.fmm import drive_fmm pot_fmm = drive_fmm(trav, wrangler, sources_weights) * 2 * np.pi - # print(la.norm(pot_fmm - pot_naive, ord=2)) - # Compute FMM using distributed memory parallelism from boxtree.distributed import queue -- GitLab From a3189194d3c985e8fec80db54ba5e9e4a3432bab Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 4 Apr 2018 11:31:04 -0500 Subject: [PATCH 081/260] Revert memorization to prevent potential deadlock --- boxtree/distributed.py | 40 +++++++++++++--------------------------- 1 file changed, 13 insertions(+), 27 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 8d41691..146c941 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -1358,33 +1358,19 @@ class 
DistributedFMMInfo(object): distributed_expansion_wrangler_factory self.comm = comm - @memoize_method - def get_local_tree(self): - return generate_local_tree(self.global_trav) - - @memoize_method - def get_local_trav(self): - local_tree, _, box_bounding_box = self.get_local_tree() - return generate_local_travs(local_tree, box_bounding_box) - - @memoize_method - def get_local_expansion_wrangler(self): - local_tree, _, _ = self.get_local_tree() - return self.distributed_expansion_wrangler_factory(local_tree) - - @memoize_method - def get_global_expansion_wrangler(self): - rank = self.comm.Get_rank() - if rank == 0: - return self.distributed_expansion_wrangler_factory(self.global_trav.tree) + self.local_tree, self.local_data, self.box_bounding_box = \ + generate_local_tree(self.global_trav) + self.trav_local, self.trav_global = generate_local_travs( + self.local_tree, self.box_bounding_box) + self.local_wrangler = self.distributed_expansion_wrangler_factory( + self.local_tree) + if self.comm.Get_rank() == 0: + self.global_wrangler = self.distributed_expansion_wrangler_factory( + self.global_trav.tree) else: - return None + self.global_wrangler = None def drive_dfmm(self, source_weights): - _, local_data, _ = self.get_local_tree() - trav_local, trav_global = self.get_local_trav() - local_wrangler = self.get_local_expansion_wrangler() - global_wrangler = self.get_global_expansion_wrangler() - pot = calculate_pot(local_wrangler, trav_local, global_wrangler, trav_global, - source_weights, local_data) - return pot + return calculate_pot( + self.local_wrangler, self.trav_local, self.global_wrangler, + self.trav_global, source_weights, self.local_data) -- GitLab From 17ee05b0ed9a3eeaeb592abe8c43e8c537a91de8 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 11 Apr 2018 10:21:18 -0500 Subject: [PATCH 082/260] Support well_sep_is_n_away flag --- boxtree/distributed.py | 11 +++++++---- test/test_distributed.py | 4 ++-- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 146c941..0dc8ff4 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -903,7 +903,9 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): return local_tree, local_data, box_bounding_box -def generate_local_travs(local_tree, box_bounding_box=None, comm=MPI.COMM_WORLD): +def generate_local_travs( + local_tree, box_bounding_box=None, comm=MPI.COMM_WORLD, + well_sep_is_n_away=1): d_tree = local_tree.to_device(queue) # Modify box flags for targets @@ -933,7 +935,7 @@ def generate_local_travs(local_tree, box_bounding_box=None, comm=MPI.COMM_WORLD) d_tree.box_flags) from boxtree.traversal import FMMTraversalBuilder - tg = FMMTraversalBuilder(queue.context) + tg = FMMTraversalBuilder(queue.context, well_sep_is_n_away=well_sep_is_n_away) d_trav_global, _ = tg(queue, d_tree, debug=True, box_bounding_box=box_bounding_box) trav_global = d_trav_global.get(queue=queue) @@ -1352,7 +1354,7 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei class DistributedFMMInfo(object): def __init__(self, global_trav, distributed_expansion_wrangler_factory, - comm=MPI.COMM_WORLD): + comm=MPI.COMM_WORLD, well_sep_is_n_away=1): self.global_trav = global_trav self.distributed_expansion_wrangler_factory = \ distributed_expansion_wrangler_factory @@ -1361,7 +1363,8 @@ class DistributedFMMInfo(object): self.local_tree, self.local_data, self.box_bounding_box = \ generate_local_tree(self.global_trav) self.trav_local, self.trav_global = 
generate_local_travs( - self.local_tree, self.box_bounding_box) + self.local_tree, self.box_bounding_box, comm=comm, + well_sep_is_n_away=well_sep_is_n_away) self.local_wrangler = self.distributed_expansion_wrangler_factory( self.local_tree) if self.comm.Get_rank() == 0: diff --git a/test/test_distributed.py b/test/test_distributed.py index 4ef4c21..3768d99 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -57,7 +57,7 @@ if rank == 0: stick_out_factor=0.25, max_particles_in_box=30, debug=True) from boxtree.traversal import FMMTraversalBuilder - tg = FMMTraversalBuilder(ctx) + tg = FMMTraversalBuilder(ctx, well_sep_is_n_away=2) d_trav, _ = tg(queue, tree, debug=True) trav = d_trav.get(queue=queue) @@ -79,7 +79,7 @@ def distributed_expansion_wrangler_factory(tree): distribued_fmm_info = DistributedFMMInfo( - trav, distributed_expansion_wrangler_factory, comm=comm) + trav, distributed_expansion_wrangler_factory, comm=comm, well_sep_is_n_away=2) pot_dfmm = distribued_fmm_info.drive_dfmm(sources_weights) if rank == 0: -- GitLab From 960a2a208e94561bbef2e47939195eed3bc56fde Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 12 Apr 2018 09:36:58 -0500 Subject: [PATCH 083/260] Fix flake8 --- boxtree/distributed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 0dc8ff4..31ade59 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -2,7 +2,7 @@ from __future__ import division from mpi4py import MPI import numpy as np import loopy as lp -from loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_1 +from loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_1 # noqa: F401 import pyopencl as cl from mako.template import Template from pyopencl.tools import dtype_to_ctype -- GitLab From ca1ef41b4207e025bf93db1e148986b6d10fc4f6 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 3 May 2018 20:27:46 -0500 Subject: [PATCH 084/260] Handle list 3, 4 near merged with list 1 --- boxtree/distributed.py | 54 +++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 25 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 31ade59..741631e 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -271,7 +271,8 @@ def partition_work(traversal, total_rank, workload_weight): workload[box_idx] += (end - start) * box_ntargets # workload for list 3 near - if tree.targets_have_extent: + if tree.targets_have_extent and \ + traversal.from_sep_close_smaller_starts is not None: for itarget_box, box_idx in enumerate(traversal.target_boxes): box_ntargets = tree.box_target_counts_nonchild[box_idx] start = traversal.from_sep_close_smaller_starts[itarget_box] @@ -292,7 +293,8 @@ def partition_work(traversal, total_rank, workload_weight): particle_count += tree.box_source_counts_nonchild[far_box_id] workload[box_idx] += particle_count * workload_weight.p2l - if tree.targets_have_extent: + if tree.targets_have_extent and \ + traversal.from_sep_close_bigger_starts is not None: box_ntargets = tree.box_target_counts_nonchild[box_idx] start = traversal.from_sep_close_bigger_starts[ itarget_or_target_parent_boxes] @@ -770,31 +772,33 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): range=range(0, traversal.target_or_target_parent_boxes.shape[0])) if tree.targets_have_extent: - d_from_sep_close_bigger_starts = cl.array.to_device( - queue, traversal.from_sep_close_bigger_starts) - d_from_sep_close_bigger_lists = cl.array.to_device( - queue, 
traversal.from_sep_close_bigger_lists) - add_interaction_list_boxes( - d_target_or_target_parent_boxes, - responsible_boxes_mask[rank] | ancestor_boxes[rank], - d_from_sep_close_bigger_starts, - d_from_sep_close_bigger_lists, - src_boxes_mask[rank] - ) + if traversal.from_sep_close_bigger_starts is not None: + d_from_sep_close_bigger_starts = cl.array.to_device( + queue, traversal.from_sep_close_bigger_starts) + d_from_sep_close_bigger_lists = cl.array.to_device( + queue, traversal.from_sep_close_bigger_lists) + add_interaction_list_boxes( + d_target_or_target_parent_boxes, + responsible_boxes_mask[rank] | ancestor_boxes[rank], + d_from_sep_close_bigger_starts, + d_from_sep_close_bigger_lists, + src_boxes_mask[rank] + ) # Add list 3 direct - d_from_sep_close_smaller_starts = cl.array.to_device( - queue, traversal.from_sep_close_smaller_starts) - d_from_sep_close_smaller_lists = cl.array.to_device( - queue, traversal.from_sep_close_smaller_lists) - - add_interaction_list_boxes( - d_target_boxes, - responsible_boxes_mask[rank], - d_from_sep_close_smaller_starts, - d_from_sep_close_smaller_lists, - src_boxes_mask[rank] - ) + if traversal.from_sep_close_smaller_starts is not None: + d_from_sep_close_smaller_starts = cl.array.to_device( + queue, traversal.from_sep_close_smaller_starts) + d_from_sep_close_smaller_lists = cl.array.to_device( + queue, traversal.from_sep_close_smaller_lists) + + add_interaction_list_boxes( + d_target_boxes, + responsible_boxes_mask[rank], + d_from_sep_close_smaller_starts, + d_from_sep_close_smaller_lists, + src_boxes_mask[rank] + ) # {{{ compute box_to_user -- GitLab From 53e5e41a3778dc66b020e7490cb8af4fc4ff9462 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 3 May 2018 21:58:49 -0500 Subject: [PATCH 085/260] Get well_sep_is_n_away from trav object on root --- boxtree/distributed.py | 12 ++++++++++-- test/test_distributed.py | 2 +- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 741631e..3048c78 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -1358,11 +1358,19 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei class DistributedFMMInfo(object): def __init__(self, global_trav, distributed_expansion_wrangler_factory, - comm=MPI.COMM_WORLD, well_sep_is_n_away=1): + comm=MPI.COMM_WORLD): self.global_trav = global_trav self.distributed_expansion_wrangler_factory = \ distributed_expansion_wrangler_factory + self.comm = comm + current_rank = comm.Get_rank() + + if current_rank == 0: + well_sep_is_n_away = global_trav.well_sep_is_n_away + else: + well_sep_is_n_away = None + well_sep_is_n_away = comm.bcast(well_sep_is_n_away, root=0) self.local_tree, self.local_data, self.box_bounding_box = \ generate_local_tree(self.global_trav) @@ -1371,7 +1379,7 @@ class DistributedFMMInfo(object): well_sep_is_n_away=well_sep_is_n_away) self.local_wrangler = self.distributed_expansion_wrangler_factory( self.local_tree) - if self.comm.Get_rank() == 0: + if current_rank == 0: self.global_wrangler = self.distributed_expansion_wrangler_factory( self.global_trav.tree) else: diff --git a/test/test_distributed.py b/test/test_distributed.py index 3768d99..44c67a5 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -79,7 +79,7 @@ def distributed_expansion_wrangler_factory(tree): distribued_fmm_info = DistributedFMMInfo( - trav, distributed_expansion_wrangler_factory, comm=comm, well_sep_is_n_away=2) + trav, distributed_expansion_wrangler_factory, 
comm=comm) pot_dfmm = distribued_fmm_info.drive_dfmm(sources_weights) if rank == 0: -- GitLab From d9d2e0b83910e325f6b19d60a6c0fabcf457a541 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 4 May 2018 00:28:04 -0500 Subject: [PATCH 086/260] Refactor source weight distribution --- boxtree/distributed.py | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 3048c78..3ae1acd 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -1129,26 +1129,22 @@ def get_gen_local_weights_helper(queue, particle_dtype, weight_dtype): return gen_local_weights -def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_weights, - local_data, comm=MPI.COMM_WORLD, - _communicate_mpoles_via_allreduce=False): - # Get MPI information +def distribute_source_weights(source_weights, global_tree, local_data, + comm=MPI.COMM_WORLD): current_rank = comm.Get_rank() total_rank = comm.Get_size() - # {{{ Distribute source weights - if current_rank == 0: weight_req = np.empty((total_rank,), dtype=object) # Convert src_weights to tree order - src_weights = source_weights[global_wrangler.tree.user_source_ids] + src_weights = source_weights[global_tree.user_source_ids] src_weights = cl.array.to_device(queue, src_weights) local_src_weights = np.empty((total_rank,), dtype=object) # Generate local_weights gen_local_weights_helper = get_gen_local_weights_helper( - queue, global_wrangler.tree.particle_id_dtype, src_weights.dtype) + queue, global_tree.particle_id_dtype, src_weights.dtype) for rank in range(total_rank): local_src_weights[rank] = gen_local_weights_helper( src_weights, @@ -1158,14 +1154,32 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei weight_req[rank] = comm.isend(local_src_weights[rank], dest=rank, tag=MPITags["DIST_WEIGHT"]) - # Recieve source weights from root - if current_rank == 0: for rank in range(1, total_rank): weight_req[rank].wait() local_src_weights = local_src_weights[0] else: local_src_weights = comm.recv(source=0, tag=MPITags["DIST_WEIGHT"]) + return local_src_weights + + +def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_weights, + local_data, comm=MPI.COMM_WORLD, + _communicate_mpoles_via_allreduce=False): + # Get MPI information + current_rank = comm.Get_rank() + total_rank = comm.Get_size() + + # {{{ Distribute source weights + + if current_rank == 0: + global_tree = global_wrangler.tree + else: + global_tree = None + + local_src_weights = distribute_source_weights( + source_weights, global_tree, local_data, comm=comm) + # }}} # {{{ "Step 2.1:" Construct local multipoles -- GitLab From c3c06bca2d90c566a4334efedface6de9f5d8e3b Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 4 May 2018 10:32:27 -0500 Subject: [PATCH 087/260] put source weight to tree order before distribute_source_weight --- boxtree/distributed.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 3ae1acd..bfa2bbb 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -1131,23 +1131,25 @@ def get_gen_local_weights_helper(queue, particle_dtype, weight_dtype): def distribute_source_weights(source_weights, global_tree, local_data, comm=MPI.COMM_WORLD): + """ + source_weights: source weights in tree order + global_tree: complete tree structure on root, None otherwise. 
+ local_data: returned from *generate_local_tree* + """ current_rank = comm.Get_rank() total_rank = comm.Get_size() if current_rank == 0: weight_req = np.empty((total_rank,), dtype=object) - - # Convert src_weights to tree order - src_weights = source_weights[global_tree.user_source_ids] - src_weights = cl.array.to_device(queue, src_weights) local_src_weights = np.empty((total_rank,), dtype=object) # Generate local_weights + source_weights = cl.array.to_device(queue, source_weights) gen_local_weights_helper = get_gen_local_weights_helper( - queue, global_tree.particle_id_dtype, src_weights.dtype) + queue, global_tree.particle_id_dtype, source_weights.dtype) for rank in range(total_rank): local_src_weights[rank] = gen_local_weights_helper( - src_weights, + source_weights, local_data[rank]["src_mask"], local_data[rank]["src_scan"] ) @@ -1174,6 +1176,8 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei if current_rank == 0: global_tree = global_wrangler.tree + # Convert src_weights to tree order + source_weights = source_weights[global_tree.user_source_ids] else: global_tree = None -- GitLab From 6a9bd1e8dd5c1b172456bb1ff0bf1fdb8220d805 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sat, 5 May 2018 20:54:44 -0500 Subject: [PATCH 088/260] Temporarily use one global command queue for worker process instead of an argument --- boxtree/distributed.py | 7 +++---- test/test_distributed.py | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index bfa2bbb..cb5f40a 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -125,11 +125,10 @@ class LocalTree(Tree): class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): - def __init__(self, queue, tree, helmholtz_k, fmm_level_to_nterms=None): + def __init__(self, tree, helmholtz_k, fmm_level_to_nterms=None): super(DistributedFMMLibExpansionWrangler, self).__init__( tree, helmholtz_k, fmm_level_to_nterms ) - self.queue = queue def slice_mpoles(self, mpoles, slice_indices): if len(slice_indices) == 0: @@ -179,7 +178,7 @@ class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): mpole_updates_start = mpole_updates_end def empty_box_in_subrange_mask(self): - return cl.array.empty(self.queue, self.tree.nboxes, dtype=np.int8) + return cl.array.empty(queue, self.tree.nboxes, dtype=np.int8) @memoize_method def find_boxes_used_by_subrange_kernel(self): @@ -212,7 +211,7 @@ class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): def find_boxes_used_by_subrange(self, box_in_subrange, subrange, box_to_user_starts, box_to_user_lists): knl = self.find_boxes_used_by_subrange_kernel() - knl(self.queue, + knl(queue, subrange_start=subrange[0], subrange_end=subrange[1], box_to_user_starts=box_to_user_starts, diff --git a/test/test_distributed.py b/test/test_distributed.py index 44c67a5..f719347 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -75,7 +75,7 @@ from boxtree.distributed import queue def distributed_expansion_wrangler_factory(tree): return DistributedFMMLibExpansionWrangler( - queue, tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) + tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) distribued_fmm_info = DistributedFMMInfo( -- GitLab From 4a12539d2c77890a06aa46c93e0cdf6f4f759647 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 9 May 2018 01:01:41 -0500 Subject: [PATCH 089/260] Refactor code --- boxtree/distributed.py | 303 ++++++++++++++++++++++------------------- 1 file changed, 160 
insertions(+), 143 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index cb5f40a..c8be97d 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -228,7 +228,8 @@ MPITags = dict( DIST_WEIGHT=1, GATHER_POTENTIALS=2, REDUCE_POTENTIALS=3, - REDUCE_INDICES=4 + REDUCE_INDICES=4, + USER_TARGET_TO_CENTER=5 ) WorkloadWeight = namedtuple('Workload', ['direct', 'm2l', 'm2p', 'p2l', 'multipole']) @@ -346,9 +347,7 @@ def partition_work(traversal, total_rank, workload_weight): return responsible_boxes_mask, responsible_boxes_list -def get_gen_local_tree_helper(queue, tree): - d_tree = tree_to_device(queue, tree) - +def get_gen_local_tree_kernels(tree): particle_mask_knl = cl.elementwise.ElementwiseKernel( queue.context, arguments=Template(""" @@ -482,140 +481,152 @@ def get_gen_local_tree_helper(queue, tree): """ ) - def gen_local_tree_helper(src_box_mask, tgt_box_mask, local_tree, local_data): - """ This helper function generates a copy of the tree but with subset of - particles, and fetch the generated fields to *local_tree*. - """ - nsources = tree.nsources - - # source particle mask - src_particle_mask = cl.array.zeros(queue, (nsources,), - dtype=tree.particle_id_dtype) - particle_mask_knl(src_box_mask, - d_tree.box_source_starts, - d_tree.box_source_counts_nonchild, - src_particle_mask) - - # scan of source particle mask - src_particle_scan = cl.array.empty(queue, (nsources + 1,), - dtype=tree.particle_id_dtype) - src_particle_scan[0] = 0 - mask_scan_knl(src_particle_mask, src_particle_scan) - - # local sources - local_nsources = src_particle_scan[-1].get(queue) - local_sources = np.empty((tree.dimensions,), dtype=object) - for i in range(tree.dimensions): - local_sources[i] = cl.array.empty(queue, (local_nsources,), - dtype=tree.coord_dtype) - assert(tree.sources_have_extent is False) - fetch_local_src_knl(src_particle_mask, src_particle_scan, - *d_tree.sources.tolist(), - *local_sources.tolist()) - - # box_source_starts - local_box_source_starts = cl.array.empty(queue, (tree.nboxes,), - dtype=tree.particle_id_dtype) - generate_box_particle_starts(d_tree.box_source_starts, src_particle_scan, - local_box_source_starts) - - # box_source_counts_nonchild - local_box_source_counts_nonchild = cl.array.zeros( - queue, (tree.nboxes,), dtype=tree.particle_id_dtype) - generate_box_particle_counts_nonchild(src_box_mask, - d_tree.box_source_counts_nonchild, - local_box_source_counts_nonchild) - - # box_source_counts_cumul - local_box_source_counts_cumul = cl.array.empty( - queue, (tree.nboxes,), dtype=tree.particle_id_dtype) - generate_box_particle_counts_cumul(d_tree.box_source_counts_cumul, - d_tree.box_source_starts, - local_box_source_counts_cumul, - src_particle_scan) - - ntargets = tree.ntargets - # target particle mask - tgt_particle_mask = cl.array.zeros(queue, (ntargets,), - dtype=tree.particle_id_dtype) - particle_mask_knl(tgt_box_mask, - d_tree.box_target_starts, - d_tree.box_target_counts_nonchild, - tgt_particle_mask) - - # scan of target particle mask - tgt_particle_scan = cl.array.empty(queue, (ntargets + 1,), - dtype=tree.particle_id_dtype) - tgt_particle_scan[0] = 0 - mask_scan_knl(tgt_particle_mask, tgt_particle_scan) - - # local targets - local_ntargets = tgt_particle_scan[-1].get(queue) - local_targets = np.empty((tree.dimensions,), dtype=object) - for i in range(tree.dimensions): - local_targets[i] = cl.array.empty(queue, (local_ntargets,), - dtype=tree.coord_dtype) - if tree.targets_have_extent: - local_target_radii = cl.array.empty(queue, 
(local_ntargets,), - dtype=tree.coord_dtype) - fetch_local_tgt_knl(tgt_particle_mask, tgt_particle_scan, - *d_tree.targets.tolist(), *local_targets.tolist(), - d_tree.target_radii, local_target_radii) - else: - fetch_local_tgt_knl(tgt_particle_mask, tgt_particle_scan, - *d_tree.targets.tolist(), - *local_targets.tolist()) - - # box_target_starts - local_box_target_starts = cl.array.empty(queue, (tree.nboxes,), - dtype=tree.particle_id_dtype) - generate_box_particle_starts(d_tree.box_target_starts, tgt_particle_scan, - local_box_target_starts) - - # box_target_counts_nonchild - local_box_target_counts_nonchild = cl.array.zeros( - queue, (tree.nboxes,), dtype=tree.particle_id_dtype) - generate_box_particle_counts_nonchild(tgt_box_mask, - d_tree.box_target_counts_nonchild, - local_box_target_counts_nonchild) - - # box_target_counts_cumul - local_box_target_counts_cumul = cl.array.empty( - queue, (tree.nboxes,), dtype=tree.particle_id_dtype) - generate_box_particle_counts_cumul(d_tree.box_target_counts_cumul, - d_tree.box_target_starts, - local_box_target_counts_cumul, - tgt_particle_scan) - - # Fetch fields to local_tree - for i in range(tree.dimensions): - local_sources[i] = local_sources[i].get(queue=queue) - local_tree.sources = local_sources - for i in range(tree.dimensions): - local_targets[i] = local_targets[i].get(queue=queue) - local_tree.targets = local_targets - if tree.targets_have_extent: - local_tree.target_radii = local_target_radii.get(queue=queue) - local_tree.box_source_starts = local_box_source_starts.get(queue=queue) - local_tree.box_source_counts_nonchild = \ - local_box_source_counts_nonchild.get(queue=queue) - local_tree.box_source_counts_cumul = \ - local_box_source_counts_cumul.get(queue=queue) - local_tree.box_target_starts = local_box_target_starts.get(queue=queue) - local_tree.box_target_counts_nonchild = \ - local_box_target_counts_nonchild.get(queue=queue) - local_tree.box_target_counts_cumul = \ - local_box_target_counts_cumul.get(queue=queue) - - # Fetch fields to local_data - local_data["src_mask"] = src_particle_mask - local_data["src_scan"] = src_particle_scan - local_data["nsources"] = local_nsources - local_data["tgt_mask"] = tgt_particle_mask - local_data["tgt_scan"] = tgt_particle_scan - local_data["ntargets"] = local_ntargets - - return gen_local_tree_helper + return dict( + particle_mask_knl=particle_mask_knl, + mask_scan_knl=mask_scan_knl, + fetch_local_src_knl=fetch_local_src_knl, + fetch_local_tgt_knl=fetch_local_tgt_knl, + generate_box_particle_starts=generate_box_particle_starts, + generate_box_particle_counts_nonchild=generate_box_particle_counts_nonchild, + generate_box_particle_counts_cumul=generate_box_particle_counts_cumul + ) + + +def gen_local_tree_helper(tree, src_box_mask, tgt_box_mask, local_tree, + local_data, knls): + """ This helper function generates a copy of the tree but with subset of + particles, and fetch the generated fields to *local_tree*. 
+ """ + d_tree = tree_to_device(queue, tree) + nsources = tree.nsources + + # source particle mask + src_particle_mask = cl.array.zeros(queue, (nsources,), + dtype=tree.particle_id_dtype) + knls["particle_mask_knl"](src_box_mask, + d_tree.box_source_starts, + d_tree.box_source_counts_nonchild, + src_particle_mask) + + # scan of source particle mask + src_particle_scan = cl.array.empty(queue, (nsources + 1,), + dtype=tree.particle_id_dtype) + src_particle_scan[0] = 0 + knls["mask_scan_knl"](src_particle_mask, src_particle_scan) + + # local sources + local_nsources = src_particle_scan[-1].get(queue) + local_sources = np.empty((tree.dimensions,), dtype=object) + for i in range(tree.dimensions): + local_sources[i] = cl.array.empty(queue, (local_nsources,), + dtype=tree.coord_dtype) + assert(tree.sources_have_extent is False) + knls["fetch_local_src_knl"](src_particle_mask, src_particle_scan, + *d_tree.sources.tolist(), + *local_sources.tolist()) + + # box_source_starts + local_box_source_starts = cl.array.empty(queue, (tree.nboxes,), + dtype=tree.particle_id_dtype) + knls["generate_box_particle_starts"](d_tree.box_source_starts, src_particle_scan, + local_box_source_starts) + + # box_source_counts_nonchild + local_box_source_counts_nonchild = cl.array.zeros( + queue, (tree.nboxes,), dtype=tree.particle_id_dtype) + knls["generate_box_particle_counts_nonchild"](src_box_mask, + d_tree.box_source_counts_nonchild, + local_box_source_counts_nonchild) + + # box_source_counts_cumul + local_box_source_counts_cumul = cl.array.empty( + queue, (tree.nboxes,), dtype=tree.particle_id_dtype) + knls["generate_box_particle_counts_cumul"](d_tree.box_source_counts_cumul, + d_tree.box_source_starts, + local_box_source_counts_cumul, + src_particle_scan) + + ntargets = tree.ntargets + # target particle mask + tgt_particle_mask = cl.array.zeros(queue, (ntargets,), + dtype=tree.particle_id_dtype) + knls["particle_mask_knl"](tgt_box_mask, + d_tree.box_target_starts, + d_tree.box_target_counts_nonchild, + tgt_particle_mask) + + # scan of target particle mask + tgt_particle_scan = cl.array.empty(queue, (ntargets + 1,), + dtype=tree.particle_id_dtype) + tgt_particle_scan[0] = 0 + knls["mask_scan_knl"](tgt_particle_mask, tgt_particle_scan) + + # local targets + local_ntargets = tgt_particle_scan[-1].get(queue) + local_targets = np.empty((tree.dimensions,), dtype=object) + for i in range(tree.dimensions): + local_targets[i] = cl.array.empty(queue, (local_ntargets,), + dtype=tree.coord_dtype) + if tree.targets_have_extent: + local_target_radii = cl.array.empty(queue, (local_ntargets,), + dtype=tree.coord_dtype) + knls["fetch_local_tgt_knl"](tgt_particle_mask, tgt_particle_scan, + *d_tree.targets.tolist(), + *local_targets.tolist(), + d_tree.target_radii, local_target_radii) + else: + knls["fetch_local_tgt_knl"](tgt_particle_mask, tgt_particle_scan, + *d_tree.targets.tolist(), + *local_targets.tolist()) + + # box_target_starts + local_box_target_starts = cl.array.empty(queue, (tree.nboxes,), + dtype=tree.particle_id_dtype) + knls["generate_box_particle_starts"](d_tree.box_target_starts, tgt_particle_scan, + local_box_target_starts) + + # box_target_counts_nonchild + local_box_target_counts_nonchild = cl.array.zeros( + queue, (tree.nboxes,), dtype=tree.particle_id_dtype) + knls["generate_box_particle_counts_nonchild"](tgt_box_mask, + d_tree.box_target_counts_nonchild, + local_box_target_counts_nonchild) + + # box_target_counts_cumul + local_box_target_counts_cumul = cl.array.empty( + queue, (tree.nboxes,), 
dtype=tree.particle_id_dtype) + knls["generate_box_particle_counts_cumul"](d_tree.box_target_counts_cumul, + d_tree.box_target_starts, + local_box_target_counts_cumul, + tgt_particle_scan) + + # Fetch fields to local_tree + for i in range(tree.dimensions): + local_sources[i] = local_sources[i].get(queue=queue) + local_tree.sources = local_sources + for i in range(tree.dimensions): + local_targets[i] = local_targets[i].get(queue=queue) + local_tree.targets = local_targets + if tree.targets_have_extent: + local_tree.target_radii = local_target_radii.get(queue=queue) + local_tree.box_source_starts = local_box_source_starts.get(queue=queue) + local_tree.box_source_counts_nonchild = \ + local_box_source_counts_nonchild.get(queue=queue) + local_tree.box_source_counts_cumul = \ + local_box_source_counts_cumul.get(queue=queue) + local_tree.box_target_starts = local_box_target_starts.get(queue=queue) + local_tree.box_target_counts_nonchild = \ + local_box_target_counts_nonchild.get(queue=queue) + local_tree.box_target_counts_cumul = \ + local_box_target_counts_cumul.get(queue=queue) + + # Fetch fields to local_data + local_data["src_mask"] = src_particle_mask + local_data["src_scan"] = src_particle_scan + local_data["nsources"] = local_nsources + local_data["tgt_mask"] = tgt_particle_mask + local_data["tgt_scan"] = tgt_particle_scan + local_data["ntargets"] = local_ntargets def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): @@ -672,6 +683,8 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): else: local_data = None + knls = None + if current_rank == 0: tree = traversal.tree local_tree = np.empty((total_rank,), dtype=object) @@ -851,10 +864,12 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): # }}} + # kernels for generating local trees + knls = get_gen_local_tree_kernels(tree) + # request objects for non-blocking communication tree_req = np.empty((total_rank,), dtype=object) - gen_local_tree_helper = get_gen_local_tree_helper(queue, tree) for rank in range(total_rank): local_tree[rank] = LocalTree.copy_from_global_tree( tree, responsible_boxes_list[rank].get(), @@ -865,10 +880,12 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): local_tree[rank].user_source_ids = None local_tree[rank].sorted_target_ids = None - gen_local_tree_helper(src_boxes_mask[rank], + gen_local_tree_helper(tree, + src_boxes_mask[rank], responsible_boxes_mask[rank], local_tree[rank], - local_data[rank]) + local_data[rank], + knls) tree_req[rank] = comm.isend(local_tree[rank], dest=rank, tag=MPITags["DIST_TREE"]) @@ -903,7 +920,7 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): "max": box_target_bounding_box_max } - return local_tree, local_data, box_bounding_box + return local_tree, local_data, box_bounding_box, knls def generate_local_travs( @@ -1389,7 +1406,7 @@ class DistributedFMMInfo(object): well_sep_is_n_away = None well_sep_is_n_away = comm.bcast(well_sep_is_n_away, root=0) - self.local_tree, self.local_data, self.box_bounding_box = \ + self.local_tree, self.local_data, self.box_bounding_box, _ = \ generate_local_tree(self.global_trav) self.trav_local, self.trav_global = generate_local_travs( self.local_tree, self.box_bounding_box, comm=comm, -- GitLab From 0760dbe70f524fc42076d5fa6e6fe8e7e59b0ad2 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 9 May 2018 11:52:57 -0500 Subject: [PATCH 090/260] Add tgt_box_mask to local_data --- boxtree/distributed.py | 6 +++--- 1 
file changed, 3 insertions(+), 3 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index c8be97d..62caf95 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -228,8 +228,7 @@ MPITags = dict( DIST_WEIGHT=1, GATHER_POTENTIALS=2, REDUCE_POTENTIALS=3, - REDUCE_INDICES=4, - USER_TARGET_TO_CENTER=5 + REDUCE_INDICES=4 ) WorkloadWeight = namedtuple('Workload', ['direct', 'm2l', 'm2p', 'p2l', 'multipole']) @@ -502,7 +501,7 @@ def gen_local_tree_helper(tree, src_box_mask, tgt_box_mask, local_tree, # source particle mask src_particle_mask = cl.array.zeros(queue, (nsources,), - dtype=tree.particle_id_dtype) + dtype=tree.particle_id_dtype) knls["particle_mask_knl"](src_box_mask, d_tree.box_source_starts, d_tree.box_source_counts_nonchild, @@ -627,6 +626,7 @@ def gen_local_tree_helper(tree, src_box_mask, tgt_box_mask, local_tree, local_data["tgt_mask"] = tgt_particle_mask local_data["tgt_scan"] = tgt_particle_scan local_data["ntargets"] = local_ntargets + local_data["tgt_box_mask"] = tgt_box_mask def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): -- GitLab From 3ad1e904397dbf3667089599026df2f6f1865aa6 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 17 May 2018 23:35:53 -0500 Subject: [PATCH 091/260] Add more options for generate_local_travs --- boxtree/distributed.py | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 62caf95..ff9bd07 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -925,7 +925,9 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): def generate_local_travs( local_tree, box_bounding_box=None, comm=MPI.COMM_WORLD, - well_sep_is_n_away=1): + well_sep_is_n_away=1, from_sep_smaller_crit=None, + _from_sep_smaller_min_nsources_cumul=None, + merge_close_lists=False): d_tree = local_tree.to_device(queue) # Modify box flags for targets @@ -955,9 +957,20 @@ def generate_local_travs( d_tree.box_flags) from boxtree.traversal import FMMTraversalBuilder - tg = FMMTraversalBuilder(queue.context, well_sep_is_n_away=well_sep_is_n_away) - d_trav_global, _ = tg(queue, d_tree, debug=True, - box_bounding_box=box_bounding_box) + tg = FMMTraversalBuilder( + queue.context, + well_sep_is_n_away=well_sep_is_n_away, + from_sep_smaller_crit=from_sep_smaller_crit + ) + d_trav_global, _ = tg( + queue, d_tree, debug=True, + box_bounding_box=box_bounding_box, + _from_sep_smaller_min_nsources_cumul=_from_sep_smaller_min_nsources_cumul + ) + + if merge_close_lists and d_tree.targets_have_extent: + d_trav_global = d_trav_global.merge_close_lists(queue) + trav_global = d_trav_global.get(queue=queue) # Source flags @@ -988,8 +1001,15 @@ def generate_local_travs( modify_own_sources_knl(d_tree.responsible_boxes_list, d_tree.box_flags) modify_child_sources_knl(d_tree.ancestor_mask, d_tree.box_flags) - d_trav_local, _ = tg(queue, d_tree, debug=True, - box_bounding_box=box_bounding_box) + d_trav_local, _ = tg( + queue, d_tree, debug=True, + box_bounding_box=box_bounding_box, + _from_sep_smaller_min_nsources_cumul=_from_sep_smaller_min_nsources_cumul + ) + + if merge_close_lists and d_tree.targets_have_extent: + d_trav_local = d_trav_local.merge_close_lists(queue) + trav_local = d_trav_local.get(queue=queue) return trav_local, trav_global -- GitLab From 1f05443901553f9c1197a3210143dac69793684c Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 23 May 2018 14:28:01 -0500 Subject: [PATCH 092/260] 
_from_sep_smaller_min_nsources_cumul is not compatible with distributed implementation --- boxtree/distributed.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index ff9bd07..99d42d5 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -926,7 +926,6 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): def generate_local_travs( local_tree, box_bounding_box=None, comm=MPI.COMM_WORLD, well_sep_is_n_away=1, from_sep_smaller_crit=None, - _from_sep_smaller_min_nsources_cumul=None, merge_close_lists=False): d_tree = local_tree.to_device(queue) @@ -964,8 +963,7 @@ def generate_local_travs( ) d_trav_global, _ = tg( queue, d_tree, debug=True, - box_bounding_box=box_bounding_box, - _from_sep_smaller_min_nsources_cumul=_from_sep_smaller_min_nsources_cumul + box_bounding_box=box_bounding_box ) if merge_close_lists and d_tree.targets_have_extent: @@ -1003,8 +1001,7 @@ def generate_local_travs( d_trav_local, _ = tg( queue, d_tree, debug=True, - box_bounding_box=box_bounding_box, - _from_sep_smaller_min_nsources_cumul=_from_sep_smaller_min_nsources_cumul + box_bounding_box=box_bounding_box ) if merge_close_lists and d_tree.targets_have_extent: -- GitLab From 4f957820b366b8e3da45e04ca1c2086a47305682 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sat, 26 May 2018 00:28:54 -0500 Subject: [PATCH 093/260] Use Python's standard logging module --- boxtree/distributed.py | 54 +++++++++++++++++++++++++++------------- test/test_distributed.py | 6 +++++ 2 files changed, 43 insertions(+), 17 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 99d42d5..1506b35 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -11,6 +11,7 @@ from pytools import memoize_in, memoize_method from boxtree import Tree from collections import namedtuple from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler +import time __copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ Copyright (C) 2017 Hao Gao" @@ -40,11 +41,6 @@ logger = logging.getLogger(__name__) ctx = cl.create_some_context() queue = cl.CommandQueue(ctx) -print("Process %d of %d on %s with ctx %s.\n" % ( - MPI.COMM_WORLD.Get_rank(), - MPI.COMM_WORLD.Get_size(), - MPI.Get_processor_name(), - queue.context.devices)) def tree_to_device(queue, tree, additional_fields_to_device=[]): @@ -630,6 +626,22 @@ def gen_local_tree_helper(tree, src_box_mask, tgt_box_mask, local_tree, def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): + + # Get MPI information + current_rank = comm.Get_rank() + total_rank = comm.Get_size() + + # Log OpenCL context information + logger.info("Process %d of %d on %s with ctx %s." 
% ( + comm.Get_rank(), + comm.Get_size(), + MPI.Get_processor_name(), + queue.context.devices) + ) + + if current_rank == 0: + start_time = time.time() + # {{{ kernel to mark if a box mpole is used by a process via an interaction list @memoize_in(generate_local_tree, "loopy_cache") @@ -668,10 +680,6 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): # }}} - # Get MPI information - current_rank = comm.Get_rank() - total_rank = comm.Get_size() - # {{{ Construct local tree for each rank on root if current_rank == 0: local_data = np.empty((total_rank,), dtype=object) @@ -920,6 +928,11 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): "max": box_target_bounding_box_max } + if current_rank == 0: + logger.info("Distribute local tree in {} sec.".format( + str(time.time() - start_time)) + ) + return local_tree, local_data, box_bounding_box, knls @@ -927,6 +940,9 @@ def generate_local_travs( local_tree, box_bounding_box=None, comm=MPI.COMM_WORLD, well_sep_is_n_away=1, from_sep_smaller_crit=None, merge_close_lists=False): + + start_time = time.time() + d_tree = local_tree.to_device(queue) # Modify box flags for targets @@ -1009,6 +1025,10 @@ def generate_local_travs( trav_local = d_trav_local.get(queue=queue) + logger.info("Generate local traversal in {} sec.".format( + str(time.time() - start_time)) + ) + return trav_local, trav_global @@ -1124,7 +1144,7 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): comm_pattern.advance() stats["total_time"] = time() - t_start - logger.debug("communicate multipoles: done in %.2f s" % stats["total_time"]) + logger.info("communicate multipoles: done in %.2f s" % stats["total_time"]) if return_stats: return stats @@ -1201,10 +1221,14 @@ def distribute_source_weights(source_weights, global_tree, local_data, def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_weights, local_data, comm=MPI.COMM_WORLD, _communicate_mpoles_via_allreduce=False): + # Get MPI information current_rank = comm.Get_rank() total_rank = comm.Get_size() + if current_rank == 0: + start_time = time.time() + # {{{ Distribute source weights if current_rank == 0: @@ -1221,9 +1245,7 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei # {{{ "Step 2.1:" Construct local multipoles - import time logger.debug("construct local multipoles") - mpole_exps = wrangler.form_multipoles( trav_local.level_start_source_box_nrs, trav_local.source_boxes, @@ -1245,8 +1267,6 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei # {{{ Communicate mpoles - last_time = time.time() - if _communicate_mpoles_via_allreduce: mpole_exps_all = np.zeros_like(mpole_exps) comm.Allreduce(mpole_exps, mpole_exps_all) @@ -1254,8 +1274,6 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei else: communicate_mpoles(wrangler, comm, trav_local, mpole_exps) - print("Communication: " + str(time.time()-last_time)) - # }}} # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") @@ -1401,7 +1419,9 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei logger.debug("finalize potentials") result = global_wrangler.finalize_potentials(result) - logger.info("fmm complete") + logger.info("Distributed FMM evaluation completes in {} sec.".format( + str(time.time() - start_time) + )) return result diff --git a/test/test_distributed.py b/test/test_distributed.py index f719347..d2c1827 100644 --- 
a/test/test_distributed.py +++ b/test/test_distributed.py @@ -4,6 +4,12 @@ from boxtree.distributed import ( DistributedFMMInfo, DistributedFMMLibExpansionWrangler) import numpy.linalg as la from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler +import logging +import os + +# Configure logging +logging.basicConfig(level=os.environ.get("LOGLEVEL", "WARNING")) +logging.getLogger("boxtree.distributed").setLevel(logging.INFO) # Parameters dims = 3 -- GitLab From 86cbc1025ed36062648548af315808015cfe5c13 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 27 May 2018 16:56:15 -0500 Subject: [PATCH 094/260] Build one traversal object with different source flags instead of two separate traversals --- boxtree/distributed.py | 113 ++++++++++++++++++++--------------------- boxtree/traversal.py | 9 +++- 2 files changed, 61 insertions(+), 61 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 1506b35..a1ba791 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -967,28 +967,13 @@ def generate_local_travs( HAS_CHILD_TARGETS=("(" + box_flag_t + ") " + str(box_flags_enum.HAS_CHILD_TARGETS))) ) + modify_target_flags_knl(d_tree.box_target_counts_nonchild, d_tree.box_target_counts_cumul, d_tree.box_flags) - from boxtree.traversal import FMMTraversalBuilder - tg = FMMTraversalBuilder( - queue.context, - well_sep_is_n_away=well_sep_is_n_away, - from_sep_smaller_crit=from_sep_smaller_crit - ) - d_trav_global, _ = tg( - queue, d_tree, debug=True, - box_bounding_box=box_bounding_box - ) - - if merge_close_lists and d_tree.targets_have_extent: - d_trav_global = d_trav_global.merge_close_lists(queue) - - trav_global = d_trav_global.get(queue=queue) - - # Source flags - d_tree.box_flags = d_tree.box_flags & 250 + # Generate local source flags + local_box_flags = d_tree.box_flags & 250 modify_own_sources_knl = cl.elementwise.ElementwiseKernel( queue.context, Template(""" @@ -1001,6 +986,7 @@ def generate_local_travs( """).render(HAS_OWN_SOURCES=("(" + box_flag_t + ") " + str(box_flags_enum.HAS_OWN_SOURCES))) ) + modify_child_sources_knl = cl.elementwise.ElementwiseKernel( queue.context, Template(""" @@ -1012,24 +998,33 @@ def generate_local_travs( """).render(HAS_CHILD_SOURCES=("(" + box_flag_t + ") " + str(box_flags_enum.HAS_CHILD_SOURCES))) ) - modify_own_sources_knl(d_tree.responsible_boxes_list, d_tree.box_flags) - modify_child_sources_knl(d_tree.ancestor_mask, d_tree.box_flags) - d_trav_local, _ = tg( + modify_own_sources_knl(d_tree.responsible_boxes_list, local_box_flags) + modify_child_sources_knl(d_tree.ancestor_mask, local_box_flags) + + from boxtree.traversal import FMMTraversalBuilder + tg = FMMTraversalBuilder( + queue.context, + well_sep_is_n_away=well_sep_is_n_away, + from_sep_smaller_crit=from_sep_smaller_crit + ) + + d_local_trav, _ = tg( queue, d_tree, debug=True, - box_bounding_box=box_bounding_box + box_bounding_box=box_bounding_box, + local_box_flags=local_box_flags ) if merge_close_lists and d_tree.targets_have_extent: - d_trav_local = d_trav_local.merge_close_lists(queue) + d_local_trav = d_local_trav.merge_close_lists(queue) - trav_local = d_trav_local.get(queue=queue) + local_trav = d_local_trav.get(queue=queue) logger.info("Generate local traversal in {} sec.".format( str(time.time() - start_time)) ) - return trav_local, trav_global + return local_trav # {{{ communicate mpoles @@ -1218,7 +1213,7 @@ def distribute_source_weights(source_weights, global_tree, local_data, return local_src_weights -def calculate_pot(wrangler, trav_local, 
global_wrangler, trav_global, source_weights, +def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, local_data, comm=MPI.COMM_WORLD, _communicate_mpoles_via_allreduce=False): @@ -1247,8 +1242,8 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei logger.debug("construct local multipoles") mpole_exps = wrangler.form_multipoles( - trav_local.level_start_source_box_nrs, - trav_local.source_boxes, + local_trav.level_start_source_box_nrs, + local_trav.source_boxes, local_src_weights) # }}} @@ -1257,8 +1252,8 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei logger.debug("propagate multipoles upward") wrangler.coarsen_multipoles( - trav_local.level_start_source_parent_box_nrs, - trav_local.source_parent_boxes, + local_trav.level_start_source_parent_box_nrs, + local_trav.source_parent_boxes, mpole_exps) # mpole_exps is called Phi in [1] @@ -1272,7 +1267,7 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei comm.Allreduce(mpole_exps, mpole_exps_all) mpole_exps = mpole_exps_all else: - communicate_mpoles(wrangler, comm, trav_local, mpole_exps) + communicate_mpoles(wrangler, comm, local_trav, mpole_exps) # }}} @@ -1280,9 +1275,9 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei logger.debug("direct evaluation from neighbor source boxes ('list 1')") potentials = wrangler.eval_direct( - trav_global.target_boxes, - trav_global.neighbor_source_boxes_starts, - trav_global.neighbor_source_boxes_lists, + local_trav.target_boxes, + local_trav.neighbor_source_boxes_starts, + local_trav.neighbor_source_boxes_lists, local_src_weights) # these potentials are called alpha in [1] @@ -1293,10 +1288,10 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei logger.debug("translate separated siblings' ('list 2') mpoles to local") local_exps = wrangler.multipole_to_local( - trav_global.level_start_target_or_target_parent_box_nrs, - trav_global.target_or_target_parent_boxes, - trav_global.from_sep_siblings_starts, - trav_global.from_sep_siblings_lists, + local_trav.level_start_target_or_target_parent_box_nrs, + local_trav.target_or_target_parent_boxes, + local_trav.from_sep_siblings_starts, + local_trav.from_sep_siblings_lists, mpole_exps) # local_exps represents both Gamma and Delta in [1] @@ -1311,19 +1306,19 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei # contribution *out* of the downward-propagating local expansions) potentials = potentials + wrangler.eval_multipoles( - trav_global.target_boxes_sep_smaller_by_source_level, - trav_global.from_sep_smaller_by_level, + local_trav.target_boxes_sep_smaller_by_source_level, + local_trav.from_sep_smaller_by_level, mpole_exps) # these potentials are called beta in [1] - if trav_global.from_sep_close_smaller_starts is not None: + if local_trav.from_sep_close_smaller_starts is not None: logger.debug("evaluate separated close smaller interactions directly " "('list 3 close')") potentials = potentials + wrangler.eval_direct( - trav_global.target_boxes, - trav_global.from_sep_close_smaller_starts, - trav_global.from_sep_close_smaller_lists, + local_trav.target_boxes, + local_trav.from_sep_close_smaller_starts, + local_trav.from_sep_close_smaller_lists, local_src_weights) # }}} @@ -1333,20 +1328,20 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei logger.debug("form locals for separated bigger source boxes ('list 4 far')") 
local_exps = local_exps + wrangler.form_locals( - trav_global.level_start_target_or_target_parent_box_nrs, - trav_global.target_or_target_parent_boxes, - trav_global.from_sep_bigger_starts, - trav_global.from_sep_bigger_lists, + local_trav.level_start_target_or_target_parent_box_nrs, + local_trav.target_or_target_parent_boxes, + local_trav.from_sep_bigger_starts, + local_trav.from_sep_bigger_lists, local_src_weights) - if trav_global.from_sep_close_bigger_starts is not None: + if local_trav.from_sep_close_bigger_starts is not None: logger.debug("evaluate separated close bigger interactions directly " "('list 4 close')") potentials = potentials + wrangler.eval_direct( - trav_global.target_or_target_parent_boxes, - trav_global.from_sep_close_bigger_starts, - trav_global.from_sep_close_bigger_lists, + local_trav.target_or_target_parent_boxes, + local_trav.from_sep_close_bigger_starts, + local_trav.from_sep_close_bigger_lists, local_src_weights) # }}} @@ -1356,8 +1351,8 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei logger.debug("propagate local_exps downward") wrangler.refine_locals( - trav_global.level_start_target_or_target_parent_box_nrs, - trav_global.target_or_target_parent_boxes, + local_trav.level_start_target_or_target_parent_box_nrs, + local_trav.target_or_target_parent_boxes, local_exps) # }}} @@ -1366,8 +1361,8 @@ def calculate_pot(wrangler, trav_local, global_wrangler, trav_global, source_wei logger.debug("evaluate locals") potentials = potentials + wrangler.eval_locals( - trav_global.level_start_target_box_nrs, - trav_global.target_boxes, + local_trav.level_start_target_box_nrs, + local_trav.target_boxes, local_exps) # }}} @@ -1445,7 +1440,7 @@ class DistributedFMMInfo(object): self.local_tree, self.local_data, self.box_bounding_box, _ = \ generate_local_tree(self.global_trav) - self.trav_local, self.trav_global = generate_local_travs( + self.local_trav = generate_local_travs( self.local_tree, self.box_bounding_box, comm=comm, well_sep_is_n_away=well_sep_is_n_away) self.local_wrangler = self.distributed_expansion_wrangler_factory( @@ -1458,5 +1453,5 @@ class DistributedFMMInfo(object): def drive_dfmm(self, source_weights): return calculate_pot( - self.local_wrangler, self.trav_local, self.global_wrangler, - self.trav_global, source_weights, self.local_data) + self.local_wrangler, self.global_wrangler, self.local_trav, + source_weights, self.local_data) diff --git a/boxtree/traversal.py b/boxtree/traversal.py index 6184954..aa6e1f3 100644 --- a/boxtree/traversal.py +++ b/boxtree/traversal.py @@ -1792,18 +1792,23 @@ class FMMTraversalBuilder: def __call__(self, queue, tree, wait_for=None, debug=False, _from_sep_smaller_min_nsources_cumul=None, - box_bounding_box=None): + box_bounding_box=None, local_box_flags=None): """ :arg queue: A :class:`pyopencl.CommandQueue` instance. :arg tree: A :class:`boxtree.Tree` instance. :arg wait_for: may either be *None* or a list of :class:`pyopencl.Event` instances for whose completion this command waits before starting exeuction. + :arg local_box_flags: Used by distributed FMM for building source boxes + for local trees. :return: A tuple *(trav, event)*, where *trav* is a new instance of :class:`FMMTraversalInfo` and *event* is a :class:`pyopencl.Event` for dependency management. 
""" + if local_box_flags is None: + local_box_flags = tree.box_flags + if _from_sep_smaller_min_nsources_cumul is None: # default to old no-threshold behavior _from_sep_smaller_min_nsources_cumul = 0 @@ -1843,7 +1848,7 @@ class FMMTraversalBuilder: fin_debug("building list of source boxes, their parents, and target boxes") result, evt = knl_info.sources_parents_and_targets_builder( - queue, tree.nboxes, tree.box_flags.data, wait_for=wait_for) + queue, tree.nboxes, local_box_flags.data, wait_for=wait_for) wait_for = [evt] source_parent_boxes = result["source_parent_boxes"].lists -- GitLab From 1420b60b7891ce6400a40585b7dd34cffff1dbbd Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 28 May 2018 10:54:08 -0500 Subject: [PATCH 095/260] Distribute sources and targets without pickle --- boxtree/distributed.py | 114 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 106 insertions(+), 8 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index a1ba791..e4e790e 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -106,7 +106,13 @@ class LocalTree(Tree): responsible_boxes_list=responsible_boxes_list, ancestor_mask=ancestor_mask, box_to_user_starts=box_to_user_starts, - box_to_user_lists=box_to_user_lists) + box_to_user_lists=box_to_user_lists, + _dimensions=None, + _ntargets=None, + _nsources=None, + _particle_dtype=None, + _radii_dtype=None + ) local_tree.__class__ = cls return local_tree @@ -221,15 +227,32 @@ class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): MPITags = dict( DIST_TREE=0, - DIST_WEIGHT=1, - GATHER_POTENTIALS=2, - REDUCE_POTENTIALS=3, - REDUCE_INDICES=4 + DIST_SOURCES=1, + DIST_TARGETS=2, + DIST_RADII=3, + DIST_WEIGHT=4, + GATHER_POTENTIALS=5, + REDUCE_POTENTIALS=6, + REDUCE_INDICES=7 ) WorkloadWeight = namedtuple('Workload', ['direct', 'm2l', 'm2p', 'p2l', 'multipole']) +def dtype_to_mpi(dtype): + """ This function translates a numpy.dtype object into the corresponding type + used in mpi4py. + """ + if hasattr(MPI, '_typedict'): + mpi_type = MPI._typedict[np.dtype(dtype).char] + elif hasattr(MPI, '__TypeDict__'): + mpi_type = MPI.__TypeDict__[np.dtype(dtype).char] + else: + raise RuntimeError("There is no dictionary to translate from Numpy dtype to " + "MPI type") + return mpi_type + + def partition_work(traversal, total_rank, workload_weight): """ This function returns a pyopencl array of size total_rank*nboxes, where the (i,j) entry is 1 iff rank i is responsible for box j. 
@@ -681,6 +704,7 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): # }}} # {{{ Construct local tree for each rank on root + if current_rank == 0: local_data = np.empty((total_rank,), dtype=object) for i in range(total_rank): @@ -695,7 +719,11 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): if current_rank == 0: tree = traversal.tree + local_tree = np.empty((total_rank,), dtype=object) + local_targets = np.empty((total_rank,), dtype=object) + local_sources = np.empty((total_rank,), dtype=object) + local_target_radii = np.empty((total_rank,), dtype=object) # {{{ Partition the work d_box_parent_ids = cl.array.to_device(queue, tree.box_parent_ids) @@ -877,6 +905,10 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): # request objects for non-blocking communication tree_req = np.empty((total_rank,), dtype=object) + sources_req = np.empty((total_rank,), dtype=object) + targets_req = np.empty((total_rank,), dtype=object) + if tree.targets_have_extent: + target_radii_req = np.empty((total_rank,), dtype=object) for rank in range(total_rank): local_tree[rank] = LocalTree.copy_from_global_tree( @@ -895,12 +927,52 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): local_data[rank], knls) + if rank == 0: + # master process does not need to communicate with itself + continue + + # {{{ Peel sources and targets off tree + + local_tree[rank]._dimensions = local_tree[rank].dimensions + + local_targets[rank] = np.stack(local_tree[rank].targets, axis=0) + local_tree[rank]._ntargets = local_tree[rank].ntargets + local_tree[rank].targets = None + + local_sources[rank] = np.stack(local_tree[rank].sources, axis=0) + local_tree[rank]._nsources = local_tree[rank].nsources + local_tree[rank].sources = None + + local_target_radii[rank] = np.stack( + local_tree[rank].target_radii, axis=0) + local_tree[rank].target_radii = None + + local_tree[rank]._particle_dtype = tree.sources[0].dtype + local_tree[rank]._radii_dtype = tree.target_radii.dtype + + # }}} + + # Send the local tree skeleton without sources and targets tree_req[rank] = comm.isend(local_tree[rank], dest=rank, tag=MPITags["DIST_TREE"]) + # Send the sources and targets + sources_req[rank] = comm.Isend( + local_sources[rank], dest=rank, tag=MPITags["DIST_SOURCES"] + ) + + targets_req[rank] = comm.Isend( + local_targets[rank], dest=rank, tag=MPITags["DIST_TARGETS"] + ) + + if tree.targets_have_extent: + target_radii_req[rank] = comm.Isend( + local_target_radii[rank], dest=rank, tag=MPITags["DIST_RADII"] + ) + # }}} - # Recieve the local trav from root + # Receive the local tree from root if current_rank == 0: for rank in range(1, total_rank): tree_req[rank].wait() @@ -908,7 +980,33 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): else: local_tree = comm.recv(source=0, tag=MPITags["DIST_TREE"]) - # Recieve box extent + # Receive sources and targets + if current_rank == 0: + for rank in range(1, total_rank): + sources_req[rank].wait() + targets_req[rank].wait() + target_radii_req[rank].wait() + else: + local_tree.sources = np.empty( + (local_tree._dimensions, local_tree._nsources), + dtype=local_tree._particle_dtype + ) + comm.Recv(local_tree.sources, source=0, tag=MPITags["DIST_SOURCES"]) + + local_tree.targets = np.empty( + (local_tree._dimensions, local_tree._ntargets), + dtype=local_tree._particle_dtype + ) + comm.Recv(local_tree.targets, source=0, tag=MPITags["DIST_TARGETS"]) + + if 
local_tree.targets_have_extent: + local_tree.target_radii = np.empty( + (local_tree._ntargets,), + dtype=local_tree._radii_dtype + ) + comm.Recv(local_tree.target_radii, source=0, tag=MPITags["DIST_RADII"]) + + # Receive box extent if current_rank == 0: box_target_bounding_box_min = traversal.box_target_bounding_box_min box_target_bounding_box_max = traversal.box_target_bounding_box_max @@ -1367,7 +1465,7 @@ def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, # }}} - potentials_mpi_type = MPI._typedict[potentials.dtype.char] + potentials_mpi_type = dtype_to_mpi(potentials.dtype) if current_rank == 0: potentials_all_ranks = np.empty((total_rank,), dtype=object) potentials_all_ranks[0] = potentials -- GitLab From 3f35c3c42caa5d8be660a50e022914663199b7fa Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 28 May 2018 18:01:47 -0500 Subject: [PATCH 096/260] Use waitall in tree distribution instead of a loop --- boxtree/distributed.py | 37 ++++++++++++++++--------------------- 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index e4e790e..040e75c 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -904,11 +904,11 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): knls = get_gen_local_tree_kernels(tree) # request objects for non-blocking communication - tree_req = np.empty((total_rank,), dtype=object) - sources_req = np.empty((total_rank,), dtype=object) - targets_req = np.empty((total_rank,), dtype=object) + tree_req = [] + sources_req = [] + targets_req = [] if tree.targets_have_extent: - target_radii_req = np.empty((total_rank,), dtype=object) + target_radii_req = [] for rank in range(total_rank): local_tree[rank] = LocalTree.copy_from_global_tree( @@ -953,39 +953,34 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): # }}} # Send the local tree skeleton without sources and targets - tree_req[rank] = comm.isend(local_tree[rank], dest=rank, - tag=MPITags["DIST_TREE"]) + tree_req.append(comm.isend( + local_tree[rank], dest=rank, tag=MPITags["DIST_TREE"])) # Send the sources and targets - sources_req[rank] = comm.Isend( - local_sources[rank], dest=rank, tag=MPITags["DIST_SOURCES"] - ) + sources_req.append(comm.Isend( + local_sources[rank], dest=rank, tag=MPITags["DIST_SOURCES"])) - targets_req[rank] = comm.Isend( - local_targets[rank], dest=rank, tag=MPITags["DIST_TARGETS"] - ) + targets_req.append(comm.Isend( + local_targets[rank], dest=rank, tag=MPITags["DIST_TARGETS"])) if tree.targets_have_extent: - target_radii_req[rank] = comm.Isend( - local_target_radii[rank], dest=rank, tag=MPITags["DIST_RADII"] - ) + target_radii_req.append(comm.Isend( + local_target_radii[rank], dest=rank, tag=MPITags["DIST_RADII"])) # }}} # Receive the local tree from root if current_rank == 0: - for rank in range(1, total_rank): - tree_req[rank].wait() + MPI.Request.Waitall(tree_req) local_tree = local_tree[0] else: local_tree = comm.recv(source=0, tag=MPITags["DIST_TREE"]) # Receive sources and targets if current_rank == 0: - for rank in range(1, total_rank): - sources_req[rank].wait() - targets_req[rank].wait() - target_radii_req[rank].wait() + MPI.Request.Waitall(sources_req) + MPI.Request.Waitall(targets_req) + MPI.Request.Waitall(target_radii_req) else: local_tree.sources = np.empty( (local_tree._dimensions, local_tree._nsources), -- GitLab From 6c07d8fa89c46bd61168cbec82dfba0f62d142d4 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 28 May 2018 19:52:09 
-0500 Subject: [PATCH 097/260] Use nonblocking recv for tree distribution --- boxtree/distributed.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 040e75c..3e9ae83 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -982,24 +982,31 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): MPI.Request.Waitall(targets_req) MPI.Request.Waitall(target_radii_req) else: + reqs = [] + local_tree.sources = np.empty( (local_tree._dimensions, local_tree._nsources), dtype=local_tree._particle_dtype ) - comm.Recv(local_tree.sources, source=0, tag=MPITags["DIST_SOURCES"]) + reqs.append(comm.Irecv( + local_tree.sources, source=0, tag=MPITags["DIST_SOURCES"])) local_tree.targets = np.empty( (local_tree._dimensions, local_tree._ntargets), dtype=local_tree._particle_dtype ) - comm.Recv(local_tree.targets, source=0, tag=MPITags["DIST_TARGETS"]) + reqs.append(comm.Irecv( + local_tree.targets, source=0, tag=MPITags["DIST_TARGETS"])) if local_tree.targets_have_extent: local_tree.target_radii = np.empty( (local_tree._ntargets,), dtype=local_tree._radii_dtype ) - comm.Recv(local_tree.target_radii, source=0, tag=MPITags["DIST_RADII"]) + reqs.append(comm.Irecv( + local_tree.target_radii, source=0, tag=MPITags["DIST_RADII"])) + + MPI.Request.Waitall(reqs) # Receive box extent if current_rank == 0: -- GitLab From ac2213caf767c08a2caff024abba022b47295128 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 28 May 2018 23:44:29 -0500 Subject: [PATCH 098/260] Reduce request object --- boxtree/distributed.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 3e9ae83..f3a6b7b 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -905,10 +905,7 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): # request objects for non-blocking communication tree_req = [] - sources_req = [] - targets_req = [] - if tree.targets_have_extent: - target_radii_req = [] + particles_req = [] for rank in range(total_rank): local_tree[rank] = LocalTree.copy_from_global_tree( @@ -957,14 +954,14 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): local_tree[rank], dest=rank, tag=MPITags["DIST_TREE"])) # Send the sources and targets - sources_req.append(comm.Isend( + particles_req.append(comm.Isend( local_sources[rank], dest=rank, tag=MPITags["DIST_SOURCES"])) - targets_req.append(comm.Isend( + particles_req.append(comm.Isend( local_targets[rank], dest=rank, tag=MPITags["DIST_TARGETS"])) if tree.targets_have_extent: - target_radii_req.append(comm.Isend( + particles_req.append(comm.Isend( local_target_radii[rank], dest=rank, tag=MPITags["DIST_RADII"])) # }}} @@ -978,9 +975,7 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): # Receive sources and targets if current_rank == 0: - MPI.Request.Waitall(sources_req) - MPI.Request.Waitall(targets_req) - MPI.Request.Waitall(target_radii_req) + MPI.Request.Waitall(particles_req) else: reqs = [] -- GitLab From 0f43ac96f645e0fb7a0235317d0c61100fdf5065 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 29 May 2018 17:14:33 -0500 Subject: [PATCH 099/260] Use multidimensional array instead of object arrays for local sources/targets --- boxtree/distributed.py | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/boxtree/distributed.py 
b/boxtree/distributed.py index f3a6b7b..ba2a307 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -534,14 +534,15 @@ def gen_local_tree_helper(tree, src_box_mask, tgt_box_mask, local_tree, # local sources local_nsources = src_particle_scan[-1].get(queue) - local_sources = np.empty((tree.dimensions,), dtype=object) - for i in range(tree.dimensions): - local_sources[i] = cl.array.empty(queue, (local_nsources,), - dtype=tree.coord_dtype) + local_sources = cl.array.empty( + queue, (tree.dimensions, local_nsources), dtype=tree.coord_dtype) + local_sources_list = [local_sources[idim, :] for idim in range(tree.dimensions)] + assert(tree.sources_have_extent is False) + knls["fetch_local_src_knl"](src_particle_mask, src_particle_scan, *d_tree.sources.tolist(), - *local_sources.tolist()) + *local_sources_list) # box_source_starts local_box_source_starts = cl.array.empty(queue, (tree.nboxes,), @@ -581,21 +582,22 @@ def gen_local_tree_helper(tree, src_box_mask, tgt_box_mask, local_tree, # local targets local_ntargets = tgt_particle_scan[-1].get(queue) - local_targets = np.empty((tree.dimensions,), dtype=object) - for i in range(tree.dimensions): - local_targets[i] = cl.array.empty(queue, (local_ntargets,), - dtype=tree.coord_dtype) + + local_targets = cl.array.empty( + queue, (tree.dimensions, local_ntargets), dtype=tree.coord_dtype) + local_targets_list = [local_targets[idim, :] for idim in range(tree.dimensions)] + if tree.targets_have_extent: local_target_radii = cl.array.empty(queue, (local_ntargets,), dtype=tree.coord_dtype) knls["fetch_local_tgt_knl"](tgt_particle_mask, tgt_particle_scan, *d_tree.targets.tolist(), - *local_targets.tolist(), + *local_targets_list, d_tree.target_radii, local_target_radii) else: knls["fetch_local_tgt_knl"](tgt_particle_mask, tgt_particle_scan, *d_tree.targets.tolist(), - *local_targets.tolist()) + *local_targets_list) # box_target_starts local_box_target_starts = cl.array.empty(queue, (tree.nboxes,), @@ -619,12 +621,12 @@ def gen_local_tree_helper(tree, src_box_mask, tgt_box_mask, local_tree, tgt_particle_scan) # Fetch fields to local_tree - for i in range(tree.dimensions): - local_sources[i] = local_sources[i].get(queue=queue) + local_sources = local_sources.get(queue=queue) local_tree.sources = local_sources - for i in range(tree.dimensions): - local_targets[i] = local_targets[i].get(queue=queue) + + local_targets = local_targets.get(queue=queue) local_tree.targets = local_targets + if tree.targets_have_extent: local_tree.target_radii = local_target_radii.get(queue=queue) local_tree.box_source_starts = local_box_source_starts.get(queue=queue) @@ -932,16 +934,15 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): local_tree[rank]._dimensions = local_tree[rank].dimensions - local_targets[rank] = np.stack(local_tree[rank].targets, axis=0) local_tree[rank]._ntargets = local_tree[rank].ntargets + local_targets[rank] = local_tree[rank].targets local_tree[rank].targets = None - local_sources[rank] = np.stack(local_tree[rank].sources, axis=0) local_tree[rank]._nsources = local_tree[rank].nsources + local_sources[rank] = local_tree[rank].sources local_tree[rank].sources = None - local_target_radii[rank] = np.stack( - local_tree[rank].target_radii, axis=0) + local_target_radii[rank] = local_tree[rank].target_radii local_tree[rank].target_radii = None local_tree[rank]._particle_dtype = tree.sources[0].dtype -- GitLab From e32c319dc49a8920ba3c34d14bf3bbfd50a6ee76 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sat, 16 Jun 2018 
12:04:46 +0800 Subject: [PATCH 100/260] Move work partition to a separate file --- boxtree/distributed.py | 127 ++++--------------------------------- boxtree/partition.py | 141 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 154 insertions(+), 114 deletions(-) create mode 100644 boxtree/partition.py diff --git a/boxtree/distributed.py b/boxtree/distributed.py index ba2a307..b290ba5 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -253,118 +253,6 @@ def dtype_to_mpi(dtype): return mpi_type -def partition_work(traversal, total_rank, workload_weight): - """ This function returns a pyopencl array of size total_rank*nboxes, where - the (i,j) entry is 1 iff rank i is responsible for box j. - """ - tree = traversal.tree - workload = np.zeros((tree.nboxes,), dtype=np.float64) - - # workload for list 1 - for itarget_box, box_idx in enumerate(traversal.target_boxes): - box_ntargets = tree.box_target_counts_nonchild[box_idx] - start = traversal.neighbor_source_boxes_starts[itarget_box] - end = traversal.neighbor_source_boxes_starts[itarget_box + 1] - list1 = traversal.neighbor_source_boxes_lists[start:end] - particle_count = 0 - for ibox in list1: - particle_count += tree.box_source_counts_nonchild[ibox] - workload[box_idx] += box_ntargets * particle_count * workload_weight.direct - - # workload for list 2 - for itarget_or_target_parent_boxes, box_idx in enumerate( - traversal.target_or_target_parent_boxes): - start = traversal.from_sep_siblings_starts[itarget_or_target_parent_boxes] - end = traversal.from_sep_siblings_starts[itarget_or_target_parent_boxes + 1] - workload[box_idx] += (end - start) * workload_weight.m2l - - for ilevel in range(tree.nlevels): - # workload for list 3 far - for itarget_box, box_idx in enumerate( - traversal.target_boxes_sep_smaller_by_source_level[ilevel]): - box_ntargets = tree.box_target_counts_nonchild[box_idx] - start = traversal.from_sep_smaller_by_level[ilevel].starts[itarget_box] - end = traversal.from_sep_smaller_by_level[ilevel].starts[ - itarget_box + 1] - workload[box_idx] += (end - start) * box_ntargets - - # workload for list 3 near - if tree.targets_have_extent and \ - traversal.from_sep_close_smaller_starts is not None: - for itarget_box, box_idx in enumerate(traversal.target_boxes): - box_ntargets = tree.box_target_counts_nonchild[box_idx] - start = traversal.from_sep_close_smaller_starts[itarget_box] - end = traversal.from_sep_close_smaller_starts[itarget_box + 1] - particle_count = 0 - for near_box_id in traversal.from_sep_close_smaller_lists[start:end]: - particle_count += tree.box_source_counts_nonchild[near_box_id] - workload[box_idx] += ( - box_ntargets * particle_count * workload_weight.direct) - - # workload for list 4 - for itarget_or_target_parent_boxes, box_idx in enumerate( - traversal.target_or_target_parent_boxes): - start = traversal.from_sep_bigger_starts[itarget_or_target_parent_boxes] - end = traversal.from_sep_bigger_starts[itarget_or_target_parent_boxes + 1] - particle_count = 0 - for far_box_id in traversal.from_sep_bigger_lists[start:end]: - particle_count += tree.box_source_counts_nonchild[far_box_id] - workload[box_idx] += particle_count * workload_weight.p2l - - if tree.targets_have_extent and \ - traversal.from_sep_close_bigger_starts is not None: - box_ntargets = tree.box_target_counts_nonchild[box_idx] - start = traversal.from_sep_close_bigger_starts[ - itarget_or_target_parent_boxes] - end = traversal.from_sep_close_bigger_starts[ - itarget_or_target_parent_boxes + 1] - particle_count = 0 - for 
direct_box_id in traversal.from_sep_close_bigger_lists[start:end]: - particle_count += tree.box_source_counts_nonchild[direct_box_id] - workload[box_idx] += ( - box_ntargets * particle_count * workload_weight.direct) - - for i in range(tree.nboxes): - # workload for multipole calculation - workload[i] += tree.box_source_counts_nonchild[i] * workload_weight.multipole - - total_workload = 0 - for i in range(tree.nboxes): - total_workload += workload[i] - - dfs_order = np.empty((tree.nboxes,), dtype=tree.box_id_dtype) - idx = 0 - stack = [0] - while len(stack) > 0: - box_id = stack.pop() - dfs_order[idx] = box_id - idx += 1 - for i in range(2**tree.dimensions): - child_box_id = tree.box_child_ids[i][box_id] - if child_box_id > 0: - stack.append(child_box_id) - - responsible_boxes_mask = np.zeros((total_rank, tree.nboxes), dtype=np.int8) - responsible_boxes_list = np.empty((total_rank,), dtype=object) - - rank = 0 - start = 0 - workload_count = 0 - for i in range(tree.nboxes): - box_idx = dfs_order[i] - responsible_boxes_mask[rank][box_idx] = 1 - workload_count += workload[box_idx] - if (workload_count > (rank + 1)*total_workload/total_rank or - i == tree.nboxes - 1): - responsible_boxes_list[rank] = cl.array.to_device( - queue, dfs_order[start:i+1]) - start = i + 1 - rank += 1 - - responsible_boxes_mask = cl.array.to_device(queue, responsible_boxes_mask) - return responsible_boxes_mask, responsible_boxes_list - - def get_gen_local_tree_kernels(tree): particle_mask_knl = cl.elementwise.ElementwiseKernel( queue.context, @@ -740,8 +628,19 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): p2l=1, multipole=5 ) - responsible_boxes_mask, responsible_boxes_list = partition_work( - traversal, total_rank, workload_weight) + + from boxtree.partition import partition_work + responsible_boxes_list = partition_work(traversal, total_rank, + workload_weight) + + responsible_boxes_mask = np.zeros((total_rank, tree.nboxes), dtype=np.int8) + for irank in range(total_rank): + responsible_boxes_mask[irank, responsible_boxes_list[irank]] = 1 + responsible_boxes_mask = cl.array.to_device(queue, responsible_boxes_mask) + + for irank in range(total_rank): + responsible_boxes_list[irank] = cl.array.to_device( + queue, responsible_boxes_list[irank]) # Calculate ancestors of responsible boxes ancestor_boxes = cl.array.zeros(queue, (total_rank, tree.nboxes), diff --git a/boxtree/partition.py b/boxtree/partition.py new file mode 100644 index 0000000..967b27e --- /dev/null +++ b/boxtree/partition.py @@ -0,0 +1,141 @@ +import numpy as np + +__copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ + Copyright (C) 2018 Hao Gao" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + + +def partition_work(traversal, total_rank, workload_weight): + """ This function assigns responsible boxes of each process. + + :arg traversal: The traversal object built on root containing all particles. + :arg total_rank: The total number of processes. + :arg workload_weight: Workload coefficients of various operations (e.g. direct + evaluations, multipole-to-local, etc.) used for load balacing. + :return: A numpy array of shape (total_rank,), where the ith element is an numpy + array containing the responsible boxes of process i. + """ + tree = traversal.tree + + # store the workload of each box + workload = np.zeros((tree.nboxes,), dtype=np.float64) + + # add workload of list 1 + for itarget_box, box_idx in enumerate(traversal.target_boxes): + box_ntargets = tree.box_target_counts_nonchild[box_idx] + start = traversal.neighbor_source_boxes_starts[itarget_box] + end = traversal.neighbor_source_boxes_starts[itarget_box + 1] + list1 = traversal.neighbor_source_boxes_lists[start:end] + particle_count = 0 + for ibox in list1: + particle_count += tree.box_source_counts_nonchild[ibox] + workload[box_idx] += box_ntargets * particle_count * workload_weight.direct + + # add workload of list 2 + for itarget_or_target_parent_boxes, box_idx in enumerate( + traversal.target_or_target_parent_boxes): + start = traversal.from_sep_siblings_starts[itarget_or_target_parent_boxes] + end = traversal.from_sep_siblings_starts[itarget_or_target_parent_boxes + 1] + workload[box_idx] += (end - start) * workload_weight.m2l + + for ilevel in range(tree.nlevels): + # add workload of list 3 far + for itarget_box, box_idx in enumerate( + traversal.target_boxes_sep_smaller_by_source_level[ilevel]): + box_ntargets = tree.box_target_counts_nonchild[box_idx] + start = traversal.from_sep_smaller_by_level[ilevel].starts[itarget_box] + end = traversal.from_sep_smaller_by_level[ilevel].starts[ + itarget_box + 1] + workload[box_idx] += (end - start) * box_ntargets + + # add workload of list 3 near + if tree.targets_have_extent and \ + traversal.from_sep_close_smaller_starts is not None: + for itarget_box, box_idx in enumerate(traversal.target_boxes): + box_ntargets = tree.box_target_counts_nonchild[box_idx] + start = traversal.from_sep_close_smaller_starts[itarget_box] + end = traversal.from_sep_close_smaller_starts[itarget_box + 1] + particle_count = 0 + for near_box_id in traversal.from_sep_close_smaller_lists[start:end]: + particle_count += tree.box_source_counts_nonchild[near_box_id] + workload[box_idx] += ( + box_ntargets * particle_count * workload_weight.direct) + + # add workload of list 4 + for itarget_or_target_parent_boxes, box_idx in enumerate( + traversal.target_or_target_parent_boxes): + start = traversal.from_sep_bigger_starts[itarget_or_target_parent_boxes] + end = traversal.from_sep_bigger_starts[itarget_or_target_parent_boxes + 1] + particle_count = 0 + for far_box_id in traversal.from_sep_bigger_lists[start:end]: + particle_count += tree.box_source_counts_nonchild[far_box_id] + workload[box_idx] += particle_count * workload_weight.p2l + + if tree.targets_have_extent and \ + traversal.from_sep_close_bigger_starts is not None: + box_ntargets = tree.box_target_counts_nonchild[box_idx] + start = traversal.from_sep_close_bigger_starts[ + 
itarget_or_target_parent_boxes] + end = traversal.from_sep_close_bigger_starts[ + itarget_or_target_parent_boxes + 1] + particle_count = 0 + for direct_box_id in traversal.from_sep_close_bigger_lists[start:end]: + particle_count += tree.box_source_counts_nonchild[direct_box_id] + workload[box_idx] += ( + box_ntargets * particle_count * workload_weight.direct) + + for i in range(tree.nboxes): + # add workload of multipole calculation + workload[i] += tree.box_source_counts_nonchild[i] * workload_weight.multipole + + total_workload = 0 + for i in range(tree.nboxes): + total_workload += workload[i] + + # transform tree from level order to dfs order + dfs_order = np.empty((tree.nboxes,), dtype=tree.box_id_dtype) + idx = 0 + stack = [0] + while len(stack) > 0: + box_id = stack.pop() + dfs_order[idx] = box_id + idx += 1 + for i in range(2**tree.dimensions): + child_box_id = tree.box_child_ids[i][box_id] + if child_box_id > 0: + stack.append(child_box_id) + + # partition all boxes in dfs order evenly according to workload + responsible_boxes_list = np.empty((total_rank,), dtype=object) + rank = 0 + start = 0 + workload_count = 0 + for i in range(tree.nboxes): + box_idx = dfs_order[i] + workload_count += workload[box_idx] + if (workload_count > (rank + 1)*total_workload/total_rank or + i == tree.nboxes - 1): + responsible_boxes_list[rank] = dfs_order[start:i+1] + start = i + 1 + rank += 1 + + return responsible_boxes_list -- GitLab From e2560f7f80f5b11a841b84cb2817c489da5c3577 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sat, 16 Jun 2018 15:43:32 +0800 Subject: [PATCH 101/260] Refactor ancestor boxes construction --- boxtree/distributed.py | 24 ++++++--------------- boxtree/partition.py | 47 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 18 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index b290ba5..0c73484 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -616,7 +616,6 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): local_target_radii = np.empty((total_rank,), dtype=object) # {{{ Partition the work - d_box_parent_ids = cl.array.to_device(queue, tree.box_parent_ids) # Each rank is responsible for calculating the multiple expansion as well as # evaluating target potentials in *responsible_boxes* @@ -642,26 +641,15 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): responsible_boxes_list[irank] = cl.array.to_device( queue, responsible_boxes_list[irank]) + from boxtree.partition import ResponsibleBoxesQuery + responsible_box_query = ResponsibleBoxesQuery(queue, tree) + # Calculate ancestors of responsible boxes ancestor_boxes = cl.array.zeros(queue, (total_rank, tree.nboxes), dtype=np.int8) - for rank in range(total_rank): - ancestor_boxes_last = responsible_boxes_mask[rank, :].copy() - mark_parent_knl = cl.elementwise.ElementwiseKernel( - ctx, - "__global char *current, __global char *parent, " - "__global %s *box_parent_ids" % dtype_to_ctype(tree.box_id_dtype), - "if(i != 0 && current[i]) parent[box_parent_ids[i]] = 1" - ) - while ancestor_boxes_last.any(): - ancestor_boxes_new = cl.array.zeros(queue, (tree.nboxes,), - dtype=np.int8) - mark_parent_knl(ancestor_boxes_last, ancestor_boxes_new, - d_box_parent_ids) - ancestor_boxes_new = ancestor_boxes_new & (~ancestor_boxes[rank, :]) - ancestor_boxes[rank, :] = \ - ancestor_boxes[rank, :] | ancestor_boxes_new - ancestor_boxes_last = ancestor_boxes_new + for irank in range(total_rank): + ancestor_boxes[irank, :] 
= responsible_box_query.ancestor_boxes_mask( + responsible_boxes_mask[irank, :]) # In order to evaluate, each rank needs sources in boxes in # *src_boxes_mask* diff --git a/boxtree/partition.py b/boxtree/partition.py index 967b27e..c4271fb 100644 --- a/boxtree/partition.py +++ b/boxtree/partition.py @@ -1,4 +1,6 @@ import numpy as np +import pyopencl as cl +from pyopencl.tools import dtype_to_ctype __copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ Copyright (C) 2018 Hao Gao" @@ -139,3 +141,48 @@ def partition_work(traversal, total_rank, workload_weight): rank += 1 return responsible_boxes_list + + +class ResponsibleBoxesQuery(object): + """ Query various lists related to the responsible boxes in a given tree. + """ + def __init__(self, queue, tree): + """ + :param queue: A pyopencl.CommandQueue object. + :param tree: The global tree on root with all particles. + """ + self.queue = queue + self.tree = tree + + self.mark_parent_knl = cl.elementwise.ElementwiseKernel( + queue.context, + "__global char *current, __global char *parent, " + "__global %s *box_parent_ids" % dtype_to_ctype(tree.box_id_dtype), + "if(i != 0 && current[i]) parent[box_parent_ids[i]] = 1" + ) + + self.box_parent_ids_dev = cl.array.to_device(queue, tree.box_parent_ids) + + def ancestor_boxes_mask(self, responsible_boxes_mask): + """ Query the ancestors of responsible boxes. + + :param responsible_boxes_mask: A pyopencl.array.Array object of shape + (tree.nboxes,) whose ith entry is 1 iff i is a responsible box. + :return: A pyopencl.array.Array object of shape (tree.nboxes,) whose ith + entry is 1 iff i is either a responsible box or an ancestor of the + responsible boxes specified by responsible_boxes_mask. + """ + ancestor_boxes = cl.array.zeros( + self.queue, (self.tree.nboxes,), dtype=np.int8) + ancestor_boxes_last = responsible_boxes_mask.copy() + + while ancestor_boxes_last.any(): + ancestor_boxes_new = cl.array.zeros(self.queue, (self.tree.nboxes,), + dtype=np.int8) + self.mark_parent_knl(ancestor_boxes_last, ancestor_boxes_new, + self.box_parent_ids_dev) + ancestor_boxes_new = ancestor_boxes_new & (~ancestor_boxes) + ancestor_boxes = ancestor_boxes | ancestor_boxes_new + ancestor_boxes_last = ancestor_boxes_new + + return ancestor_boxes -- GitLab From a39f76fc4f20cc88f62013bacc09e87487c94adf Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sat, 16 Jun 2018 16:23:57 +0800 Subject: [PATCH 102/260] Refactor source box construction --- boxtree/distributed.py | 90 +++------------------------------- boxtree/partition.py | 107 ++++++++++++++++++++++++++++++++++++++--- requirements.txt | 1 + 3 files changed, 109 insertions(+), 89 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 0c73484..b386dcb 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -642,7 +642,7 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): queue, responsible_boxes_list[irank]) from boxtree.partition import ResponsibleBoxesQuery - responsible_box_query = ResponsibleBoxesQuery(queue, tree) + responsible_box_query = ResponsibleBoxesQuery(queue, traversal) # Calculate ancestors of responsible boxes ancestor_boxes = cl.array.zeros(queue, (total_rank, tree.nboxes), @@ -653,89 +653,13 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): # In order to evaluate, each rank needs sources in boxes in # *src_boxes_mask* - src_boxes_mask = responsible_boxes_mask.copy() - - # Add list 1 and list 4 to src_boxes_mask - add_interaction_list_boxes = 
cl.elementwise.ElementwiseKernel( - ctx, - Template(""" - __global ${box_id_t} *box_list, - __global char *responsible_boxes_mask, - __global ${box_id_t} *interaction_boxes_starts, - __global ${box_id_t} *interaction_boxes_lists, - __global char *src_boxes_mask - """, strict_undefined=True).render( - box_id_t=dtype_to_ctype(tree.box_id_dtype) - ), - Template(r""" - typedef ${box_id_t} box_id_t; - box_id_t current_box = box_list[i]; - if(responsible_boxes_mask[current_box]) { - for(box_id_t box_idx = interaction_boxes_starts[i]; - box_idx < interaction_boxes_starts[i + 1]; - ++box_idx) - src_boxes_mask[interaction_boxes_lists[box_idx]] = 1; - } - """, strict_undefined=True).render( - box_id_t=dtype_to_ctype(tree.box_id_dtype) - ), - ) - - for rank in range(total_rank): - # Add list 1 of responsible boxes - d_target_boxes = cl.array.to_device(queue, traversal.target_boxes) - d_neighbor_source_boxes_starts = cl.array.to_device( - queue, traversal.neighbor_source_boxes_starts) - d_neighbor_source_boxes_lists = cl.array.to_device( - queue, traversal.neighbor_source_boxes_lists) - add_interaction_list_boxes( - d_target_boxes, responsible_boxes_mask[rank], - d_neighbor_source_boxes_starts, - d_neighbor_source_boxes_lists, src_boxes_mask[rank], - range=range(0, traversal.target_boxes.shape[0])) - - # Add list 4 of responsible boxes or ancestor boxes - d_target_or_target_parent_boxes = cl.array.to_device( - queue, traversal.target_or_target_parent_boxes) - d_from_sep_bigger_starts = cl.array.to_device( - queue, traversal.from_sep_bigger_starts) - d_from_sep_bigger_lists = cl.array.to_device( - queue, traversal.from_sep_bigger_lists) - add_interaction_list_boxes( - d_target_or_target_parent_boxes, - responsible_boxes_mask[rank] | ancestor_boxes[rank], - d_from_sep_bigger_starts, d_from_sep_bigger_lists, - src_boxes_mask[rank], - range=range(0, traversal.target_or_target_parent_boxes.shape[0])) + src_boxes_mask = cl.array.zeros(queue, (total_rank, tree.nboxes), + dtype=np.int8) - if tree.targets_have_extent: - if traversal.from_sep_close_bigger_starts is not None: - d_from_sep_close_bigger_starts = cl.array.to_device( - queue, traversal.from_sep_close_bigger_starts) - d_from_sep_close_bigger_lists = cl.array.to_device( - queue, traversal.from_sep_close_bigger_lists) - add_interaction_list_boxes( - d_target_or_target_parent_boxes, - responsible_boxes_mask[rank] | ancestor_boxes[rank], - d_from_sep_close_bigger_starts, - d_from_sep_close_bigger_lists, - src_boxes_mask[rank] - ) - - # Add list 3 direct - if traversal.from_sep_close_smaller_starts is not None: - d_from_sep_close_smaller_starts = cl.array.to_device( - queue, traversal.from_sep_close_smaller_starts) - d_from_sep_close_smaller_lists = cl.array.to_device( - queue, traversal.from_sep_close_smaller_lists) - - add_interaction_list_boxes( - d_target_boxes, - responsible_boxes_mask[rank], - d_from_sep_close_smaller_starts, - d_from_sep_close_smaller_lists, - src_boxes_mask[rank] - ) + for irank in range(total_rank): + src_boxes_mask[irank, :] = responsible_box_query.src_boxes_mask( + responsible_boxes_mask[irank, :], ancestor_boxes[irank, :] + ) # {{{ compute box_to_user diff --git a/boxtree/partition.py b/boxtree/partition.py index c4271fb..c29b676 100644 --- a/boxtree/partition.py +++ b/boxtree/partition.py @@ -1,6 +1,7 @@ import numpy as np import pyopencl as cl from pyopencl.tools import dtype_to_ctype +from mako.template import Template __copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ Copyright (C) 2018 Hao Gao" @@ -144,24 +145,75 @@ 
def partition_work(traversal, total_rank, workload_weight): class ResponsibleBoxesQuery(object): - """ Query various lists related to the responsible boxes in a given tree. + """ Query related to the responsible boxes for a given traversal. """ - def __init__(self, queue, tree): + + def __init__(self, queue, traversal): """ :param queue: A pyopencl.CommandQueue object. - :param tree: The global tree on root with all particles. + :param traversal: The global traversal built on root with all particles. """ self.queue = queue - self.tree = tree + self.traversal = traversal + self.tree = traversal.tree + + # fetch useful fields of tree to device memory + self.box_parent_ids_dev = cl.array.to_device(queue, self.tree.box_parent_ids) + self.target_boxes_dev = cl.array.to_device(queue, traversal.target_boxes) + self.neighbor_source_boxes_starts_dev = cl.array.to_device( + queue, traversal.neighbor_source_boxes_starts) + self.neighbor_source_boxes_lists_dev = cl.array.to_device( + queue, traversal.neighbor_source_boxes_lists) + self.target_or_target_parent_boxes_dev = cl.array.to_device( + queue, traversal.target_or_target_parent_boxes) + self.from_sep_bigger_starts_dev = cl.array.to_device( + queue, traversal.from_sep_bigger_starts) + self.from_sep_bigger_lists_dev = cl.array.to_device( + queue, traversal.from_sep_bigger_lists) + if self.tree.targets_have_extent: + self.from_sep_close_bigger_starts_dev = cl.array.to_device( + queue, traversal.from_sep_close_bigger_starts) + self.from_sep_close_bigger_lists_dev = cl.array.to_device( + queue, traversal.from_sep_close_bigger_lists) + self.from_sep_close_smaller_starts_dev = cl.array.to_device( + queue, traversal.from_sep_close_smaller_starts) + self.from_sep_close_smaller_lists_dev = cl.array.to_device( + queue, traversal.from_sep_close_smaller_lists) + + # helper kernel for ancestor box query self.mark_parent_knl = cl.elementwise.ElementwiseKernel( queue.context, "__global char *current, __global char *parent, " - "__global %s *box_parent_ids" % dtype_to_ctype(tree.box_id_dtype), + "__global %s *box_parent_ids" % dtype_to_ctype(self.tree.box_id_dtype), "if(i != 0 && current[i]) parent[box_parent_ids[i]] = 1" ) - self.box_parent_ids_dev = cl.array.to_device(queue, tree.box_parent_ids) + # helper kernel for adding boxes from interaction list 1 and 4 + self.add_interaction_list_boxes = cl.elementwise.ElementwiseKernel( + queue.context, + Template(""" + __global ${box_id_t} *box_list, + __global char *responsible_boxes_mask, + __global ${box_id_t} *interaction_boxes_starts, + __global ${box_id_t} *interaction_boxes_lists, + __global char *src_boxes_mask + """, strict_undefined=True).render( + box_id_t=dtype_to_ctype(self.tree.box_id_dtype) + ), + Template(r""" + typedef ${box_id_t} box_id_t; + box_id_t current_box = box_list[i]; + if(responsible_boxes_mask[current_box]) { + for(box_id_t box_idx = interaction_boxes_starts[i]; + box_idx < interaction_boxes_starts[i + 1]; + ++box_idx) + src_boxes_mask[interaction_boxes_lists[box_idx]] = 1; + } + """, strict_undefined=True).render( + box_id_t=dtype_to_ctype(self.tree.box_id_dtype) + ), + ) def ancestor_boxes_mask(self, responsible_boxes_mask): """ Query the ancestors of responsible boxes. 
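
To see how the pieces of partition.py fit together after this refactor, here is a single-process usage sketch. It is hedged in the sense that the particle counts, the two-rank split and the workload weights are illustrative choices (mirroring the defaults in generate_local_tree and the setup of test_distributed.py), not values prescribed by the patch series.

import numpy as np
import pyopencl as cl
from collections import namedtuple

from boxtree import TreeBuilder
from boxtree.traversal import FMMTraversalBuilder
from boxtree.tools import make_normal_particle_array as p_normal
from boxtree.partition import partition_work, ResponsibleBoxesQuery

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)

# small 2D test problem (sizes are placeholders)
sources = p_normal(queue, 3000, 2, np.float64, seed=15)
targets = p_normal(queue, 1000, 2, np.float64, seed=18)

tree, _ = TreeBuilder(ctx)(queue, sources, targets=targets,
                           max_particles_in_box=30)
d_trav, _ = FMMTraversalBuilder(ctx)(queue, tree)
trav = d_trav.get(queue=queue)

# same weights as the defaults used in generate_local_tree
Weight = namedtuple("Workload", ["direct", "m2l", "m2p", "p2l", "multipole"])
weights = Weight(direct=1, m2l=1, m2p=1, p2l=1, multipole=5)

total_rank = 2  # pretend two MPI ranks
responsible_boxes_list = partition_work(trav, total_rank, weights)

query = ResponsibleBoxesQuery(queue, trav)
for irank in range(total_rank):
    responsible_mask = np.zeros(trav.tree.nboxes, dtype=np.int8)
    responsible_mask[responsible_boxes_list[irank]] = 1
    responsible_mask = cl.array.to_device(queue, responsible_mask)

    ancestor_mask = query.ancestor_boxes_mask(responsible_mask)
    src_mask = query.src_boxes_mask(responsible_mask, ancestor_mask)
    print(irank, int(src_mask.get().sum()), "boxes whose sources are needed")

The per-rank masks obtained this way are what generate_local_tree feeds into gen_local_tree_helper when it carves out each rank's local tree.
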
@@ -186,3 +238,46 @@ class ResponsibleBoxesQuery(object): ancestor_boxes_last = ancestor_boxes_new return ancestor_boxes + + def src_boxes_mask(self, responsible_boxes_mask, ancestor_boxes_mask): + src_boxes_mask = responsible_boxes_mask.copy() + + # Add list 1 of responsible boxes + self.add_interaction_list_boxes( + self.target_boxes_dev, responsible_boxes_mask, + self.neighbor_source_boxes_starts_dev, + self.neighbor_source_boxes_lists_dev, src_boxes_mask, + range=range(0, self.traversal.target_boxes.shape[0]) + ) + + # Add list 4 of responsible boxes or ancestor boxes + self.add_interaction_list_boxes( + self.target_or_target_parent_boxes_dev, + responsible_boxes_mask | ancestor_boxes_mask, + self.from_sep_bigger_starts_dev, self.from_sep_bigger_lists_dev, + src_boxes_mask, + range=range(0, self.traversal.target_or_target_parent_boxes.shape[0])) + + if self.tree.targets_have_extent: + + # Add list 3 close + if self.traversal.from_sep_close_smaller_starts is not None: + self.add_interaction_list_boxes( + self.target_boxes_dev, + responsible_boxes_mask, + self.from_sep_close_smaller_starts_dev, + self.from_sep_close_smaller_lists_dev, + src_boxes_mask + ) + + # Add list 4 close + if self.traversal.from_sep_close_bigger_starts is not None: + self.add_interaction_list_boxes( + self.target_or_target_parent_boxes_dev, + responsible_boxes_mask | ancestor_boxes_mask, + self.from_sep_close_bigger_starts_dev, + self.from_sep_close_bigger_lists_dev, + src_boxes_mask + ) + + return src_boxes_mask diff --git a/requirements.txt b/requirements.txt index cd0a243..dd5f008 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ numpy +mako git+https://github.com/inducer/pyopencl git+https://github.com/inducer/islpy git+https://github.com/inducer/loopy -- GitLab From 51d5498b22408a13cd8afabab20ea341f5697c4a Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sat, 16 Jun 2018 17:19:19 +0800 Subject: [PATCH 103/260] Add doc for function src_boxes_mask --- boxtree/partition.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/boxtree/partition.py b/boxtree/partition.py index c29b676..440e114 100644 --- a/boxtree/partition.py +++ b/boxtree/partition.py @@ -240,6 +240,18 @@ class ResponsibleBoxesQuery(object): return ancestor_boxes def src_boxes_mask(self, responsible_boxes_mask, ancestor_boxes_mask): + """ Query the boxes whose sources are needed in order to evaluate potentials + of boxes represented by responsible_boxes_mask. + + :param responsible_boxes_mask: A pyopencl.array.Array object of shape + (tree.nboxes,) whose ith entry is 1 iff i is a responsible box. + :param ancestor_boxes_mask: A pyopencl.array.Array object of shape + (tree.nboxes,) whose ith entry is 1 iff i is either a responsible box + or an ancestor of the responsible boxes. + :return: A pyopencl.array.Array object of shape (tree.nboxes,) whose ith + entry is 1 iff souces of box i are needed for evaluating the potentials + of targets in boxes represented by responsible_boxes_mask. 
+ """ src_boxes_mask = responsible_boxes_mask.copy() # Add list 1 of responsible boxes -- GitLab From 75c6ac6375b27fd025d95fb53e8ea45426051018 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 17 Jun 2018 19:15:04 +0800 Subject: [PATCH 104/260] Refactor multipole boxes query --- boxtree/distributed.py | 72 +++---------------------------- boxtree/partition.py | 96 +++++++++++++++++++++++++++++++++++++++--- 2 files changed, 94 insertions(+), 74 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index b386dcb..621b478 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -555,44 +555,6 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): if current_rank == 0: start_time = time.time() - # {{{ kernel to mark if a box mpole is used by a process via an interaction list - - @memoize_in(generate_local_tree, "loopy_cache") - def get_box_mpole_is_used_marker_kernel(): - knl = lp.make_kernel( - [ - "{[irank] : 0 <= irank < total_rank}", - "{[itgt_box] : 0 <= itgt_box < ntgt_boxes}", - "{[isrc_box] : isrc_box_start <= isrc_box < isrc_box_end}", - ], - """ - for irank, itgt_box - <> tgt_ibox = target_boxes[itgt_box] - <> is_relevant = relevant_boxes_mask[irank, tgt_ibox] - if is_relevant - <> isrc_box_start = source_box_starts[itgt_box] - <> isrc_box_end = source_box_starts[itgt_box + 1] - for isrc_box - <> src_ibox = source_box_lists[isrc_box] - box_mpole_is_used[irank, src_ibox] = 1 - end - end - end - """, - [ - lp.ValueArg("nboxes", np.int32), - lp.GlobalArg("relevant_boxes_mask, box_mpole_is_used", - shape=("total_rank", "nboxes")), - lp.GlobalArg("source_box_lists", shape=None), - "..." - ], - default_offset=lp.auto) - - # knl = lp.split_iname(knl, "itgt_box", 16, outer_tag="g.0", inner_tag="l.0") - return knl - - # }}} - # {{{ Construct local tree for each rank on root if current_rank == 0: @@ -667,36 +629,12 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): box_mpole_is_used = cl.array.zeros(queue, (total_rank, tree.nboxes), dtype=np.int8) - knl = get_box_mpole_is_used_marker_kernel() - # A mpole is used by process p if it is in the List 2 of either a box - # owned by p or one of its ancestors. - knl(queue, - total_rank=total_rank, - nboxes=tree.nboxes, - target_boxes=traversal.target_or_target_parent_boxes, - relevant_boxes_mask=responsible_boxes_mask | ancestor_boxes, - source_box_starts=traversal.from_sep_siblings_starts, - source_box_lists=traversal.from_sep_siblings_lists, - box_mpole_is_used=box_mpole_is_used) - - box_mpole_is_used.finish() - - # A mpole is used by process p if it is in the List 3 of a box owned by p. 
- for level in range(tree.nlevels): - source_box_starts = traversal.from_sep_smaller_by_level[level].starts - source_box_lists = traversal.from_sep_smaller_by_level[level].lists - knl(queue, - total_rank=total_rank, - nboxes=tree.nboxes, - target_boxes=( - traversal.target_boxes_sep_smaller_by_source_level[level]), - relevant_boxes_mask=responsible_boxes_mask, - source_box_starts=source_box_starts, - source_box_lists=source_box_lists, - box_mpole_is_used=box_mpole_is_used) - - box_mpole_is_used.finish() + for irank in range(total_rank): + box_mpole_is_used[irank, :] = \ + responsible_box_query.multipole_boxes_mask( + responsible_boxes_mask[irank, :], ancestor_boxes[irank, :] + ) from boxtree.tools import MaskCompressorKernel matcompr = MaskCompressorKernel(ctx) diff --git a/boxtree/partition.py b/boxtree/partition.py index 440e114..c65f095 100644 --- a/boxtree/partition.py +++ b/boxtree/partition.py @@ -157,30 +157,70 @@ class ResponsibleBoxesQuery(object): self.traversal = traversal self.tree = traversal.tree - # fetch useful fields of tree to device memory + # {{{ fetch tree structure and interaction lists to device memory + self.box_parent_ids_dev = cl.array.to_device(queue, self.tree.box_parent_ids) self.target_boxes_dev = cl.array.to_device(queue, traversal.target_boxes) + self.target_or_target_parent_boxes_dev = cl.array.to_device( + queue, traversal.target_or_target_parent_boxes) + + # list 1 self.neighbor_source_boxes_starts_dev = cl.array.to_device( queue, traversal.neighbor_source_boxes_starts) self.neighbor_source_boxes_lists_dev = cl.array.to_device( queue, traversal.neighbor_source_boxes_lists) - self.target_or_target_parent_boxes_dev = cl.array.to_device( - queue, traversal.target_or_target_parent_boxes) + + # list 2 + self.from_sep_siblings_starts_dev = cl.array.to_device( + queue, traversal.from_sep_siblings_starts) + self.from_sep_siblings_lists_dev = cl.array.to_device( + queue, traversal.from_sep_siblings_lists) + + # list 3 + self.target_boxes_sep_smaller_by_source_level_dev = np.empty( + (self.tree.nlevels,), dtype=object) + for ilevel in range(self.tree.nlevels): + self.target_boxes_sep_smaller_by_source_level_dev[ilevel] = \ + cl.array.to_device( + queue, + traversal.target_boxes_sep_smaller_by_source_level[ilevel] + ) + + self.from_sep_smaller_by_level_starts_dev = np.empty( + (self.tree.nlevels,), dtype=object) + for ilevel in range(self.tree.nlevels): + self.from_sep_smaller_by_level_starts_dev[ilevel] = cl.array.to_device( + queue, traversal.from_sep_smaller_by_level[ilevel].starts + ) + + self.from_sep_smaller_by_level_lists_dev = np.empty( + (self.tree.nlevels,), dtype=object) + for ilevel in range(self.tree.nlevels): + self.from_sep_smaller_by_level_lists_dev[ilevel] = cl.array.to_device( + queue, traversal.from_sep_smaller_by_level[ilevel].lists + ) + + # list 4 self.from_sep_bigger_starts_dev = cl.array.to_device( queue, traversal.from_sep_bigger_starts) self.from_sep_bigger_lists_dev = cl.array.to_device( queue, traversal.from_sep_bigger_lists) + # }}} + if self.tree.targets_have_extent: - self.from_sep_close_bigger_starts_dev = cl.array.to_device( - queue, traversal.from_sep_close_bigger_starts) - self.from_sep_close_bigger_lists_dev = cl.array.to_device( - queue, traversal.from_sep_close_bigger_lists) + # list 3 close self.from_sep_close_smaller_starts_dev = cl.array.to_device( queue, traversal.from_sep_close_smaller_starts) self.from_sep_close_smaller_lists_dev = cl.array.to_device( queue, traversal.from_sep_close_smaller_lists) + # list 4 close + 
self.from_sep_close_bigger_starts_dev = cl.array.to_device( + queue, traversal.from_sep_close_bigger_starts) + self.from_sep_close_bigger_lists_dev = cl.array.to_device( + queue, traversal.from_sep_close_bigger_lists) + # helper kernel for ancestor box query self.mark_parent_knl = cl.elementwise.ElementwiseKernel( queue.context, @@ -293,3 +333,45 @@ class ResponsibleBoxesQuery(object): ) return src_boxes_mask + + def multipole_boxes_mask(self, responsible_boxes_mask, ancestor_boxes_mask): + """ Query the boxes whose multipoles are used in order to evaluate + potentials of targets in boxes represented by responsible_boxes_mask. + + :param responsible_boxes_mask: A pyopencl.array.Array object of shape + (tree.nboxes,) whose ith entry is 1 iff i is a responsible box. + :param ancestor_boxes_mask: A pyopencl.array.Array object of shape + (tree.nboxes,) whose ith entry is 1 iff i is either a responsible box + or an ancestor of the responsible boxes. + :return: A pyopencl.array.Array object of shape (tree.nboxes,) whose ith + entry is 1 iff multipoles of box i are needed for evaluating the + potentials of targets in boxes represented by responsible_boxes_mask. + """ + + multipole_boxes_mask = cl.array.zeros(self.queue, (self.tree.nboxes,), + dtype=np.int8) + + # A mpole is used by process p if it is in the List 2 of either a box + # owned by p or one of its ancestors. + self.add_interaction_list_boxes( + self.target_or_target_parent_boxes_dev, + responsible_boxes_mask | ancestor_boxes_mask, + self.from_sep_siblings_starts_dev, + self.from_sep_siblings_lists_dev, + multipole_boxes_mask + ) + multipole_boxes_mask.finish() + + # A mpole is used by process p if it is in the List 3 of a box owned by p. + for ilevel in range(self.tree.nlevels): + self.add_interaction_list_boxes( + self.target_boxes_sep_smaller_by_source_level_dev[ilevel], + responsible_boxes_mask, + self.from_sep_smaller_by_level_starts_dev[ilevel], + self.from_sep_smaller_by_level_lists_dev[ilevel], + multipole_boxes_mask + ) + + multipole_boxes_mask.finish() + + return multipole_boxes_mask -- GitLab From 906cd97366fc5ebc1ea8a60756f22140cb8dc7e8 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 19 Jun 2018 14:37:31 +0800 Subject: [PATCH 105/260] Refactor local tree building --- boxtree/distributed.py | 265 ++++++++++++++++++++--------------------- boxtree/partition.py | 7 +- 2 files changed, 136 insertions(+), 136 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 621b478..3f4b6f2 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -7,7 +7,7 @@ import pyopencl as cl from mako.template import Template from pyopencl.tools import dtype_to_ctype from pyopencl.scan import GenericScanKernel -from pytools import memoize_in, memoize_method +from pytools import memoize_method from boxtree import Tree from collections import namedtuple from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler @@ -42,6 +42,14 @@ logger = logging.getLogger(__name__) ctx = cl.create_some_context() queue = cl.CommandQueue(ctx) +# Log OpenCL context information +logger.info("Process %d of %d on %s with ctx %s." 
% ( + MPI.COMM_WORLD.Get_rank(), + MPI.COMM_WORLD.Get_size(), + MPI.Get_processor_name(), + queue.context.devices) +) + def tree_to_device(queue, tree, additional_fields_to_device=[]): field_to_device = [ @@ -71,6 +79,49 @@ def tree_to_device(queue, tree, additional_fields_to_device=[]): return d_tree +class LocalTreeBuilder(object): + + def __init__(self, global_tree): + self.global_tree = global_tree + self.knls = get_gen_local_tree_kernels(global_tree) + + def from_global_tree(self, responsible_boxes_list, responsible_boxes_mask, + src_boxes_mask, ancestor_mask): + + local_tree = self.global_tree.copy( + responsible_boxes_list=responsible_boxes_list, + ancestor_mask=ancestor_mask.get(), + box_to_user_starts=None, + box_to_user_lists=None, + _dimensions=None, + _ntargets=None, + _nsources=None, + _particle_dtype=None, + _radii_dtype=None + ) + + local_tree.user_source_ids = None + local_tree.sorted_target_ids = None + + local_data = { + "src_mask": None, "src_scan": None, "nsources": None, + "tgt_mask": None, "tgt_scan": None, "ntargets": None + } + + gen_local_tree_helper( + self.global_tree, + src_boxes_mask, + responsible_boxes_mask, + local_tree, + local_data, + self.knls + ) + + local_tree.__class__ = LocalTree + + return local_tree, local_data + + class LocalTree(Tree): """ .. attribute:: box_to_user_starts @@ -98,24 +149,6 @@ class LocalTree(Tree): def ntargets(self): return self.targets[0].shape[0] - @classmethod - def copy_from_global_tree(cls, global_tree, responsible_boxes_list, - ancestor_mask, box_to_user_starts, - box_to_user_lists): - local_tree = global_tree.copy( - responsible_boxes_list=responsible_boxes_list, - ancestor_mask=ancestor_mask, - box_to_user_starts=box_to_user_starts, - box_to_user_lists=box_to_user_lists, - _dimensions=None, - _ntargets=None, - _nsources=None, - _particle_dtype=None, - _radii_dtype=None - ) - local_tree.__class__ = cls - return local_tree - def to_device(self, queue): additional_fields_to_device = ["responsible_boxes_list", "ancestor_mask", "box_to_user_starts", "box_to_user_lists"] @@ -544,177 +577,136 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): current_rank = comm.Get_rank() total_rank = comm.Get_size() - # Log OpenCL context information - logger.info("Process %d of %d on %s with ctx %s." 
% ( - comm.Get_rank(), - comm.Get_size(), - MPI.Get_processor_name(), - queue.context.devices) - ) - if current_rank == 0: start_time = time.time() - # {{{ Construct local tree for each rank on root - if current_rank == 0: local_data = np.empty((total_rank,), dtype=object) - for i in range(total_rank): - local_data[i] = { - "src_mask": None, "src_scan": None, "nsources": None, - "tgt_mask": None, "tgt_scan": None, "ntargets": None - } else: local_data = None knls = None + box_to_user_starts = None + box_to_user_lists = None if current_rank == 0: tree = traversal.tree - local_tree = np.empty((total_rank,), dtype=object) - local_targets = np.empty((total_rank,), dtype=object) - local_sources = np.empty((total_rank,), dtype=object) - local_target_radii = np.empty((total_rank,), dtype=object) - # {{{ Partition the work - # Each rank is responsible for calculating the multiple expansion as well as - # evaluating target potentials in *responsible_boxes* + # kernels for generating local trees + knls = get_gen_local_tree_kernels(tree) + + from boxtree.partition import ResponsibleBoxesQuery + responsible_box_query = ResponsibleBoxesQuery(queue, traversal) + + local_tree_builder = LocalTreeBuilder(tree) + if workload_weight is None: workload_weight = WorkloadWeight( - direct=1, - m2l=1, - m2p=1, - p2l=1, - multipole=5 - ) + direct=1, m2l=1, m2p=1, p2l=1, multipole=5) from boxtree.partition import partition_work responsible_boxes_list = partition_work(traversal, total_rank, workload_weight) - responsible_boxes_mask = np.zeros((total_rank, tree.nboxes), dtype=np.int8) - for irank in range(total_rank): - responsible_boxes_mask[irank, responsible_boxes_list[irank]] = 1 - responsible_boxes_mask = cl.array.to_device(queue, responsible_boxes_mask) + box_mpole_is_used = cl.array.empty( + queue, (total_rank, tree.nboxes,), dtype=np.int8 + ) - for irank in range(total_rank): - responsible_boxes_list[irank] = cl.array.to_device( - queue, responsible_boxes_list[irank]) + # request objects for non-blocking communication + tree_req = [] + particles_req = [] - from boxtree.partition import ResponsibleBoxesQuery - responsible_box_query = ResponsibleBoxesQuery(queue, traversal) + # buffer holding communication data so that it is not garbage collected + local_tree = np.empty((total_rank,), dtype=object) + local_targets = np.empty((total_rank,), dtype=object) + local_sources = np.empty((total_rank,), dtype=object) + local_target_radii = np.empty((total_rank,), dtype=object) - # Calculate ancestors of responsible boxes - ancestor_boxes = cl.array.zeros(queue, (total_rank, tree.nboxes), - dtype=np.int8) for irank in range(total_rank): - ancestor_boxes[irank, :] = responsible_box_query.ancestor_boxes_mask( - responsible_boxes_mask[irank, :]) - # In order to evaluate, each rank needs sources in boxes in - # *src_boxes_mask* - src_boxes_mask = cl.array.zeros(queue, (total_rank, tree.nboxes), - dtype=np.int8) + responsible_boxes_mask = np.zeros((tree.nboxes,), dtype=np.int8) + responsible_boxes_mask[responsible_boxes_list[irank]] = 1 + responsible_boxes_mask = cl.array.to_device( + queue, responsible_boxes_mask) - for irank in range(total_rank): - src_boxes_mask[irank, :] = responsible_box_query.src_boxes_mask( - responsible_boxes_mask[irank, :], ancestor_boxes[irank, :] + # Calculate ancestors of responsible boxes + ancestor_boxes = responsible_box_query.ancestor_boxes_mask( + responsible_boxes_mask ) - # {{{ compute box_to_user - - logger.debug("computing box_to_user: start") + # In order to evaluate, each rank needs 
sources in boxes in + # *src_boxes_mask* + src_boxes_mask = responsible_box_query.src_boxes_mask( + responsible_boxes_mask, ancestor_boxes + ) - box_mpole_is_used = cl.array.zeros(queue, (total_rank, tree.nboxes), - dtype=np.int8) + box_mpole_is_used[irank, :] = responsible_box_query.multipole_boxes_mask( + responsible_boxes_mask, ancestor_boxes + ) - for irank in range(total_rank): - box_mpole_is_used[irank, :] = \ - responsible_box_query.multipole_boxes_mask( - responsible_boxes_mask[irank, :], ancestor_boxes[irank, :] + local_tree[irank], local_data[irank] = \ + local_tree_builder.from_global_tree( + responsible_boxes_list[irank], responsible_boxes_mask, + src_boxes_mask, ancestor_boxes ) - from boxtree.tools import MaskCompressorKernel - matcompr = MaskCompressorKernel(ctx) - ( - box_to_user_starts, - box_to_user_lists, - evt) = matcompr(queue, box_mpole_is_used.transpose(), - list_dtype=np.int32) - - cl.wait_for_events([evt]) - del box_mpole_is_used - - logger.debug("computing box_to_user: done") - - # }}} - - # kernels for generating local trees - knls = get_gen_local_tree_kernels(tree) - - # request objects for non-blocking communication - tree_req = [] - particles_req = [] - - for rank in range(total_rank): - local_tree[rank] = LocalTree.copy_from_global_tree( - tree, responsible_boxes_list[rank].get(), - ancestor_boxes[rank].get(), - box_to_user_starts.get(), - box_to_user_lists.get()) - - local_tree[rank].user_source_ids = None - local_tree[rank].sorted_target_ids = None - - gen_local_tree_helper(tree, - src_boxes_mask[rank], - responsible_boxes_mask[rank], - local_tree[rank], - local_data[rank], - knls) - - if rank == 0: - # master process does not need to communicate with itself + # master process does not need to communicate with itself + if irank == 0: continue # {{{ Peel sources and targets off tree - local_tree[rank]._dimensions = local_tree[rank].dimensions + local_tree[irank]._dimensions = local_tree[irank].dimensions - local_tree[rank]._ntargets = local_tree[rank].ntargets - local_targets[rank] = local_tree[rank].targets - local_tree[rank].targets = None + local_tree[irank]._ntargets = local_tree[irank].ntargets + local_targets[irank] = local_tree[irank].targets + local_tree[irank].targets = None - local_tree[rank]._nsources = local_tree[rank].nsources - local_sources[rank] = local_tree[rank].sources - local_tree[rank].sources = None + local_tree[irank]._nsources = local_tree[irank].nsources + local_sources[irank] = local_tree[irank].sources + local_tree[irank].sources = None - local_target_radii[rank] = local_tree[rank].target_radii - local_tree[rank].target_radii = None + local_target_radii[irank] = local_tree[irank].target_radii + local_tree[irank].target_radii = None - local_tree[rank]._particle_dtype = tree.sources[0].dtype - local_tree[rank]._radii_dtype = tree.target_radii.dtype + local_tree[irank]._particle_dtype = tree.sources[0].dtype + local_tree[irank]._radii_dtype = tree.target_radii.dtype # }}} # Send the local tree skeleton without sources and targets tree_req.append(comm.isend( - local_tree[rank], dest=rank, tag=MPITags["DIST_TREE"])) + local_tree[irank], dest=irank, tag=MPITags["DIST_TREE"])) # Send the sources and targets particles_req.append(comm.Isend( - local_sources[rank], dest=rank, tag=MPITags["DIST_SOURCES"])) + local_sources[irank], dest=irank, tag=MPITags["DIST_SOURCES"])) particles_req.append(comm.Isend( - local_targets[rank], dest=rank, tag=MPITags["DIST_TARGETS"])) + local_targets[irank], dest=irank, tag=MPITags["DIST_TARGETS"])) if 
tree.targets_have_extent:
             particles_req.append(comm.Isend(
-                local_target_radii[rank], dest=rank, tag=MPITags["DIST_RADII"]))
+                local_target_radii[irank], dest=irank, tag=MPITags["DIST_RADII"])
+            )
 
-    # }}}
+        from boxtree.tools import MaskCompressorKernel
+        matcompr = MaskCompressorKernel(ctx)
+        (box_to_user_starts, box_to_user_lists, evt) = \
+            matcompr(queue, box_mpole_is_used.transpose(),
+                     list_dtype=np.int32)
+
+        cl.wait_for_events([evt])
+        del box_mpole_is_used
+
+        box_to_user_starts = box_to_user_starts.get()
+        box_to_user_lists = box_to_user_lists.get()
+
+        logger.debug("computing box_to_user: done")
+
+    # }}}
 
     # Receive the local tree from root
     if current_rank == 0:
@@ -773,6 +765,11 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None):
             "max": box_target_bounding_box_max
         }
 
+    box_to_user_starts = comm.bcast(box_to_user_starts, root=0)
+    box_to_user_lists = comm.bcast(box_to_user_lists, root=0)
+    local_tree.box_to_user_starts = box_to_user_starts
+    local_tree.box_to_user_lists = box_to_user_lists
+
     if current_rank == 0:
         logger.info("Distribute local tree in {} sec.".format(
             str(time.time() - start_time))
diff --git a/boxtree/partition.py b/boxtree/partition.py
index c65f095..1a5b39c 100644
--- a/boxtree/partition.py
+++ b/boxtree/partition.py
@@ -30,6 +30,9 @@ def partition_work(traversal, total_rank, workload_weight):
     """ This function assigns responsible boxes of each process.
 
+    Each process is responsible for calculating the multipole expansions as well as
+    evaluating target potentials in *responsible_boxes*.
+
     :arg traversal: The traversal object built on root containing all particles.
     :arg total_rank: The total number of processes.
     :arg workload_weight: Workload coefficients of various operations (e.g.
direct @@ -312,7 +315,7 @@ class ResponsibleBoxesQuery(object): if self.tree.targets_have_extent: - # Add list 3 close + # Add list 3 close of responsible boxes if self.traversal.from_sep_close_smaller_starts is not None: self.add_interaction_list_boxes( self.target_boxes_dev, @@ -322,7 +325,7 @@ class ResponsibleBoxesQuery(object): src_boxes_mask ) - # Add list 4 close + # Add list 4 close of responsible boxes if self.traversal.from_sep_close_bigger_starts is not None: self.add_interaction_list_boxes( self.target_or_target_parent_boxes_dev, -- GitLab From b5d1c71794c7522f4f2eff926411a60703e12d2f Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 19 Jun 2018 16:15:52 +0800 Subject: [PATCH 106/260] Bug fix --- boxtree/partition.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/boxtree/partition.py b/boxtree/partition.py index 1a5b39c..4f1cdfb 100644 --- a/boxtree/partition.py +++ b/boxtree/partition.py @@ -213,16 +213,18 @@ class ResponsibleBoxesQuery(object): if self.tree.targets_have_extent: # list 3 close - self.from_sep_close_smaller_starts_dev = cl.array.to_device( - queue, traversal.from_sep_close_smaller_starts) - self.from_sep_close_smaller_lists_dev = cl.array.to_device( - queue, traversal.from_sep_close_smaller_lists) + if traversal.from_sep_close_smaller_starts is not None: + self.from_sep_close_smaller_starts_dev = cl.array.to_device( + queue, traversal.from_sep_close_smaller_starts) + self.from_sep_close_smaller_lists_dev = cl.array.to_device( + queue, traversal.from_sep_close_smaller_lists) # list 4 close - self.from_sep_close_bigger_starts_dev = cl.array.to_device( - queue, traversal.from_sep_close_bigger_starts) - self.from_sep_close_bigger_lists_dev = cl.array.to_device( - queue, traversal.from_sep_close_bigger_lists) + if traversal.from_sep_close_bigger_starts is not None: + self.from_sep_close_bigger_starts_dev = cl.array.to_device( + queue, traversal.from_sep_close_bigger_starts) + self.from_sep_close_bigger_lists_dev = cl.array.to_device( + queue, traversal.from_sep_close_bigger_lists) # helper kernel for ancestor box query self.mark_parent_knl = cl.elementwise.ElementwiseKernel( -- GitLab From 360191fbbcee703f8124146c3387b160554ff523 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 19 Jun 2018 17:10:01 +0800 Subject: [PATCH 107/260] Tweak interface so that partition work is outside local tree build --- boxtree/distributed.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 3f4b6f2..e1633b2 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -571,7 +571,7 @@ def gen_local_tree_helper(tree, src_box_mask, tgt_box_mask, local_tree, local_data["tgt_box_mask"] = tgt_box_mask -def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): +def generate_local_tree(traversal, responsible_boxes_list, comm=MPI.COMM_WORLD): # Get MPI information current_rank = comm.Get_rank() @@ -602,14 +602,6 @@ def generate_local_tree(traversal, comm=MPI.COMM_WORLD, workload_weight=None): local_tree_builder = LocalTreeBuilder(tree) - if workload_weight is None: - workload_weight = WorkloadWeight( - direct=1, m2l=1, m2p=1, p2l=1, multipole=5) - - from boxtree.partition import partition_work - responsible_boxes_list = partition_work(traversal, total_rank, - workload_weight) - box_mpole_is_used = cl.array.empty( queue, (total_rank, tree.nboxes,), dtype=np.int8 ) @@ -1280,8 +1272,19 @@ class DistributedFMMInfo(object): 
well_sep_is_n_away = None
         well_sep_is_n_away = comm.bcast(well_sep_is_n_away, root=0)
 
+        if current_rank == 0:
+            from boxtree.partition import partition_work
+            workload_weight = WorkloadWeight(
+                direct=1, m2l=1, m2p=1, p2l=1, multipole=5
+            )
+            responsible_boxes_list = partition_work(
+                global_trav, comm.Get_size(), workload_weight
+            )
+        else:
+            responsible_boxes_list = None
+
         self.local_tree, self.local_data, self.box_bounding_box, _ = \
-            generate_local_tree(self.global_trav)
+            generate_local_tree(self.global_trav, responsible_boxes_list)
         self.local_trav = generate_local_travs(
             self.local_tree, self.box_bounding_box, comm=comm,
             well_sep_is_n_away=well_sep_is_n_away)
-- 
GitLab


From 11bf5eea6ba3e36740cea105a454af6d26e4e9eb Mon Sep 17 00:00:00 2001
From: Hao Gao
Date: Wed, 20 Jun 2018 10:20:19 +0800
Subject: [PATCH 108/260] Add get_boxes_mask

---
 boxtree/distributed.py | 22 +++-------------------
 boxtree/partition.py   | 39 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 42 insertions(+), 19 deletions(-)

diff --git a/boxtree/distributed.py b/boxtree/distributed.py
index e1633b2..25f8688 100644
--- a/boxtree/distributed.py
+++ b/boxtree/distributed.py
@@ -618,25 +618,9 @@ def generate_local_tree(traversal, responsible_boxes_list, comm=MPI.COMM_WORLD):
 
     for irank in range(total_rank):
 
-        responsible_boxes_mask = np.zeros((tree.nboxes,), dtype=np.int8)
-        responsible_boxes_mask[responsible_boxes_list[irank]] = 1
-        responsible_boxes_mask = cl.array.to_device(
-            queue, responsible_boxes_mask)
-
-        # Calculate ancestors of responsible boxes
-        ancestor_boxes = responsible_box_query.ancestor_boxes_mask(
-            responsible_boxes_mask
-        )
-
-        # In order to evaluate, each rank needs sources in boxes in
-        # *src_boxes_mask*
-        src_boxes_mask = responsible_box_query.src_boxes_mask(
-            responsible_boxes_mask, ancestor_boxes
-        )
-
-        box_mpole_is_used[irank, :] = responsible_box_query.multipole_boxes_mask(
-            responsible_boxes_mask, ancestor_boxes
-        )
+        (responsible_boxes_mask, ancestor_boxes, src_boxes_mask,
+            box_mpole_is_used[irank]) = \
+            responsible_box_query.get_boxes_mask(responsible_boxes_list[irank])
 
         local_tree[irank], local_data[irank] = \
             local_tree_builder.from_global_tree(
diff --git a/boxtree/partition.py b/boxtree/partition.py
index 4f1cdfb..f0485a9 100644
--- a/boxtree/partition.py
+++ b/boxtree/partition.py
@@ -380,3 +380,42 @@ class ResponsibleBoxesQuery(object):
             multipole_boxes_mask.finish()
 
         return multipole_boxes_mask
+
+    def get_boxes_mask(self, responsible_boxes_list):
+        """
+        Given a list of responsible boxes for a process, calculates the following
+        four masks:
+
+        responsible_box_mask: Current process will evaluate target potentials and
+        multipole expansions in these boxes. Sources and targets in these boxes
+        are needed.
+
+        ancestor_boxes_mask: The responsible boxes and the ancestor of the
+        responsible boxes.
+
+        src_boxes_mask: Current process needs sources but not targets in these boxes.
+
+        multipole_boxes_mask: Current process needs multipole expansions in these
+        boxes.
+
+        :param responsible_boxes_list: A numpy array of responsible box indices.
+
+        :returns: responsible_box_mask, ancestor_boxes_mask, src_boxes_mask and
+            multipole_boxes_mask, as described above.
+ """ + + responsible_boxes_mask = np.zeros((self.tree.nboxes,), dtype=np.int8) + responsible_boxes_mask[responsible_boxes_list] = 1 + responsible_boxes_mask = cl.array.to_device( + self.queue, responsible_boxes_mask) + + ancestor_boxes_mask = self.ancestor_boxes_mask(responsible_boxes_mask) + + src_boxes_mask = self.src_boxes_mask( + responsible_boxes_mask, ancestor_boxes_mask) + + multipole_boxes_mask = self.multipole_boxes_mask( + responsible_boxes_mask, ancestor_boxes_mask) + + return (responsible_boxes_mask, ancestor_boxes_mask, src_boxes_mask, + multipole_boxes_mask) -- GitLab From af15f0b7a925eff593eab1a20afa5fe2a80e9c31 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 20 Jun 2018 10:46:24 +0800 Subject: [PATCH 109/260] Make responsible_box_query an argument --- boxtree/distributed.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/boxtree/distributed.py b/boxtree/distributed.py index 25f8688..742d651 100644 --- a/boxtree/distributed.py +++ b/boxtree/distributed.py @@ -571,7 +571,8 @@ def gen_local_tree_helper(tree, src_box_mask, tgt_box_mask, local_tree, local_data["tgt_box_mask"] = tgt_box_mask -def generate_local_tree(traversal, responsible_boxes_list, comm=MPI.COMM_WORLD): +def generate_local_tree(traversal, responsible_boxes_list, responsible_box_query, + comm=MPI.COMM_WORLD): # Get MPI information current_rank = comm.Get_rank() @@ -597,9 +598,6 @@ def generate_local_tree(traversal, responsible_boxes_list, comm=MPI.COMM_WORLD): # kernels for generating local trees knls = get_gen_local_tree_kernels(tree) - from boxtree.partition import ResponsibleBoxesQuery - responsible_box_query = ResponsibleBoxesQuery(queue, traversal) - local_tree_builder = LocalTreeBuilder(tree) box_mpole_is_used = cl.array.empty( @@ -1267,8 +1265,16 @@ class DistributedFMMInfo(object): else: responsible_boxes_list = None + if current_rank == 0: + from boxtree.partition import ResponsibleBoxesQuery + responsible_box_query = ResponsibleBoxesQuery(queue, global_trav) + else: + responsible_box_query = None + self.local_tree, self.local_data, self.box_bounding_box, _ = \ - generate_local_tree(self.global_trav, responsible_boxes_list) + generate_local_tree(self.global_trav, responsible_boxes_list, + responsible_box_query) + self.local_trav = generate_local_travs( self.local_tree, self.box_bounding_box, comm=comm, well_sep_is_n_away=well_sep_is_n_away) -- GitLab From 9383c536fb6cad279d2460a751971c3076d5b932 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 22 Jun 2018 16:55:03 +0800 Subject: [PATCH 110/260] Update documentation --- boxtree/partition.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/boxtree/partition.py b/boxtree/partition.py index f0485a9..f7dc353 100644 --- a/boxtree/partition.py +++ b/boxtree/partition.py @@ -266,8 +266,8 @@ class ResponsibleBoxesQuery(object): :param responsible_boxes_mask: A pyopencl.array.Array object of shape (tree.nboxes,) whose ith entry is 1 iff i is a responsible box. :return: A pyopencl.array.Array object of shape (tree.nboxes,) whose ith - entry is 1 iff i is either a responsible box or an ancestor of the - responsible boxes specified by responsible_boxes_mask. + entry is 1 iff i is an ancestor of the responsible boxes specified by + responsible_boxes_mask. """ ancestor_boxes = cl.array.zeros( self.queue, (self.tree.nboxes,), dtype=np.int8) @@ -390,8 +390,7 @@ class ResponsibleBoxesQuery(object): multipole expansions in these boxes. Sources and targets in these boxes are needed. 
-        ancestor_boxes_mask: The responsible boxes and the ancestor of the
-        responsible boxes.
+        ancestor_boxes_mask: The ancestors of the responsible boxes.
 
         src_boxes_mask: Current process needs sources but not targets in these boxes.
-- 
GitLab


From abce6ff5dd616cfcd411e0dd4cdf8dcf21afa53e Mon Sep 17 00:00:00 2001
From: Hao Gao
Date: Mon, 25 Jun 2018 10:50:51 +0800
Subject: [PATCH 111/260] Use to_device API

---
 boxtree/distributed.py | 38 ++------------------------------------
 1 file changed, 2 insertions(+), 36 deletions(-)

diff --git a/boxtree/distributed.py b/boxtree/distributed.py
index 742d651..f1ae562 100644
--- a/boxtree/distributed.py
+++ b/boxtree/distributed.py
@@ -51,34 +51,6 @@ logger.info("Process %d of %d on %s with ctx %s." % (
 )
 
-def tree_to_device(queue, tree, additional_fields_to_device=[]):
-    field_to_device = [
-        "box_centers", "box_child_ids", "box_flags", "box_levels",
-        "box_parent_ids", "box_source_counts_cumul",
-        "box_source_counts_nonchild", "box_source_starts",
-        "box_target_counts_cumul", "box_target_counts_nonchild",
-        "box_target_starts", "level_start_box_nrs_dev", "sources", "targets",
-    ] + additional_fields_to_device
-    d_tree = tree.copy()
-    for field in field_to_device:
-        current_obj = d_tree.__getattribute__(field)
-        if current_obj.dtype == object:
-            new_obj = np.empty_like(current_obj)
-            for i in range(current_obj.shape[0]):
-                new_obj[i] = cl.array.to_device(queue, current_obj[i])
-            d_tree.__setattr__(field, new_obj)
-        else:
-            d_tree.__setattr__(
-                field, cl.array.to_device(queue, current_obj))
-
-    if tree.sources_have_extent:
-        d_tree.source_radii = cl.array.to_device(queue, d_tree.source_radii)
-    if tree.targets_have_extent:
-        d_tree.target_radii = cl.array.to_device(queue, d_tree.target_radii)
-
-    return d_tree
-
-
 class LocalTreeBuilder(object):
 
     def __init__(self, global_tree):
@@ -149,12 +121,6 @@ class LocalTree(Tree):
     def ntargets(self):
         return self.targets[0].shape[0]
 
-    def to_device(self, queue):
-        additional_fields_to_device = ["responsible_boxes_list", "ancestor_mask",
-                                       "box_to_user_starts", "box_to_user_lists"]
-
-        return tree_to_device(queue, self, additional_fields_to_device)
-
 
 # {{{ distributed fmm wrangler
 
@@ -436,7 +402,7 @@ def gen_local_tree_helper(tree, src_box_mask, tgt_box_mask, local_tree,
     """ This helper function generates a copy of the tree but with subset of
     particles, and fetch the generated fields to *local_tree*.
""" - d_tree = tree_to_device(queue, tree) + d_tree = tree.to_device(queue).with_queue(queue) nsources = tree.nsources # source particle mask @@ -759,7 +725,7 @@ def generate_local_travs( start_time = time.time() - d_tree = local_tree.to_device(queue) + d_tree = local_tree.to_device(queue).with_queue(queue) # Modify box flags for targets from boxtree import box_flags_enum -- GitLab From 5f56c77b349901f8450251bf8ba6615b830e0f65 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 29 Jun 2018 12:02:05 +0800 Subject: [PATCH 112/260] Refactor code, move distributed implementation into a submodule --- boxtree/distributed.py | 1258 ------------------------ boxtree/distributed/__init__.py | 122 +++ boxtree/distributed/calculation.py | 538 ++++++++++ boxtree/distributed/local_traversal.py | 123 +++ boxtree/distributed/local_tree.py | 720 ++++++++++++++ boxtree/{ => distributed}/partition.py | 10 +- test/test_distributed.py | 24 +- 7 files changed, 1522 insertions(+), 1273 deletions(-) delete mode 100644 boxtree/distributed.py create mode 100644 boxtree/distributed/__init__.py create mode 100644 boxtree/distributed/calculation.py create mode 100644 boxtree/distributed/local_traversal.py create mode 100644 boxtree/distributed/local_tree.py rename boxtree/{ => distributed}/partition.py (99%) diff --git a/boxtree/distributed.py b/boxtree/distributed.py deleted file mode 100644 index f1ae562..0000000 --- a/boxtree/distributed.py +++ /dev/null @@ -1,1258 +0,0 @@ -from __future__ import division -from mpi4py import MPI -import numpy as np -import loopy as lp -from loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_1 # noqa: F401 -import pyopencl as cl -from mako.template import Template -from pyopencl.tools import dtype_to_ctype -from pyopencl.scan import GenericScanKernel -from pytools import memoize_method -from boxtree import Tree -from collections import namedtuple -from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler -import time - -__copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ - Copyright (C) 2017 Hao Gao" - -__license__ = """ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -""" - -import logging -logger = logging.getLogger(__name__) - -ctx = cl.create_some_context() -queue = cl.CommandQueue(ctx) - -# Log OpenCL context information -logger.info("Process %d of %d on %s with ctx %s." 
% ( - MPI.COMM_WORLD.Get_rank(), - MPI.COMM_WORLD.Get_size(), - MPI.Get_processor_name(), - queue.context.devices) -) - - -class LocalTreeBuilder(object): - - def __init__(self, global_tree): - self.global_tree = global_tree - self.knls = get_gen_local_tree_kernels(global_tree) - - def from_global_tree(self, responsible_boxes_list, responsible_boxes_mask, - src_boxes_mask, ancestor_mask): - - local_tree = self.global_tree.copy( - responsible_boxes_list=responsible_boxes_list, - ancestor_mask=ancestor_mask.get(), - box_to_user_starts=None, - box_to_user_lists=None, - _dimensions=None, - _ntargets=None, - _nsources=None, - _particle_dtype=None, - _radii_dtype=None - ) - - local_tree.user_source_ids = None - local_tree.sorted_target_ids = None - - local_data = { - "src_mask": None, "src_scan": None, "nsources": None, - "tgt_mask": None, "tgt_scan": None, "ntargets": None - } - - gen_local_tree_helper( - self.global_tree, - src_boxes_mask, - responsible_boxes_mask, - local_tree, - local_data, - self.knls - ) - - local_tree.__class__ = LocalTree - - return local_tree, local_data - - -class LocalTree(Tree): - """ - .. attribute:: box_to_user_starts - - ``box_id_t [nboxes + 1]`` - - .. attribute:: box_to_user_lists - - ``int32 [*]`` - - A :ref:`csr` array. For each box, the list of processes which own - targets that *use* the multipole expansion at this box, via either List - 3 or (possibly downward propagated from an ancestor) List 2. - """ - - @property - def nboxes(self): - return self.box_source_starts.shape[0] - - @property - def nsources(self): - return self.sources[0].shape[0] - - @property - def ntargets(self): - return self.targets[0].shape[0] - - -# {{{ distributed fmm wrangler - -class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): - - def __init__(self, tree, helmholtz_k, fmm_level_to_nterms=None): - super(DistributedFMMLibExpansionWrangler, self).__init__( - tree, helmholtz_k, fmm_level_to_nterms - ) - - def slice_mpoles(self, mpoles, slice_indices): - if len(slice_indices) == 0: - return np.empty((0,), dtype=mpoles.dtype) - - level_start_slice_indices = np.searchsorted( - slice_indices, self.tree.level_start_box_nrs) - mpoles_list = [] - - for ilevel in range(self.tree.nlevels): - start, stop = level_start_slice_indices[ilevel:ilevel+2] - if stop > start: - level_start_box_idx, mpoles_current_level = \ - self.multipole_expansions_view(mpoles, ilevel) - mpoles_list.append( - mpoles_current_level[ - slice_indices[start:stop] - level_start_box_idx - ].reshape(-1) - ) - - return np.concatenate(mpoles_list) - - def update_mpoles(self, mpoles, mpole_updates, slice_indices): - if len(slice_indices) == 0: - return - - level_start_slice_indices = np.searchsorted( - slice_indices, self.tree.level_start_box_nrs) - mpole_updates_start = 0 - - for ilevel in range(self.tree.nlevels): - start, stop = level_start_slice_indices[ilevel:ilevel+2] - if stop > start: - level_start_box_idx, mpoles_current_level = \ - self.multipole_expansions_view(mpoles, ilevel) - mpoles_shape = (stop - start,) + mpoles_current_level.shape[1:] - - from pytools import product - mpole_updates_end = mpole_updates_start + product(mpoles_shape) - - mpoles_current_level[ - slice_indices[start:stop] - level_start_box_idx - ] += mpole_updates[ - mpole_updates_start:mpole_updates_end - ].reshape(mpoles_shape) - - mpole_updates_start = mpole_updates_end - - def empty_box_in_subrange_mask(self): - return cl.array.empty(queue, self.tree.nboxes, dtype=np.int8) - - @memoize_method - def 
find_boxes_used_by_subrange_kernel(self): - knl = lp.make_kernel( - [ - "{[ibox]: 0 <= ibox < nboxes}", - "{[iuser]: iuser_start <= iuser < iuser_end}", - ], - """ - for ibox - <> iuser_start = box_to_user_starts[ibox] - <> iuser_end = box_to_user_starts[ibox + 1] - for iuser - <> useri = box_to_user_lists[iuser] - <> in_subrange = subrange_start <= useri and useri < subrange_end - if in_subrange - box_in_subrange[ibox] = 1 - end - end - end - """, - [ - lp.ValueArg("subrange_start, subrange_end", np.int32), - lp.GlobalArg("box_to_user_lists", shape=None), - "..." - ]) - knl = lp.split_iname(knl, "ibox", 16, outer_tag="g.0", inner_tag="l.0") - return knl - - def find_boxes_used_by_subrange(self, box_in_subrange, subrange, - box_to_user_starts, box_to_user_lists): - knl = self.find_boxes_used_by_subrange_kernel() - knl(queue, - subrange_start=subrange[0], - subrange_end=subrange[1], - box_to_user_starts=box_to_user_starts, - box_to_user_lists=box_to_user_lists, - box_in_subrange=box_in_subrange) - - box_in_subrange.finish() - -# }}} - - -MPITags = dict( - DIST_TREE=0, - DIST_SOURCES=1, - DIST_TARGETS=2, - DIST_RADII=3, - DIST_WEIGHT=4, - GATHER_POTENTIALS=5, - REDUCE_POTENTIALS=6, - REDUCE_INDICES=7 -) - -WorkloadWeight = namedtuple('Workload', ['direct', 'm2l', 'm2p', 'p2l', 'multipole']) - - -def dtype_to_mpi(dtype): - """ This function translates a numpy.dtype object into the corresponding type - used in mpi4py. - """ - if hasattr(MPI, '_typedict'): - mpi_type = MPI._typedict[np.dtype(dtype).char] - elif hasattr(MPI, '__TypeDict__'): - mpi_type = MPI.__TypeDict__[np.dtype(dtype).char] - else: - raise RuntimeError("There is no dictionary to translate from Numpy dtype to " - "MPI type") - return mpi_type - - -def get_gen_local_tree_kernels(tree): - particle_mask_knl = cl.elementwise.ElementwiseKernel( - queue.context, - arguments=Template(""" - __global char *responsible_boxes, - __global ${particle_id_t} *box_particle_starts, - __global ${particle_id_t} *box_particle_counts_nonchild, - __global ${particle_id_t} *particle_mask - """, strict_undefined=True).render( - particle_id_t=dtype_to_ctype(tree.particle_id_dtype) - ), - operation=Template(""" - if(responsible_boxes[i]) { - for(${particle_id_t} pid = box_particle_starts[i]; - pid < box_particle_starts[i] + box_particle_counts_nonchild[i]; - ++pid) { - particle_mask[pid] = 1; - } - } - """).render(particle_id_t=dtype_to_ctype(tree.particle_id_dtype)) - ) - - mask_scan_knl = GenericScanKernel( - queue.context, tree.particle_id_dtype, - arguments=Template(""" - __global ${mask_t} *ary, - __global ${mask_t} *scan - """, strict_undefined=True).render( - mask_t=dtype_to_ctype(tree.particle_id_dtype) - ), - input_expr="ary[i]", - scan_expr="a+b", neutral="0", - output_statement="scan[i + 1] = item;" - ) - - fetch_local_paticles_arguments = Template(""" - __global const ${mask_t} *particle_mask, - __global const ${mask_t} *particle_scan - % for dim in range(ndims): - , __global const ${coord_t} *particles_${dim} - % endfor - % for dim in range(ndims): - , __global ${coord_t} *local_particles_${dim} - % endfor - % if particles_have_extent: - , __global const ${coord_t} *particle_radii - , __global ${coord_t} *local_particle_radii - % endif - """, strict_undefined=True) - - fetch_local_particles_prg = Template(""" - if(particle_mask[i]) { - ${particle_id_t} des = particle_scan[i]; - % for dim in range(ndims): - local_particles_${dim}[des] = particles_${dim}[i]; - % endfor - % if particles_have_extent: - local_particle_radii[des] = 
particle_radii[i]; - % endif - } - """, strict_undefined=True) - - fetch_local_src_knl = cl.elementwise.ElementwiseKernel( - queue.context, - fetch_local_paticles_arguments.render( - mask_t=dtype_to_ctype(tree.particle_id_dtype), - coord_t=dtype_to_ctype(tree.coord_dtype), - ndims=tree.dimensions, - particles_have_extent=tree.sources_have_extent - ), - fetch_local_particles_prg.render( - particle_id_t=dtype_to_ctype(tree.particle_id_dtype), - ndims=tree.dimensions, - particles_have_extent=tree.sources_have_extent - ) - ) - - fetch_local_tgt_knl = cl.elementwise.ElementwiseKernel( - queue.context, - fetch_local_paticles_arguments.render( - mask_t=dtype_to_ctype(tree.particle_id_dtype), - coord_t=dtype_to_ctype(tree.coord_dtype), - ndims=tree.dimensions, - particles_have_extent=tree.targets_have_extent - ), - fetch_local_particles_prg.render( - particle_id_t=dtype_to_ctype(tree.particle_id_dtype), - ndims=tree.dimensions, - particles_have_extent=tree.targets_have_extent - ) - ) - - generate_box_particle_starts = cl.elementwise.ElementwiseKernel( - queue.context, - Template(""" - __global ${particle_id_t} *old_starts, - __global ${particle_id_t} *particle_scan, - __global ${particle_id_t} *new_starts - """, strict_undefined=True).render( - particle_id_t=dtype_to_ctype(tree.particle_id_dtype) - ), - "new_starts[i] = particle_scan[old_starts[i]]", - name="generate_box_particle_starts" - ) - - generate_box_particle_counts_nonchild = cl.elementwise.ElementwiseKernel( - queue.context, - Template(""" - __global char *res_boxes, - __global ${particle_id_t} *old_counts_nonchild, - __global ${particle_id_t} *new_counts_nonchild - """, strict_undefined=True).render( - particle_id_t=dtype_to_ctype(tree.particle_id_dtype) - ), - "if(res_boxes[i]) new_counts_nonchild[i] = old_counts_nonchild[i];" - ) - - generate_box_particle_counts_cumul = cl.elementwise.ElementwiseKernel( - queue.context, - Template(""" - __global ${particle_id_t} *old_counts_cumul, - __global ${particle_id_t} *old_starts, - __global ${particle_id_t} *new_counts_cumul, - __global ${particle_id_t} *particle_scan - """, strict_undefined=True).render( - particle_id_t=dtype_to_ctype(tree.particle_id_dtype) - ), - """ - new_counts_cumul[i] = - particle_scan[old_starts[i] + old_counts_cumul[i]] - - particle_scan[old_starts[i]] - """ - ) - - return dict( - particle_mask_knl=particle_mask_knl, - mask_scan_knl=mask_scan_knl, - fetch_local_src_knl=fetch_local_src_knl, - fetch_local_tgt_knl=fetch_local_tgt_knl, - generate_box_particle_starts=generate_box_particle_starts, - generate_box_particle_counts_nonchild=generate_box_particle_counts_nonchild, - generate_box_particle_counts_cumul=generate_box_particle_counts_cumul - ) - - -def gen_local_tree_helper(tree, src_box_mask, tgt_box_mask, local_tree, - local_data, knls): - """ This helper function generates a copy of the tree but with subset of - particles, and fetch the generated fields to *local_tree*. 
- """ - d_tree = tree.to_device(queue).with_queue(queue) - nsources = tree.nsources - - # source particle mask - src_particle_mask = cl.array.zeros(queue, (nsources,), - dtype=tree.particle_id_dtype) - knls["particle_mask_knl"](src_box_mask, - d_tree.box_source_starts, - d_tree.box_source_counts_nonchild, - src_particle_mask) - - # scan of source particle mask - src_particle_scan = cl.array.empty(queue, (nsources + 1,), - dtype=tree.particle_id_dtype) - src_particle_scan[0] = 0 - knls["mask_scan_knl"](src_particle_mask, src_particle_scan) - - # local sources - local_nsources = src_particle_scan[-1].get(queue) - local_sources = cl.array.empty( - queue, (tree.dimensions, local_nsources), dtype=tree.coord_dtype) - local_sources_list = [local_sources[idim, :] for idim in range(tree.dimensions)] - - assert(tree.sources_have_extent is False) - - knls["fetch_local_src_knl"](src_particle_mask, src_particle_scan, - *d_tree.sources.tolist(), - *local_sources_list) - - # box_source_starts - local_box_source_starts = cl.array.empty(queue, (tree.nboxes,), - dtype=tree.particle_id_dtype) - knls["generate_box_particle_starts"](d_tree.box_source_starts, src_particle_scan, - local_box_source_starts) - - # box_source_counts_nonchild - local_box_source_counts_nonchild = cl.array.zeros( - queue, (tree.nboxes,), dtype=tree.particle_id_dtype) - knls["generate_box_particle_counts_nonchild"](src_box_mask, - d_tree.box_source_counts_nonchild, - local_box_source_counts_nonchild) - - # box_source_counts_cumul - local_box_source_counts_cumul = cl.array.empty( - queue, (tree.nboxes,), dtype=tree.particle_id_dtype) - knls["generate_box_particle_counts_cumul"](d_tree.box_source_counts_cumul, - d_tree.box_source_starts, - local_box_source_counts_cumul, - src_particle_scan) - - ntargets = tree.ntargets - # target particle mask - tgt_particle_mask = cl.array.zeros(queue, (ntargets,), - dtype=tree.particle_id_dtype) - knls["particle_mask_knl"](tgt_box_mask, - d_tree.box_target_starts, - d_tree.box_target_counts_nonchild, - tgt_particle_mask) - - # scan of target particle mask - tgt_particle_scan = cl.array.empty(queue, (ntargets + 1,), - dtype=tree.particle_id_dtype) - tgt_particle_scan[0] = 0 - knls["mask_scan_knl"](tgt_particle_mask, tgt_particle_scan) - - # local targets - local_ntargets = tgt_particle_scan[-1].get(queue) - - local_targets = cl.array.empty( - queue, (tree.dimensions, local_ntargets), dtype=tree.coord_dtype) - local_targets_list = [local_targets[idim, :] for idim in range(tree.dimensions)] - - if tree.targets_have_extent: - local_target_radii = cl.array.empty(queue, (local_ntargets,), - dtype=tree.coord_dtype) - knls["fetch_local_tgt_knl"](tgt_particle_mask, tgt_particle_scan, - *d_tree.targets.tolist(), - *local_targets_list, - d_tree.target_radii, local_target_radii) - else: - knls["fetch_local_tgt_knl"](tgt_particle_mask, tgt_particle_scan, - *d_tree.targets.tolist(), - *local_targets_list) - - # box_target_starts - local_box_target_starts = cl.array.empty(queue, (tree.nboxes,), - dtype=tree.particle_id_dtype) - knls["generate_box_particle_starts"](d_tree.box_target_starts, tgt_particle_scan, - local_box_target_starts) - - # box_target_counts_nonchild - local_box_target_counts_nonchild = cl.array.zeros( - queue, (tree.nboxes,), dtype=tree.particle_id_dtype) - knls["generate_box_particle_counts_nonchild"](tgt_box_mask, - d_tree.box_target_counts_nonchild, - local_box_target_counts_nonchild) - - # box_target_counts_cumul - local_box_target_counts_cumul = cl.array.empty( - queue, (tree.nboxes,), 
dtype=tree.particle_id_dtype) - knls["generate_box_particle_counts_cumul"](d_tree.box_target_counts_cumul, - d_tree.box_target_starts, - local_box_target_counts_cumul, - tgt_particle_scan) - - # Fetch fields to local_tree - local_sources = local_sources.get(queue=queue) - local_tree.sources = local_sources - - local_targets = local_targets.get(queue=queue) - local_tree.targets = local_targets - - if tree.targets_have_extent: - local_tree.target_radii = local_target_radii.get(queue=queue) - local_tree.box_source_starts = local_box_source_starts.get(queue=queue) - local_tree.box_source_counts_nonchild = \ - local_box_source_counts_nonchild.get(queue=queue) - local_tree.box_source_counts_cumul = \ - local_box_source_counts_cumul.get(queue=queue) - local_tree.box_target_starts = local_box_target_starts.get(queue=queue) - local_tree.box_target_counts_nonchild = \ - local_box_target_counts_nonchild.get(queue=queue) - local_tree.box_target_counts_cumul = \ - local_box_target_counts_cumul.get(queue=queue) - - # Fetch fields to local_data - local_data["src_mask"] = src_particle_mask - local_data["src_scan"] = src_particle_scan - local_data["nsources"] = local_nsources - local_data["tgt_mask"] = tgt_particle_mask - local_data["tgt_scan"] = tgt_particle_scan - local_data["ntargets"] = local_ntargets - local_data["tgt_box_mask"] = tgt_box_mask - - -def generate_local_tree(traversal, responsible_boxes_list, responsible_box_query, - comm=MPI.COMM_WORLD): - - # Get MPI information - current_rank = comm.Get_rank() - total_rank = comm.Get_size() - - if current_rank == 0: - start_time = time.time() - - if current_rank == 0: - local_data = np.empty((total_rank,), dtype=object) - else: - local_data = None - - knls = None - box_to_user_starts = None - box_to_user_lists = None - - if current_rank == 0: - tree = traversal.tree - - # {{{ Partition the work - - # kernels for generating local trees - knls = get_gen_local_tree_kernels(tree) - - local_tree_builder = LocalTreeBuilder(tree) - - box_mpole_is_used = cl.array.empty( - queue, (total_rank, tree.nboxes,), dtype=np.int8 - ) - - # request objects for non-blocking communication - tree_req = [] - particles_req = [] - - # buffer holding communication data so that it is not garbage collected - local_tree = np.empty((total_rank,), dtype=object) - local_targets = np.empty((total_rank,), dtype=object) - local_sources = np.empty((total_rank,), dtype=object) - local_target_radii = np.empty((total_rank,), dtype=object) - - for irank in range(total_rank): - - (responsible_boxes_mask, ancestor_boxes, src_boxes_mask, - box_mpole_is_used[irank]) = \ - responsible_box_query.get_boxes_mask(responsible_boxes_list[irank]) - - local_tree[irank], local_data[irank] = \ - local_tree_builder.from_global_tree( - responsible_boxes_list[irank], responsible_boxes_mask, - src_boxes_mask, ancestor_boxes - ) - - # master process does not need to communicate with itself - if irank == 0: - continue - - # {{{ Peel sources and targets off tree - - local_tree[irank]._dimensions = local_tree[irank].dimensions - - local_tree[irank]._ntargets = local_tree[irank].ntargets - local_targets[irank] = local_tree[irank].targets - local_tree[irank].targets = None - - local_tree[irank]._nsources = local_tree[irank].nsources - local_sources[irank] = local_tree[irank].sources - local_tree[irank].sources = None - - local_target_radii[irank] = local_tree[irank].target_radii - local_tree[irank].target_radii = None - - local_tree[irank]._particle_dtype = tree.sources[0].dtype - local_tree[irank]._radii_dtype = 
tree.target_radii.dtype - - # }}} - - # Send the local tree skeleton without sources and targets - tree_req.append(comm.isend( - local_tree[irank], dest=irank, tag=MPITags["DIST_TREE"])) - - # Send the sources and targets - particles_req.append(comm.Isend( - local_sources[irank], dest=irank, tag=MPITags["DIST_SOURCES"])) - - particles_req.append(comm.Isend( - local_targets[irank], dest=irank, tag=MPITags["DIST_TARGETS"])) - - if tree.targets_have_extent: - particles_req.append(comm.Isend( - local_target_radii[irank], dest=irank, tag=MPITags["DIST_RADII"]) - ) - - from boxtree.tools import MaskCompressorKernel - matcompr = MaskCompressorKernel(ctx) - (box_to_user_starts, box_to_user_lists, evt) = \ - matcompr(queue, box_mpole_is_used.transpose(), - list_dtype=np.int32) - - cl.wait_for_events([evt]) - del box_mpole_is_used - - box_to_user_starts = box_to_user_starts.get() - box_to_user_lists = box_to_user_lists.get() - - logger.debug("computing box_to_user: done") - - # }}} - - # Receive the local tree from root - if current_rank == 0: - MPI.Request.Waitall(tree_req) - local_tree = local_tree[0] - else: - local_tree = comm.recv(source=0, tag=MPITags["DIST_TREE"]) - - # Receive sources and targets - if current_rank == 0: - MPI.Request.Waitall(particles_req) - else: - reqs = [] - - local_tree.sources = np.empty( - (local_tree._dimensions, local_tree._nsources), - dtype=local_tree._particle_dtype - ) - reqs.append(comm.Irecv( - local_tree.sources, source=0, tag=MPITags["DIST_SOURCES"])) - - local_tree.targets = np.empty( - (local_tree._dimensions, local_tree._ntargets), - dtype=local_tree._particle_dtype - ) - reqs.append(comm.Irecv( - local_tree.targets, source=0, tag=MPITags["DIST_TARGETS"])) - - if local_tree.targets_have_extent: - local_tree.target_radii = np.empty( - (local_tree._ntargets,), - dtype=local_tree._radii_dtype - ) - reqs.append(comm.Irecv( - local_tree.target_radii, source=0, tag=MPITags["DIST_RADII"])) - - MPI.Request.Waitall(reqs) - - # Receive box extent - if current_rank == 0: - box_target_bounding_box_min = traversal.box_target_bounding_box_min - box_target_bounding_box_max = traversal.box_target_bounding_box_max - else: - box_target_bounding_box_min = np.empty( - (local_tree.dimensions, local_tree.aligned_nboxes), - dtype=local_tree.coord_dtype - ) - box_target_bounding_box_max = np.empty( - (local_tree.dimensions, local_tree.aligned_nboxes), - dtype=local_tree.coord_dtype - ) - comm.Bcast(box_target_bounding_box_min, root=0) - comm.Bcast(box_target_bounding_box_max, root=0) - box_bounding_box = { - "min": box_target_bounding_box_min, - "max": box_target_bounding_box_max - } - - box_to_user_starts = comm.bcast(box_to_user_starts, root=0) - box_to_user_lists = comm.bcast(box_to_user_lists, root=0) - local_tree.box_to_user_starts = box_to_user_starts - local_tree.box_to_user_lists = box_to_user_lists - - if current_rank == 0: - logger.info("Distribute local tree in {} sec.".format( - str(time.time() - start_time)) - ) - - return local_tree, local_data, box_bounding_box, knls - - -def generate_local_travs( - local_tree, box_bounding_box=None, comm=MPI.COMM_WORLD, - well_sep_is_n_away=1, from_sep_smaller_crit=None, - merge_close_lists=False): - - start_time = time.time() - - d_tree = local_tree.to_device(queue).with_queue(queue) - - # Modify box flags for targets - from boxtree import box_flags_enum - box_flag_t = dtype_to_ctype(box_flags_enum.dtype) - modify_target_flags_knl = cl.elementwise.ElementwiseKernel( - queue.context, - Template(""" - __global ${particle_id_t} 
*box_target_counts_nonchild, - __global ${particle_id_t} *box_target_counts_cumul, - __global ${box_flag_t} *box_flags - """).render(particle_id_t=dtype_to_ctype(local_tree.particle_id_dtype), - box_flag_t=box_flag_t), - Template(""" - box_flags[i] &= (~${HAS_OWN_TARGETS}); - box_flags[i] &= (~${HAS_CHILD_TARGETS}); - if(box_target_counts_nonchild[i]) box_flags[i] |= ${HAS_OWN_TARGETS}; - if(box_target_counts_nonchild[i] < box_target_counts_cumul[i]) - box_flags[i] |= ${HAS_CHILD_TARGETS}; - """).render(HAS_OWN_TARGETS=("(" + box_flag_t + ") " + - str(box_flags_enum.HAS_OWN_TARGETS)), - HAS_CHILD_TARGETS=("(" + box_flag_t + ") " + - str(box_flags_enum.HAS_CHILD_TARGETS))) - ) - - modify_target_flags_knl(d_tree.box_target_counts_nonchild, - d_tree.box_target_counts_cumul, - d_tree.box_flags) - - # Generate local source flags - local_box_flags = d_tree.box_flags & 250 - modify_own_sources_knl = cl.elementwise.ElementwiseKernel( - queue.context, - Template(""" - __global ${box_id_t} *responsible_box_list, - __global ${box_flag_t} *box_flags - """).render(box_id_t=dtype_to_ctype(local_tree.box_id_dtype), - box_flag_t=box_flag_t), - Template(r""" - box_flags[responsible_box_list[i]] |= ${HAS_OWN_SOURCES}; - """).render(HAS_OWN_SOURCES=("(" + box_flag_t + ") " + - str(box_flags_enum.HAS_OWN_SOURCES))) - ) - - modify_child_sources_knl = cl.elementwise.ElementwiseKernel( - queue.context, - Template(""" - __global char *ancestor_box_mask, - __global ${box_flag_t} *box_flags - """).render(box_flag_t=box_flag_t), - Template(""" - if(ancestor_box_mask[i]) box_flags[i] |= ${HAS_CHILD_SOURCES}; - """).render(HAS_CHILD_SOURCES=("(" + box_flag_t + ") " + - str(box_flags_enum.HAS_CHILD_SOURCES))) - ) - - modify_own_sources_knl(d_tree.responsible_boxes_list, local_box_flags) - modify_child_sources_knl(d_tree.ancestor_mask, local_box_flags) - - from boxtree.traversal import FMMTraversalBuilder - tg = FMMTraversalBuilder( - queue.context, - well_sep_is_n_away=well_sep_is_n_away, - from_sep_smaller_crit=from_sep_smaller_crit - ) - - d_local_trav, _ = tg( - queue, d_tree, debug=True, - box_bounding_box=box_bounding_box, - local_box_flags=local_box_flags - ) - - if merge_close_lists and d_tree.targets_have_extent: - d_local_trav = d_local_trav.merge_close_lists(queue) - - local_trav = d_local_trav.get(queue=queue) - - logger.info("Generate local traversal in {} sec.".format( - str(time.time() - start_time)) - ) - - return local_trav - - -# {{{ communicate mpoles - -def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): - """Based on Algorithm 3: Reduce and Scatter in [1]. - - The main idea is to mimic a allreduce as done on a hypercube network, but to - decrease the bandwidth cost by sending only information that is relevant to - the processes receiving the message. - - .. [1] Lashuk, Ilya, Aparna Chandramowlishwaran, Harper Langston, - Tuan-Anh Nguyen, Rahul Sampath, Aashay Shringarpure, Richard Vuduc, Lexing - Ying, Denis Zorin, and George Biros. “A massively parallel adaptive fast - multipole method on heterogeneous architectures." Communications of the - ACM 55, no. 5 (2012): 101-109. - """ - rank = comm.Get_rank() - nprocs = comm.Get_size() - - stats = {} - - from time import time - t_start = time() - logger.debug("communicate multipoles: start") - - # contributing_boxes: - # - # A mask of the the set of boxes that the current process contributes - # to. 
This process contributes to a box when: - # - # (a) this process owns sources that contribute to the multipole expansion - # in the box (via the upward pass) or - # (b) this process has received a portion of the multipole expansion in this - # box from another process. - # - # Initially, this set consists of the boxes satisfying condition (a), which - # are precisely the boxes owned by this process and their ancestors. - contributing_boxes = trav.tree.ancestor_mask.copy() - contributing_boxes[trav.tree.responsible_boxes_list] = 1 - - from boxtree.tools import AllReduceCommPattern - comm_pattern = AllReduceCommPattern(rank, nprocs) - - # Temporary buffers for receiving data - mpole_exps_buf = np.empty(mpole_exps.shape, dtype=mpole_exps.dtype) - boxes_list_buf = np.empty(trav.tree.nboxes, dtype=trav.tree.box_id_dtype) - - # Temporary buffer for holding the mask - box_in_subrange = wrangler.empty_box_in_subrange_mask() - - stats["bytes_sent_by_stage"] = [] - stats["bytes_recvd_by_stage"] = [] - - while not comm_pattern.done(): - send_requests = [] - - # Send data to other processors. - if comm_pattern.sinks(): - # Compute the subset of boxes to be sent. - message_subrange = comm_pattern.messages() - - box_in_subrange.fill(0) - - wrangler.find_boxes_used_by_subrange( - box_in_subrange, message_subrange, - trav.tree.box_to_user_starts, trav.tree.box_to_user_lists) - - box_in_subrange_host = ( - box_in_subrange.map_to_host(flags=cl.map_flags.READ)) - - with box_in_subrange_host.data: - relevant_boxes_list = ( - np.nonzero(box_in_subrange_host & contributing_boxes) - [0] - .astype(trav.tree.box_id_dtype)) - - del box_in_subrange_host - - relevant_mpole_exps = wrangler.slice_mpoles(mpole_exps, - relevant_boxes_list) - - # Send the box subset to the other processors. - for sink in comm_pattern.sinks(): - req = comm.Isend(relevant_mpole_exps, dest=sink, - tag=MPITags["REDUCE_POTENTIALS"]) - send_requests.append(req) - - req = comm.Isend(relevant_boxes_list, dest=sink, - tag=MPITags["REDUCE_INDICES"]) - send_requests.append(req) - - # Receive data from other processors. - for source in comm_pattern.sources(): - comm.Recv(mpole_exps_buf, source=source, - tag=MPITags["REDUCE_POTENTIALS"]) - - status = MPI.Status() - comm.Recv(boxes_list_buf, source=source, tag=MPITags["REDUCE_INDICES"], - status=status) - nboxes = status.Get_count() // boxes_list_buf.dtype.itemsize - - # Update data structures. 
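
For reference, the gather/scatter pair used around this point is easy to picture in plain numpy: slice_mpoles packs the coefficients of a chosen set of boxes into one contiguous send buffer, and update_mpoles adds received coefficients back into the matching boxes. The sketch below only illustrates that idea under a simplifying assumption (a flat layout with a fixed number of coefficients per box); the actual wrangler methods work level by level on the packed expansion array, so this is not the implementation.

    import numpy as np

    ncoeffs = 4                      # assumed fixed per-box coefficient count
    nboxes = 8
    mpoles = np.zeros((nboxes, ncoeffs))

    def slice_mpoles_sketch(mpoles, box_ids):
        # Gather the selected boxes' coefficients into one flat buffer
        # (this is what travels over MPI).
        return mpoles[box_ids].ravel()

    def update_mpoles_sketch(mpoles, updates, box_ids):
        # Add the received partial expansions back into the local array
        # (box_ids are assumed distinct).
        mpoles[box_ids] += updates.reshape(len(box_ids), -1)

    received_boxes = np.array([1, 5])
    received_updates = np.ones(len(received_boxes) * ncoeffs)
    update_mpoles_sketch(mpoles, received_updates, received_boxes)
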
- wrangler.update_mpoles(mpole_exps, mpole_exps_buf, - boxes_list_buf[:nboxes]) - - contributing_boxes[boxes_list_buf[:nboxes]] = 1 - - for req in send_requests: - req.wait() - - comm_pattern.advance() - - stats["total_time"] = time() - t_start - logger.info("communicate multipoles: done in %.2f s" % stats["total_time"]) - - if return_stats: - return stats - -# }}} - - -def get_gen_local_weights_helper(queue, particle_dtype, weight_dtype): - gen_local_source_weights_knl = cl.elementwise.ElementwiseKernel( - queue.context, - arguments=Template(""" - __global ${weight_t} *src_weights, - __global ${particle_id_t} *particle_mask, - __global ${particle_id_t} *particle_scan, - __global ${weight_t} *local_weights - """, strict_undefined=True).render( - weight_t=dtype_to_ctype(weight_dtype), - particle_id_t=dtype_to_ctype(particle_dtype) - ), - operation=""" - if(particle_mask[i]) { - local_weights[particle_scan[i]] = src_weights[i]; - } - """ - ) - - def gen_local_weights(global_weights, source_mask, source_scan): - local_nsources = source_scan[-1].get(queue) - local_weights = cl.array.empty(queue, (local_nsources,), - dtype=weight_dtype) - gen_local_source_weights_knl(global_weights, source_mask, source_scan, - local_weights) - return local_weights.get(queue) - - return gen_local_weights - - -def distribute_source_weights(source_weights, global_tree, local_data, - comm=MPI.COMM_WORLD): - """ - source_weights: source weights in tree order - global_tree: complete tree structure on root, None otherwise. - local_data: returned from *generate_local_tree* - """ - current_rank = comm.Get_rank() - total_rank = comm.Get_size() - - if current_rank == 0: - weight_req = np.empty((total_rank,), dtype=object) - local_src_weights = np.empty((total_rank,), dtype=object) - - # Generate local_weights - source_weights = cl.array.to_device(queue, source_weights) - gen_local_weights_helper = get_gen_local_weights_helper( - queue, global_tree.particle_id_dtype, source_weights.dtype) - for rank in range(total_rank): - local_src_weights[rank] = gen_local_weights_helper( - source_weights, - local_data[rank]["src_mask"], - local_data[rank]["src_scan"] - ) - weight_req[rank] = comm.isend(local_src_weights[rank], dest=rank, - tag=MPITags["DIST_WEIGHT"]) - - for rank in range(1, total_rank): - weight_req[rank].wait() - local_src_weights = local_src_weights[0] - else: - local_src_weights = comm.recv(source=0, tag=MPITags["DIST_WEIGHT"]) - - return local_src_weights - - -def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, - local_data, comm=MPI.COMM_WORLD, - _communicate_mpoles_via_allreduce=False): - - # Get MPI information - current_rank = comm.Get_rank() - total_rank = comm.Get_size() - - if current_rank == 0: - start_time = time.time() - - # {{{ Distribute source weights - - if current_rank == 0: - global_tree = global_wrangler.tree - # Convert src_weights to tree order - source_weights = source_weights[global_tree.user_source_ids] - else: - global_tree = None - - local_src_weights = distribute_source_weights( - source_weights, global_tree, local_data, comm=comm) - - # }}} - - # {{{ "Step 2.1:" Construct local multipoles - - logger.debug("construct local multipoles") - mpole_exps = wrangler.form_multipoles( - local_trav.level_start_source_box_nrs, - local_trav.source_boxes, - local_src_weights) - - # }}} - - # {{{ "Step 2.2:" Propagate multipoles upward - - logger.debug("propagate multipoles upward") - wrangler.coarsen_multipoles( - local_trav.level_start_source_parent_box_nrs, - 
local_trav.source_parent_boxes, - mpole_exps) - - # mpole_exps is called Phi in [1] - - # }}} - - # {{{ Communicate mpoles - - if _communicate_mpoles_via_allreduce: - mpole_exps_all = np.zeros_like(mpole_exps) - comm.Allreduce(mpole_exps, mpole_exps_all) - mpole_exps = mpole_exps_all - else: - communicate_mpoles(wrangler, comm, local_trav, mpole_exps) - - # }}} - - # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") - - logger.debug("direct evaluation from neighbor source boxes ('list 1')") - potentials = wrangler.eval_direct( - local_trav.target_boxes, - local_trav.neighbor_source_boxes_starts, - local_trav.neighbor_source_boxes_lists, - local_src_weights) - - # these potentials are called alpha in [1] - - # }}} - - # {{{ "Stage 4:" translate separated siblings' ("list 2") mpoles to local - - logger.debug("translate separated siblings' ('list 2') mpoles to local") - local_exps = wrangler.multipole_to_local( - local_trav.level_start_target_or_target_parent_box_nrs, - local_trav.target_or_target_parent_boxes, - local_trav.from_sep_siblings_starts, - local_trav.from_sep_siblings_lists, - mpole_exps) - - # local_exps represents both Gamma and Delta in [1] - - # }}} - - # {{{ "Stage 5:" evaluate sep. smaller mpoles ("list 3") at particles - - logger.debug("evaluate sep. smaller mpoles at particles ('list 3 far')") - - # (the point of aiming this stage at particles is specifically to keep its - # contribution *out* of the downward-propagating local expansions) - - potentials = potentials + wrangler.eval_multipoles( - local_trav.target_boxes_sep_smaller_by_source_level, - local_trav.from_sep_smaller_by_level, - mpole_exps) - - # these potentials are called beta in [1] - - if local_trav.from_sep_close_smaller_starts is not None: - logger.debug("evaluate separated close smaller interactions directly " - "('list 3 close')") - potentials = potentials + wrangler.eval_direct( - local_trav.target_boxes, - local_trav.from_sep_close_smaller_starts, - local_trav.from_sep_close_smaller_lists, - local_src_weights) - - # }}} - - # {{{ "Stage 6:" form locals for separated bigger source boxes ("list 4") - - logger.debug("form locals for separated bigger source boxes ('list 4 far')") - - local_exps = local_exps + wrangler.form_locals( - local_trav.level_start_target_or_target_parent_box_nrs, - local_trav.target_or_target_parent_boxes, - local_trav.from_sep_bigger_starts, - local_trav.from_sep_bigger_lists, - local_src_weights) - - if local_trav.from_sep_close_bigger_starts is not None: - logger.debug("evaluate separated close bigger interactions directly " - "('list 4 close')") - - potentials = potentials + wrangler.eval_direct( - local_trav.target_or_target_parent_boxes, - local_trav.from_sep_close_bigger_starts, - local_trav.from_sep_close_bigger_lists, - local_src_weights) - - # }}} - - # {{{ "Stage 7:" propagate local_exps downward - - logger.debug("propagate local_exps downward") - - wrangler.refine_locals( - local_trav.level_start_target_or_target_parent_box_nrs, - local_trav.target_or_target_parent_boxes, - local_exps) - - # }}} - - # {{{ "Stage 8:" evaluate locals - - logger.debug("evaluate locals") - potentials = potentials + wrangler.eval_locals( - local_trav.level_start_target_box_nrs, - local_trav.target_boxes, - local_exps) - - # }}} - - potentials_mpi_type = dtype_to_mpi(potentials.dtype) - if current_rank == 0: - potentials_all_ranks = np.empty((total_rank,), dtype=object) - potentials_all_ranks[0] = potentials - for i in range(1, total_rank): - potentials_all_ranks[i] = 
np.empty( - (local_data[i]["ntargets"],), dtype=potentials.dtype) - comm.Recv([potentials_all_ranks[i], potentials_mpi_type], - source=i, tag=MPITags["GATHER_POTENTIALS"]) - else: - comm.Send([potentials, potentials_mpi_type], - dest=0, tag=MPITags["GATHER_POTENTIALS"]) - - if current_rank == 0: - d_potentials = cl.array.empty(queue, (global_wrangler.tree.ntargets,), - dtype=potentials.dtype) - fill_potentials_knl = cl.elementwise.ElementwiseKernel( - ctx, - Template(""" - __global ${particle_id_t} *particle_mask, - __global ${particle_id_t} *particle_scan, - __global ${potential_t} *local_potentials, - __global ${potential_t} *potentials - """).render( - particle_id_t=dtype_to_ctype(global_wrangler.tree.particle_id_dtype), - potential_t=dtype_to_ctype(potentials.dtype)), - r""" - if(particle_mask[i]) { - potentials[i] = local_potentials[particle_scan[i]]; - } - """ - ) - - for i in range(total_rank): - local_potentials = cl.array.to_device(queue, potentials_all_ranks[i]) - fill_potentials_knl( - local_data[i]["tgt_mask"], local_data[i]["tgt_scan"], - local_potentials, d_potentials) - - potentials = d_potentials.get() - - logger.debug("reorder potentials") - result = global_wrangler.reorder_potentials(potentials) - - logger.debug("finalize potentials") - result = global_wrangler.finalize_potentials(result) - - logger.info("Distributed FMM evaluation completes in {} sec.".format( - str(time.time() - start_time) - )) - - return result - - -class DistributedFMMInfo(object): - - def __init__(self, global_trav, distributed_expansion_wrangler_factory, - comm=MPI.COMM_WORLD): - self.global_trav = global_trav - self.distributed_expansion_wrangler_factory = \ - distributed_expansion_wrangler_factory - - self.comm = comm - current_rank = comm.Get_rank() - - if current_rank == 0: - well_sep_is_n_away = global_trav.well_sep_is_n_away - else: - well_sep_is_n_away = None - well_sep_is_n_away = comm.bcast(well_sep_is_n_away, root=0) - - if current_rank == 0: - from boxtree.partition import partition_work - workload_weight = WorkloadWeight( - direct=1, m2l=1, m2p=1, p2l=1, multipole=5 - ) - responsible_boxes_list = partition_work( - global_trav, comm.Get_size(), workload_weight - ) - else: - responsible_boxes_list = None - - if current_rank == 0: - from boxtree.partition import ResponsibleBoxesQuery - responsible_box_query = ResponsibleBoxesQuery(queue, global_trav) - else: - responsible_box_query = None - - self.local_tree, self.local_data, self.box_bounding_box, _ = \ - generate_local_tree(self.global_trav, responsible_boxes_list, - responsible_box_query) - - self.local_trav = generate_local_travs( - self.local_tree, self.box_bounding_box, comm=comm, - well_sep_is_n_away=well_sep_is_n_away) - self.local_wrangler = self.distributed_expansion_wrangler_factory( - self.local_tree) - if current_rank == 0: - self.global_wrangler = self.distributed_expansion_wrangler_factory( - self.global_trav.tree) - else: - self.global_wrangler = None - - def drive_dfmm(self, source_weights): - return calculate_pot( - self.local_wrangler, self.global_wrangler, self.local_trav, - source_weights, self.local_data) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py new file mode 100644 index 0000000..8efa5e5 --- /dev/null +++ b/boxtree/distributed/__init__.py @@ -0,0 +1,122 @@ +from __future__ import division + +__copyright__ = "Copyright (C) 2013 Andreas Kloeckner \ + Copyright (C) 2018 Hao Gao" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of 
this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +from mpi4py import MPI +from collections import namedtuple +import numpy as np + +MPITags = dict( + DIST_TREE=0, + DIST_SOURCES=1, + DIST_TARGETS=2, + DIST_RADII=3, + DIST_WEIGHT=4, + GATHER_POTENTIALS=5, + REDUCE_POTENTIALS=6, + REDUCE_INDICES=7 +) + +WorkloadWeight = namedtuple( + 'Workload', + ['direct', 'm2l', 'm2p', 'p2l', 'multipole'] +) + + +def dtype_to_mpi(dtype): + """ This function translates a numpy.dtype object into the corresponding type + used in mpi4py. + """ + if hasattr(MPI, '_typedict'): + mpi_type = MPI._typedict[np.dtype(dtype).char] + elif hasattr(MPI, '__TypeDict__'): + mpi_type = MPI.__TypeDict__[np.dtype(dtype).char] + else: + raise RuntimeError("There is no dictionary to translate from Numpy dtype to " + "MPI type") + return mpi_type + + +class DistributedFMMInfo(object): + + def __init__(self, queue, global_trav, distributed_expansion_wrangler_factory, + comm=MPI.COMM_WORLD): + + self.queue = queue + + self.global_trav = global_trav + self.distributed_expansion_wrangler_factory = \ + distributed_expansion_wrangler_factory + + self.comm = comm + current_rank = comm.Get_rank() + + if current_rank == 0: + well_sep_is_n_away = global_trav.well_sep_is_n_away + else: + well_sep_is_n_away = None + + well_sep_is_n_away = comm.bcast(well_sep_is_n_away, root=0) + + if current_rank == 0: + from boxtree.distributed.partition import partition_work + workload_weight = WorkloadWeight( + direct=1, m2l=1, m2p=1, p2l=1, multipole=5 + ) + responsible_boxes_list = partition_work( + global_trav, comm.Get_size(), workload_weight + ) + else: + responsible_boxes_list = None + + if current_rank == 0: + from boxtree.distributed.partition import ResponsibleBoxesQuery + responsible_box_query = ResponsibleBoxesQuery(queue, global_trav) + else: + responsible_box_query = None + + from boxtree.distributed.local_tree import generate_local_tree + self.local_tree, self.local_data, self.box_bounding_box = \ + generate_local_tree(queue, self.global_trav, responsible_boxes_list, + responsible_box_query) + + from boxtree.distributed.local_traversal import generate_local_travs + self.local_trav = generate_local_travs( + queue, self.local_tree, self.box_bounding_box, + well_sep_is_n_away=well_sep_is_n_away) + + self.local_wrangler = self.distributed_expansion_wrangler_factory( + self.local_tree) + + if current_rank == 0: + self.global_wrangler = self.distributed_expansion_wrangler_factory( + self.global_trav.tree) + else: + self.global_wrangler = None + + def drive_dfmm(self, source_weights): + from boxtree.distributed.calculation import calculate_pot + 
return calculate_pot( + self.queue, self.local_wrangler, self.global_wrangler, self.local_trav, + source_weights, self.local_data) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py new file mode 100644 index 0000000..e45cd19 --- /dev/null +++ b/boxtree/distributed/calculation.py @@ -0,0 +1,538 @@ +from __future__ import division + +__copyright__ = "Copyright (C) 2013 Andreas Kloeckner \ + Copyright (C) 2018 Hao Gao" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import numpy as np +import pyopencl as cl +from boxtree.distributed import MPITags +from mpi4py import MPI +from mako.template import Template +from pyopencl.tools import dtype_to_ctype +import time +from boxtree.distributed import dtype_to_mpi +from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler +from pytools import memoize_method +import loopy as lp +from loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_1 # noqa: F401 + +import logging +logger = logging.getLogger(__name__) + + +# {{{ distributed fmm wrangler + +class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): + + def __init__(self, queue, tree, helmholtz_k, fmm_level_to_nterms=None): + super(DistributedFMMLibExpansionWrangler, self).__init__( + tree, helmholtz_k, fmm_level_to_nterms + ) + + self.queue = queue + + def slice_mpoles(self, mpoles, slice_indices): + if len(slice_indices) == 0: + return np.empty((0,), dtype=mpoles.dtype) + + level_start_slice_indices = np.searchsorted( + slice_indices, self.tree.level_start_box_nrs) + mpoles_list = [] + + for ilevel in range(self.tree.nlevels): + start, stop = level_start_slice_indices[ilevel:ilevel+2] + if stop > start: + level_start_box_idx, mpoles_current_level = \ + self.multipole_expansions_view(mpoles, ilevel) + mpoles_list.append( + mpoles_current_level[ + slice_indices[start:stop] - level_start_box_idx + ].reshape(-1) + ) + + return np.concatenate(mpoles_list) + + def update_mpoles(self, mpoles, mpole_updates, slice_indices): + if len(slice_indices) == 0: + return + + level_start_slice_indices = np.searchsorted( + slice_indices, self.tree.level_start_box_nrs) + mpole_updates_start = 0 + + for ilevel in range(self.tree.nlevels): + start, stop = level_start_slice_indices[ilevel:ilevel+2] + if stop > start: + level_start_box_idx, mpoles_current_level = \ + self.multipole_expansions_view(mpoles, ilevel) + mpoles_shape = (stop - start,) + mpoles_current_level.shape[1:] + + from pytools import product + mpole_updates_end = mpole_updates_start + 
product(mpoles_shape) + + mpoles_current_level[ + slice_indices[start:stop] - level_start_box_idx + ] += mpole_updates[ + mpole_updates_start:mpole_updates_end + ].reshape(mpoles_shape) + + mpole_updates_start = mpole_updates_end + + def empty_box_in_subrange_mask(self): + return cl.array.empty(self.queue, self.tree.nboxes, dtype=np.int8) + + @memoize_method + def find_boxes_used_by_subrange_kernel(self): + knl = lp.make_kernel( + [ + "{[ibox]: 0 <= ibox < nboxes}", + "{[iuser]: iuser_start <= iuser < iuser_end}", + ], + """ + for ibox + <> iuser_start = box_to_user_starts[ibox] + <> iuser_end = box_to_user_starts[ibox + 1] + for iuser + <> useri = box_to_user_lists[iuser] + <> in_subrange = subrange_start <= useri and useri < subrange_end + if in_subrange + box_in_subrange[ibox] = 1 + end + end + end + """, + [ + lp.ValueArg("subrange_start, subrange_end", np.int32), + lp.GlobalArg("box_to_user_lists", shape=None), + "..." + ]) + knl = lp.split_iname(knl, "ibox", 16, outer_tag="g.0", inner_tag="l.0") + return knl + + def find_boxes_used_by_subrange(self, box_in_subrange, subrange, + box_to_user_starts, box_to_user_lists): + knl = self.find_boxes_used_by_subrange_kernel() + knl(self.queue, + subrange_start=subrange[0], + subrange_end=subrange[1], + box_to_user_starts=box_to_user_starts, + box_to_user_lists=box_to_user_lists, + box_in_subrange=box_in_subrange) + + box_in_subrange.finish() + +# }}} + + +# {{{ communicate mpoles + +def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): + """Based on Algorithm 3: Reduce and Scatter in [1]. + + The main idea is to mimic a allreduce as done on a hypercube network, but to + decrease the bandwidth cost by sending only information that is relevant to + the processes receiving the message. + + .. [1] Lashuk, Ilya, Aparna Chandramowlishwaran, Harper Langston, + Tuan-Anh Nguyen, Rahul Sampath, Aashay Shringarpure, Richard Vuduc, Lexing + Ying, Denis Zorin, and George Biros. “A massively parallel adaptive fast + multipole method on heterogeneous architectures." Communications of the + ACM 55, no. 5 (2012): 101-109. + """ + rank = comm.Get_rank() + nprocs = comm.Get_size() + + stats = {} + + from time import time + t_start = time() + logger.debug("communicate multipoles: start") + + # contributing_boxes: + # + # A mask of the the set of boxes that the current process contributes + # to. This process contributes to a box when: + # + # (a) this process owns sources that contribute to the multipole expansion + # in the box (via the upward pass) or + # (b) this process has received a portion of the multipole expansion in this + # box from another process. + # + # Initially, this set consists of the boxes satisfying condition (a), which + # are precisely the boxes owned by this process and their ancestors. + contributing_boxes = trav.tree.ancestor_mask.copy() + contributing_boxes[trav.tree.responsible_boxes_list] = 1 + + from boxtree.tools import AllReduceCommPattern + comm_pattern = AllReduceCommPattern(rank, nprocs) + + # Temporary buffers for receiving data + mpole_exps_buf = np.empty(mpole_exps.shape, dtype=mpole_exps.dtype) + boxes_list_buf = np.empty(trav.tree.nboxes, dtype=trav.tree.box_id_dtype) + + # Temporary buffer for holding the mask + box_in_subrange = wrangler.empty_box_in_subrange_mask() + + stats["bytes_sent_by_stage"] = [] + stats["bytes_recvd_by_stage"] = [] + + while not comm_pattern.done(): + send_requests = [] + + # Send data to other processors. 
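
As a point of reference for the send/receive stages below, here is a minimal mpi4py sketch of the plain hypercube all-reduce that this reduce-and-scatter scheme is modeled on. It is not boxtree's AllReduceCommPattern: the sketch assumes a power-of-two number of ranks and exchanges full buffers, whereas comm_pattern additionally restricts each message to the box subrange relevant to the receiving ranks (via sinks(), sources() and messages()).

    import numpy as np
    from mpi4py import MPI

    def hypercube_allreduce(comm, acc):
        # acc: this rank's contribution (a numpy array); the returned array
        # holds the elementwise sum over all ranks. At stage k, rank r
        # exchanges its partial result with rank r XOR 2**k and reduces, so
        # log2(nprocs) stages suffice.
        rank = comm.Get_rank()
        nprocs = comm.Get_size()
        assert nprocs & (nprocs - 1) == 0, "sketch assumes 2**n ranks"
        stage = 1
        while stage < nprocs:
            partner = rank ^ stage
            recv_buf = np.empty_like(acc)
            comm.Sendrecv(acc, dest=partner, recvbuf=recv_buf, source=partner)
            acc = acc + recv_buf
            stage <<= 1
        return acc

    # usage sketch:
    # total = hypercube_allreduce(MPI.COMM_WORLD, np.array([1.0]))
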
+ if comm_pattern.sinks(): + # Compute the subset of boxes to be sent. + message_subrange = comm_pattern.messages() + + box_in_subrange.fill(0) + + wrangler.find_boxes_used_by_subrange( + box_in_subrange, message_subrange, + trav.tree.box_to_user_starts, trav.tree.box_to_user_lists) + + box_in_subrange_host = ( + box_in_subrange.map_to_host(flags=cl.map_flags.READ)) + + with box_in_subrange_host.data: + relevant_boxes_list = ( + np.nonzero(box_in_subrange_host & contributing_boxes) + [0] + .astype(trav.tree.box_id_dtype)) + + del box_in_subrange_host + + relevant_mpole_exps = wrangler.slice_mpoles(mpole_exps, + relevant_boxes_list) + + # Send the box subset to the other processors. + for sink in comm_pattern.sinks(): + req = comm.Isend(relevant_mpole_exps, dest=sink, + tag=MPITags["REDUCE_POTENTIALS"]) + send_requests.append(req) + + req = comm.Isend(relevant_boxes_list, dest=sink, + tag=MPITags["REDUCE_INDICES"]) + send_requests.append(req) + + # Receive data from other processors. + for source in comm_pattern.sources(): + comm.Recv(mpole_exps_buf, source=source, + tag=MPITags["REDUCE_POTENTIALS"]) + + status = MPI.Status() + comm.Recv(boxes_list_buf, source=source, tag=MPITags["REDUCE_INDICES"], + status=status) + nboxes = status.Get_count() // boxes_list_buf.dtype.itemsize + + # Update data structures. + wrangler.update_mpoles(mpole_exps, mpole_exps_buf, + boxes_list_buf[:nboxes]) + + contributing_boxes[boxes_list_buf[:nboxes]] = 1 + + for req in send_requests: + req.wait() + + comm_pattern.advance() + + stats["total_time"] = time() - t_start + logger.info("communicate multipoles: done in %.2f s" % stats["total_time"]) + + if return_stats: + return stats + +# }}} + + +def get_gen_local_weights_helper(queue, particle_dtype, weight_dtype): + gen_local_source_weights_knl = cl.elementwise.ElementwiseKernel( + queue.context, + arguments=Template(""" + __global ${weight_t} *src_weights, + __global ${particle_id_t} *particle_mask, + __global ${particle_id_t} *particle_scan, + __global ${weight_t} *local_weights + """, strict_undefined=True).render( + weight_t=dtype_to_ctype(weight_dtype), + particle_id_t=dtype_to_ctype(particle_dtype) + ), + operation=""" + if(particle_mask[i]) { + local_weights[particle_scan[i]] = src_weights[i]; + } + """ + ) + + def gen_local_weights(global_weights, source_mask, source_scan): + local_nsources = source_scan[-1].get(queue) + local_weights = cl.array.empty(queue, (local_nsources,), + dtype=weight_dtype) + gen_local_source_weights_knl(global_weights, source_mask, source_scan, + local_weights) + return local_weights.get(queue) + + return gen_local_weights + + +def distribute_source_weights(queue, source_weights, global_tree, local_data, + comm=MPI.COMM_WORLD): + """ + source_weights: source weights in tree order + global_tree: complete tree structure on root, None otherwise. 
+ local_data: returned from *generate_local_tree* + """ + current_rank = comm.Get_rank() + total_rank = comm.Get_size() + + if current_rank == 0: + weight_req = np.empty((total_rank,), dtype=object) + local_src_weights = np.empty((total_rank,), dtype=object) + + # Generate local_weights + source_weights = cl.array.to_device(queue, source_weights) + gen_local_weights_helper = get_gen_local_weights_helper( + queue, global_tree.particle_id_dtype, source_weights.dtype) + for rank in range(total_rank): + local_src_weights[rank] = gen_local_weights_helper( + source_weights, + local_data[rank]["src_mask"], + local_data[rank]["src_scan"] + ) + weight_req[rank] = comm.isend(local_src_weights[rank], dest=rank, + tag=MPITags["DIST_WEIGHT"]) + + for rank in range(1, total_rank): + weight_req[rank].wait() + local_src_weights = local_src_weights[0] + else: + local_src_weights = comm.recv(source=0, tag=MPITags["DIST_WEIGHT"]) + + return local_src_weights + + +def calculate_pot(queue, wrangler, global_wrangler, local_trav, source_weights, + local_data, comm=MPI.COMM_WORLD, + _communicate_mpoles_via_allreduce=False): + + # Get MPI information + current_rank = comm.Get_rank() + total_rank = comm.Get_size() + + if current_rank == 0: + start_time = time.time() + + # {{{ Distribute source weights + + if current_rank == 0: + global_tree = global_wrangler.tree + # Convert src_weights to tree order + source_weights = source_weights[global_tree.user_source_ids] + else: + global_tree = None + + local_src_weights = distribute_source_weights( + queue, source_weights, global_tree, local_data, comm=comm) + + # }}} + + # {{{ "Step 2.1:" Construct local multipoles + + logger.debug("construct local multipoles") + mpole_exps = wrangler.form_multipoles( + local_trav.level_start_source_box_nrs, + local_trav.source_boxes, + local_src_weights) + + # }}} + + # {{{ "Step 2.2:" Propagate multipoles upward + + logger.debug("propagate multipoles upward") + wrangler.coarsen_multipoles( + local_trav.level_start_source_parent_box_nrs, + local_trav.source_parent_boxes, + mpole_exps) + + # mpole_exps is called Phi in [1] + + # }}} + + # {{{ Communicate mpoles + + if _communicate_mpoles_via_allreduce: + mpole_exps_all = np.zeros_like(mpole_exps) + comm.Allreduce(mpole_exps, mpole_exps_all) + mpole_exps = mpole_exps_all + else: + communicate_mpoles(wrangler, comm, local_trav, mpole_exps) + + # }}} + + # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") + + logger.debug("direct evaluation from neighbor source boxes ('list 1')") + potentials = wrangler.eval_direct( + local_trav.target_boxes, + local_trav.neighbor_source_boxes_starts, + local_trav.neighbor_source_boxes_lists, + local_src_weights) + + # these potentials are called alpha in [1] + + # }}} + + # {{{ "Stage 4:" translate separated siblings' ("list 2") mpoles to local + + logger.debug("translate separated siblings' ('list 2') mpoles to local") + local_exps = wrangler.multipole_to_local( + local_trav.level_start_target_or_target_parent_box_nrs, + local_trav.target_or_target_parent_boxes, + local_trav.from_sep_siblings_starts, + local_trav.from_sep_siblings_lists, + mpole_exps) + + # local_exps represents both Gamma and Delta in [1] + + # }}} + + # {{{ "Stage 5:" evaluate sep. smaller mpoles ("list 3") at particles + + logger.debug("evaluate sep. 
smaller mpoles at particles ('list 3 far')") + + # (the point of aiming this stage at particles is specifically to keep its + # contribution *out* of the downward-propagating local expansions) + + potentials = potentials + wrangler.eval_multipoles( + local_trav.target_boxes_sep_smaller_by_source_level, + local_trav.from_sep_smaller_by_level, + mpole_exps) + + # these potentials are called beta in [1] + + if local_trav.from_sep_close_smaller_starts is not None: + logger.debug("evaluate separated close smaller interactions directly " + "('list 3 close')") + potentials = potentials + wrangler.eval_direct( + local_trav.target_boxes, + local_trav.from_sep_close_smaller_starts, + local_trav.from_sep_close_smaller_lists, + local_src_weights) + + # }}} + + # {{{ "Stage 6:" form locals for separated bigger source boxes ("list 4") + + logger.debug("form locals for separated bigger source boxes ('list 4 far')") + + local_exps = local_exps + wrangler.form_locals( + local_trav.level_start_target_or_target_parent_box_nrs, + local_trav.target_or_target_parent_boxes, + local_trav.from_sep_bigger_starts, + local_trav.from_sep_bigger_lists, + local_src_weights) + + if local_trav.from_sep_close_bigger_starts is not None: + logger.debug("evaluate separated close bigger interactions directly " + "('list 4 close')") + + potentials = potentials + wrangler.eval_direct( + local_trav.target_or_target_parent_boxes, + local_trav.from_sep_close_bigger_starts, + local_trav.from_sep_close_bigger_lists, + local_src_weights) + + # }}} + + # {{{ "Stage 7:" propagate local_exps downward + + logger.debug("propagate local_exps downward") + + wrangler.refine_locals( + local_trav.level_start_target_or_target_parent_box_nrs, + local_trav.target_or_target_parent_boxes, + local_exps) + + # }}} + + # {{{ "Stage 8:" evaluate locals + + logger.debug("evaluate locals") + potentials = potentials + wrangler.eval_locals( + local_trav.level_start_target_box_nrs, + local_trav.target_boxes, + local_exps) + + # }}} + + potentials_mpi_type = dtype_to_mpi(potentials.dtype) + if current_rank == 0: + potentials_all_ranks = np.empty((total_rank,), dtype=object) + potentials_all_ranks[0] = potentials + for i in range(1, total_rank): + potentials_all_ranks[i] = np.empty( + (local_data[i]["ntargets"],), dtype=potentials.dtype) + comm.Recv([potentials_all_ranks[i], potentials_mpi_type], + source=i, tag=MPITags["GATHER_POTENTIALS"]) + else: + comm.Send([potentials, potentials_mpi_type], + dest=0, tag=MPITags["GATHER_POTENTIALS"]) + + if current_rank == 0: + d_potentials = cl.array.empty(queue, (global_wrangler.tree.ntargets,), + dtype=potentials.dtype) + fill_potentials_knl = cl.elementwise.ElementwiseKernel( + queue.context, + Template(""" + __global ${particle_id_t} *particle_mask, + __global ${particle_id_t} *particle_scan, + __global ${potential_t} *local_potentials, + __global ${potential_t} *potentials + """).render( + particle_id_t=dtype_to_ctype(global_wrangler.tree.particle_id_dtype), + potential_t=dtype_to_ctype(potentials.dtype)), + r""" + if(particle_mask[i]) { + potentials[i] = local_potentials[particle_scan[i]]; + } + """ + ) + + for i in range(total_rank): + local_potentials = cl.array.to_device(queue, potentials_all_ranks[i]) + fill_potentials_knl( + local_data[i]["tgt_mask"], local_data[i]["tgt_scan"], + local_potentials, d_potentials) + + potentials = d_potentials.get() + + logger.debug("reorder potentials") + result = global_wrangler.reorder_potentials(potentials) + + logger.debug("finalize potentials") + result = 
global_wrangler.finalize_potentials(result) + + logger.info("Distributed FMM evaluation completes in {} sec.".format( + str(time.time() - start_time) + )) + + return result diff --git a/boxtree/distributed/local_traversal.py b/boxtree/distributed/local_traversal.py new file mode 100644 index 0000000..2a8bcc0 --- /dev/null +++ b/boxtree/distributed/local_traversal.py @@ -0,0 +1,123 @@ +from __future__ import division + +__copyright__ = "Copyright (C) 2013 Andreas Kloeckner \ + Copyright (C) 2018 Hao Gao" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import time +from pyopencl.tools import dtype_to_ctype +import pyopencl as cl +from mako.template import Template + +import logging +logger = logging.getLogger(__name__) + + +def generate_local_travs( + queue, local_tree, box_bounding_box=None, + well_sep_is_n_away=1, from_sep_smaller_crit=None, + merge_close_lists=False): + + start_time = time.time() + + d_tree = local_tree.to_device(queue).with_queue(queue) + + # Modify box flags for targets + from boxtree import box_flags_enum + box_flag_t = dtype_to_ctype(box_flags_enum.dtype) + modify_target_flags_knl = cl.elementwise.ElementwiseKernel( + queue.context, + Template(""" + __global ${particle_id_t} *box_target_counts_nonchild, + __global ${particle_id_t} *box_target_counts_cumul, + __global ${box_flag_t} *box_flags + """).render(particle_id_t=dtype_to_ctype(local_tree.particle_id_dtype), + box_flag_t=box_flag_t), + Template(""" + box_flags[i] &= (~${HAS_OWN_TARGETS}); + box_flags[i] &= (~${HAS_CHILD_TARGETS}); + if(box_target_counts_nonchild[i]) box_flags[i] |= ${HAS_OWN_TARGETS}; + if(box_target_counts_nonchild[i] < box_target_counts_cumul[i]) + box_flags[i] |= ${HAS_CHILD_TARGETS}; + """).render(HAS_OWN_TARGETS=("(" + box_flag_t + ") " + + str(box_flags_enum.HAS_OWN_TARGETS)), + HAS_CHILD_TARGETS=("(" + box_flag_t + ") " + + str(box_flags_enum.HAS_CHILD_TARGETS))) + ) + + modify_target_flags_knl(d_tree.box_target_counts_nonchild, + d_tree.box_target_counts_cumul, + d_tree.box_flags) + + # Generate local source flags + local_box_flags = d_tree.box_flags & 250 + modify_own_sources_knl = cl.elementwise.ElementwiseKernel( + queue.context, + Template(""" + __global ${box_id_t} *responsible_box_list, + __global ${box_flag_t} *box_flags + """).render(box_id_t=dtype_to_ctype(local_tree.box_id_dtype), + box_flag_t=box_flag_t), + Template(r""" + box_flags[responsible_box_list[i]] |= ${HAS_OWN_SOURCES}; + """).render(HAS_OWN_SOURCES=("(" + box_flag_t + ") " + + str(box_flags_enum.HAS_OWN_SOURCES))) 
+ ) + + modify_child_sources_knl = cl.elementwise.ElementwiseKernel( + queue.context, + Template(""" + __global char *ancestor_box_mask, + __global ${box_flag_t} *box_flags + """).render(box_flag_t=box_flag_t), + Template(""" + if(ancestor_box_mask[i]) box_flags[i] |= ${HAS_CHILD_SOURCES}; + """).render(HAS_CHILD_SOURCES=("(" + box_flag_t + ") " + + str(box_flags_enum.HAS_CHILD_SOURCES))) + ) + + modify_own_sources_knl(d_tree.responsible_boxes_list, local_box_flags) + modify_child_sources_knl(d_tree.ancestor_mask, local_box_flags) + + from boxtree.traversal import FMMTraversalBuilder + tg = FMMTraversalBuilder( + queue.context, + well_sep_is_n_away=well_sep_is_n_away, + from_sep_smaller_crit=from_sep_smaller_crit + ) + + d_local_trav, _ = tg( + queue, d_tree, debug=True, + box_bounding_box=box_bounding_box, + local_box_flags=local_box_flags + ) + + if merge_close_lists and d_tree.targets_have_extent: + d_local_trav = d_local_trav.merge_close_lists(queue) + + local_trav = d_local_trav.get(queue=queue) + + logger.info("Generate local traversal in {} sec.".format( + str(time.time() - start_time)) + ) + + return local_trav diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py new file mode 100644 index 0000000..9bfb2ed --- /dev/null +++ b/boxtree/distributed/local_tree.py @@ -0,0 +1,720 @@ +from __future__ import division + +__copyright__ = "Copyright (C) 2013 Andreas Kloeckner \ + Copyright (C) 2018 Hao Gao" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +from collections import namedtuple +import pyopencl as cl +from mako.template import Template +from pyopencl.tools import dtype_to_ctype +from boxtree import Tree +from mpi4py import MPI +import time +import numpy as np +from boxtree.distributed import MPITags + +import logging +logger = logging.getLogger(__name__) + +FetchLocalParticlesKernels = namedtuple( + 'FetchLocalParticlesKernels', + [ + 'particle_mask_knl', + 'mask_scan_knl', + 'fetch_local_src_knl', + 'fetch_local_tgt_knl', + 'generate_box_particle_starts', + 'generate_box_particle_counts_nonchild', + 'generate_box_particle_counts_cumul' + ] +) + + +def get_fetch_local_particles_knls(context, global_tree): + """ + This function compiles several PyOpenCL kernels helpful for fetching particles of + local trees from global tree. + + :param context: The context to compile against. + :param global_tree: The global tree from which local trees are generated. + :return: A FetchLocalParticlesKernels object. 
+ """ + + particle_mask_knl = cl.elementwise.ElementwiseKernel( + context, + arguments=Template(""" + __global char *responsible_boxes, + __global ${particle_id_t} *box_particle_starts, + __global ${particle_id_t} *box_particle_counts_nonchild, + __global ${particle_id_t} *particle_mask + """, strict_undefined=True).render( + particle_id_t=dtype_to_ctype(global_tree.particle_id_dtype) + ), + operation=Template(""" + if(responsible_boxes[i]) { + for(${particle_id_t} pid = box_particle_starts[i]; + pid < box_particle_starts[i] + box_particle_counts_nonchild[i]; + ++pid) { + particle_mask[pid] = 1; + } + } + """).render(particle_id_t=dtype_to_ctype(global_tree.particle_id_dtype)) + ) + + from pyopencl.scan import GenericScanKernel + mask_scan_knl = GenericScanKernel( + context, global_tree.particle_id_dtype, + arguments=Template(""" + __global ${mask_t} *ary, + __global ${mask_t} *scan + """, strict_undefined=True).render( + mask_t=dtype_to_ctype(global_tree.particle_id_dtype) + ), + input_expr="ary[i]", + scan_expr="a+b", neutral="0", + output_statement="scan[i + 1] = item;" + ) + + fetch_local_paticles_arguments = Template(""" + __global const ${mask_t} *particle_mask, + __global const ${mask_t} *particle_scan + % for dim in range(ndims): + , __global const ${coord_t} *particles_${dim} + % endfor + % for dim in range(ndims): + , __global ${coord_t} *local_particles_${dim} + % endfor + % if particles_have_extent: + , __global const ${coord_t} *particle_radii + , __global ${coord_t} *local_particle_radii + % endif + """, strict_undefined=True) + + fetch_local_particles_prg = Template(""" + if(particle_mask[i]) { + ${particle_id_t} des = particle_scan[i]; + % for dim in range(ndims): + local_particles_${dim}[des] = particles_${dim}[i]; + % endfor + % if particles_have_extent: + local_particle_radii[des] = particle_radii[i]; + % endif + } + """, strict_undefined=True) + + fetch_local_src_knl = cl.elementwise.ElementwiseKernel( + context, + fetch_local_paticles_arguments.render( + mask_t=dtype_to_ctype(global_tree.particle_id_dtype), + coord_t=dtype_to_ctype(global_tree.coord_dtype), + ndims=global_tree.dimensions, + particles_have_extent=global_tree.sources_have_extent + ), + fetch_local_particles_prg.render( + particle_id_t=dtype_to_ctype(global_tree.particle_id_dtype), + ndims=global_tree.dimensions, + particles_have_extent=global_tree.sources_have_extent + ) + ) + + fetch_local_tgt_knl = cl.elementwise.ElementwiseKernel( + context, + fetch_local_paticles_arguments.render( + mask_t=dtype_to_ctype(global_tree.particle_id_dtype), + coord_t=dtype_to_ctype(global_tree.coord_dtype), + ndims=global_tree.dimensions, + particles_have_extent=global_tree.targets_have_extent + ), + fetch_local_particles_prg.render( + particle_id_t=dtype_to_ctype(global_tree.particle_id_dtype), + ndims=global_tree.dimensions, + particles_have_extent=global_tree.targets_have_extent + ) + ) + + generate_box_particle_starts = cl.elementwise.ElementwiseKernel( + context, + Template(""" + __global ${particle_id_t} *old_starts, + __global ${particle_id_t} *particle_scan, + __global ${particle_id_t} *new_starts + """, strict_undefined=True).render( + particle_id_t=dtype_to_ctype(global_tree.particle_id_dtype) + ), + "new_starts[i] = particle_scan[old_starts[i]]", + name="generate_box_particle_starts" + ) + + generate_box_particle_counts_nonchild = cl.elementwise.ElementwiseKernel( + context, + Template(""" + __global char *res_boxes, + __global ${particle_id_t} *old_counts_nonchild, + __global ${particle_id_t} 
*new_counts_nonchild + """, strict_undefined=True).render( + particle_id_t=dtype_to_ctype(global_tree.particle_id_dtype) + ), + "if(res_boxes[i]) new_counts_nonchild[i] = old_counts_nonchild[i];" + ) + + generate_box_particle_counts_cumul = cl.elementwise.ElementwiseKernel( + context, + Template(""" + __global ${particle_id_t} *old_counts_cumul, + __global ${particle_id_t} *old_starts, + __global ${particle_id_t} *new_counts_cumul, + __global ${particle_id_t} *particle_scan + """, strict_undefined=True).render( + particle_id_t=dtype_to_ctype(global_tree.particle_id_dtype) + ), + """ + new_counts_cumul[i] = + particle_scan[old_starts[i] + old_counts_cumul[i]] - + particle_scan[old_starts[i]] + """ + ) + + return FetchLocalParticlesKernels( + particle_mask_knl=particle_mask_knl, + mask_scan_knl=mask_scan_knl, + fetch_local_src_knl=fetch_local_src_knl, + fetch_local_tgt_knl=fetch_local_tgt_knl, + generate_box_particle_starts=generate_box_particle_starts, + generate_box_particle_counts_nonchild=generate_box_particle_counts_nonchild, + generate_box_particle_counts_cumul=generate_box_particle_counts_cumul + ) + + +def fetch_local_particles(queue, global_tree, src_box_mask, tgt_box_mask, local_tree, + local_data, knls): + """ This helper function fetches particles needed for worker processes, and + reconstruct list of lists indexing. + + Specifically, this function generates the following fields for the local tree: + sources, targets, target_radii, box_source_starts, box_source_counts_nonchild, + box_source_counts_cumul, box_target_starts, box_target_counts_nonchild, + box_target_counts_cumul. + + These generated fields are stored directly into :arg:local_tree. + + """ + global_tree_dev = global_tree.to_device(queue).with_queue(queue) + nsources = global_tree.nsources + + # {{{ source particle mask + + src_particle_mask = cl.array.zeros( + queue, (nsources,), + dtype=global_tree.particle_id_dtype + ) + + knls.particle_mask_knl( + src_box_mask, + global_tree_dev.box_source_starts, + global_tree_dev.box_source_counts_nonchild, + src_particle_mask + ) + + # }}} + + # {{{ scan of source particle mask + + src_particle_scan = cl.array.empty( + queue, (nsources + 1,), + dtype=global_tree.particle_id_dtype + ) + + src_particle_scan[0] = 0 + knls.mask_scan_knl(src_particle_mask, src_particle_scan) + + # }}} + + # {{{ local sources + + local_nsources = src_particle_scan[-1].get(queue) + + local_sources = cl.array.empty( + queue, (global_tree.dimensions, local_nsources), + dtype=global_tree.coord_dtype + ) + + local_sources_list = [ + local_sources[idim, :] + for idim in range(global_tree.dimensions) + ] + + assert(global_tree.sources_have_extent is False) + + knls.fetch_local_src_knl( + src_particle_mask, src_particle_scan, + *global_tree_dev.sources.tolist(), + *local_sources_list + ) + + # }}} + + # {{{ box_source_starts + + local_box_source_starts = cl.array.empty( + queue, (global_tree.nboxes,), + dtype=global_tree.particle_id_dtype + ) + + knls.generate_box_particle_starts( + global_tree_dev.box_source_starts, + src_particle_scan, + local_box_source_starts + ) + + # }}} + + # {{{ box_source_counts_nonchild + + local_box_source_counts_nonchild = cl.array.zeros( + queue, (global_tree.nboxes,), + dtype=global_tree.particle_id_dtype + ) + + knls.generate_box_particle_counts_nonchild( + src_box_mask, + global_tree_dev.box_source_counts_nonchild, + local_box_source_counts_nonchild + ) + + # }}} + + # {{{ box_source_counts_cumul + + local_box_source_counts_cumul = cl.array.empty( + queue, 
(global_tree.nboxes,), + dtype=global_tree.particle_id_dtype + ) + + knls.generate_box_particle_counts_cumul( + global_tree_dev.box_source_counts_cumul, + global_tree_dev.box_source_starts, + local_box_source_counts_cumul, + src_particle_scan + ) + + # }}} + + # {{{ target particle mask + + ntargets = global_tree.ntargets + + tgt_particle_mask = cl.array.zeros( + queue, (ntargets,), + dtype=global_tree.particle_id_dtype + ) + + knls.particle_mask_knl( + tgt_box_mask, + global_tree_dev.box_target_starts, + global_tree_dev.box_target_counts_nonchild, + tgt_particle_mask + ) + + # }}} + + # {{{ scan of target particle mask + + tgt_particle_scan = cl.array.empty( + queue, (ntargets + 1,), + dtype=global_tree.particle_id_dtype + ) + + tgt_particle_scan[0] = 0 + knls.mask_scan_knl(tgt_particle_mask, tgt_particle_scan) + + # }}} + + # {{{ local targets + + local_ntargets = tgt_particle_scan[-1].get(queue) + + local_targets = cl.array.empty( + queue, (local_tree.dimensions, local_ntargets), + dtype=local_tree.coord_dtype + ) + + local_targets_list = [ + local_targets[idim, :] + for idim in range(local_tree.dimensions) + ] + + if local_tree.targets_have_extent: + + local_target_radii = cl.array.empty( + queue, (local_ntargets,), + dtype=global_tree.coord_dtype + ) + + knls.fetch_local_tgt_knl( + tgt_particle_mask, tgt_particle_scan, + *global_tree_dev.targets.tolist(), + *local_targets_list, + global_tree_dev.target_radii, + local_target_radii + ) + + else: + + knls.fetch_local_tgt_knl( + tgt_particle_mask, tgt_particle_scan, + *global_tree_dev.targets.tolist(), + *local_targets_list + ) + + # {{{ box_target_starts + + local_box_target_starts = cl.array.empty( + queue, (global_tree.nboxes,), + dtype=global_tree.particle_id_dtype + ) + + knls.generate_box_particle_starts( + global_tree_dev.box_target_starts, + tgt_particle_scan, + local_box_target_starts + ) + + # }}} + + # {{{ box_target_counts_nonchild + + local_box_target_counts_nonchild = cl.array.zeros( + queue, (global_tree.nboxes,), + dtype=global_tree.particle_id_dtype) + + knls.generate_box_particle_counts_nonchild( + tgt_box_mask, + global_tree_dev.box_target_counts_nonchild, + local_box_target_counts_nonchild + ) + + # }}} + + # {{{ box_target_counts_cumul + + local_box_target_counts_cumul = cl.array.empty( + queue, (global_tree.nboxes,), + dtype=global_tree.particle_id_dtype + ) + + knls.generate_box_particle_counts_cumul( + global_tree_dev.box_target_counts_cumul, + global_tree_dev.box_target_starts, + local_box_target_counts_cumul, + tgt_particle_scan + ) + + # }}} + + # {{{ Fetch fields to local_tree + + local_sources = local_sources.get(queue=queue) + local_tree.sources = local_sources + + local_targets = local_targets.get(queue=queue) + local_tree.targets = local_targets + + if global_tree.targets_have_extent: + local_tree.target_radii = local_target_radii.get(queue=queue) + + local_tree.box_source_starts = local_box_source_starts.get(queue=queue) + + local_tree.box_source_counts_nonchild = \ + local_box_source_counts_nonchild.get(queue=queue) + + local_tree.box_source_counts_cumul = \ + local_box_source_counts_cumul.get(queue=queue) + + local_tree.box_target_starts = local_box_target_starts.get(queue=queue) + + local_tree.box_target_counts_nonchild = \ + local_box_target_counts_nonchild.get(queue=queue) + + local_tree.box_target_counts_cumul = \ + local_box_target_counts_cumul.get(queue=queue) + + # }}} + + # {{{ Fetch fields to local_data + + local_data["src_mask"] = src_particle_mask + local_data["src_scan"] = src_particle_scan + 
local_data["nsources"] = local_nsources + local_data["tgt_mask"] = tgt_particle_mask + local_data["tgt_scan"] = tgt_particle_scan + local_data["ntargets"] = local_ntargets + local_data["tgt_box_mask"] = tgt_box_mask + + # }}} + + +class LocalTreeBuilder: + + def __init__(self, global_tree, queue): + self.global_tree = global_tree + self.knls = get_fetch_local_particles_knls(queue.context, global_tree) + self.queue = queue + + def from_global_tree(self, responsible_boxes_list, responsible_boxes_mask, + src_boxes_mask, ancestor_mask): + + local_tree = self.global_tree.copy( + responsible_boxes_list=responsible_boxes_list, + ancestor_mask=ancestor_mask.get(), + box_to_user_starts=None, + box_to_user_lists=None, + _dimensions=None, + _ntargets=None, + _nsources=None, + ) + + local_tree.user_source_ids = None + local_tree.sorted_target_ids = None + + local_data = { + "src_mask": None, "src_scan": None, "nsources": None, + "tgt_mask": None, "tgt_scan": None, "ntargets": None + } + + fetch_local_particles( + self.queue, + self.global_tree, + src_boxes_mask, + responsible_boxes_mask, + local_tree, + local_data, + self.knls + ) + + local_tree._dimensions = local_tree.dimensions + local_tree._ntargets = local_tree.targets[0].shape[0] + local_tree._nsources = local_tree.sources[0].shape[0] + + local_tree.__class__ = LocalTree + + return local_tree, local_data + + +class LocalTree(Tree): + """ + .. attribute:: box_to_user_starts + + ``box_id_t [nboxes + 1]`` + + .. attribute:: box_to_user_lists + + ``int32 [*]`` + + A :ref:`csr` array. For each box, the list of processes which own + targets that *use* the multipole expansion at this box, via either List + 3 or (possibly downward propagated from an ancestor) List 2. + """ + + @property + def nboxes(self): + return self.box_source_starts.shape[0] + + @property + def nsources(self): + return self._nsources + + @property + def ntargets(self): + return self._ntargets + + @property + def dimensions(self): + return self._dimensions + + +def generate_local_tree(queue, traversal, responsible_boxes_list, + responsible_box_query, comm=MPI.COMM_WORLD): + + # Get MPI information + current_rank = comm.Get_rank() + total_rank = comm.Get_size() + + if current_rank == 0: + start_time = time.time() + + if current_rank == 0: + local_data = np.empty((total_rank,), dtype=object) + else: + local_data = None + + if current_rank == 0: + tree = traversal.tree + + local_tree_builder = LocalTreeBuilder(tree, queue) + + box_mpole_is_used = cl.array.empty( + queue, (total_rank, tree.nboxes,), dtype=np.int8 + ) + + # request objects for non-blocking communication + tree_req = [] + particles_req = [] + + # buffer holding communication data so that it is not garbage collected + local_tree = np.empty((total_rank,), dtype=object) + local_targets = np.empty((total_rank,), dtype=object) + local_sources = np.empty((total_rank,), dtype=object) + local_target_radii = np.empty((total_rank,), dtype=object) + + for irank in range(total_rank): + + (responsible_boxes_mask, ancestor_boxes, src_boxes_mask, + box_mpole_is_used[irank]) = \ + responsible_box_query.get_boxes_mask(responsible_boxes_list[irank]) + + local_tree[irank], local_data[irank] = \ + local_tree_builder.from_global_tree( + responsible_boxes_list[irank], responsible_boxes_mask, + src_boxes_mask, ancestor_boxes + ) + + # master process does not need to communicate with itself + if irank == 0: + continue + + # {{{ Peel sources and targets off tree + + local_targets[irank] = local_tree[irank].targets + local_tree[irank].targets = 
None + + local_sources[irank] = local_tree[irank].sources + local_tree[irank].sources = None + + local_target_radii[irank] = local_tree[irank].target_radii + local_tree[irank].target_radii = None + + # }}} + + # Send the local tree skeleton without sources and targets + tree_req.append(comm.isend( + local_tree[irank], dest=irank, tag=MPITags["DIST_TREE"])) + + # Send the sources and targets + particles_req.append(comm.Isend( + local_sources[irank], dest=irank, tag=MPITags["DIST_SOURCES"])) + + particles_req.append(comm.Isend( + local_targets[irank], dest=irank, tag=MPITags["DIST_TARGETS"])) + + if tree.targets_have_extent: + particles_req.append(comm.Isend( + local_target_radii[irank], dest=irank, tag=MPITags["DIST_RADII"]) + ) + + from boxtree.tools import MaskCompressorKernel + matcompr = MaskCompressorKernel(queue.context) + (box_to_user_starts, box_to_user_lists, evt) = \ + matcompr(queue, box_mpole_is_used.transpose(), + list_dtype=np.int32) + + cl.wait_for_events([evt]) + del box_mpole_is_used + + box_to_user_starts = box_to_user_starts.get() + box_to_user_lists = box_to_user_lists.get() + + logger.debug("computing box_to_user: done") + + # Receive the local tree from root + if current_rank == 0: + MPI.Request.Waitall(tree_req) + local_tree = local_tree[0] + else: + local_tree = comm.recv(source=0, tag=MPITags["DIST_TREE"]) + + # Receive sources and targets + if current_rank == 0: + MPI.Request.Waitall(particles_req) + else: + reqs = [] + + local_tree.sources = np.empty( + (local_tree.dimensions, local_tree.nsources), + dtype=local_tree.coord_dtype + ) + reqs.append(comm.Irecv( + local_tree.sources, source=0, tag=MPITags["DIST_SOURCES"])) + + local_tree.targets = np.empty( + (local_tree.dimensions, local_tree.ntargets), + dtype=local_tree.coord_dtype + ) + reqs.append(comm.Irecv( + local_tree.targets, source=0, tag=MPITags["DIST_TARGETS"])) + + if local_tree.targets_have_extent: + local_tree.target_radii = np.empty( + (local_tree.ntargets,), + dtype=local_tree.coord_dtype + ) + reqs.append(comm.Irecv( + local_tree.target_radii, source=0, tag=MPITags["DIST_RADII"])) + + MPI.Request.Waitall(reqs) + + # Receive box extent + if current_rank == 0: + box_target_bounding_box_min = traversal.box_target_bounding_box_min + box_target_bounding_box_max = traversal.box_target_bounding_box_max + else: + box_target_bounding_box_min = np.empty( + (local_tree.dimensions, local_tree.aligned_nboxes), + dtype=local_tree.coord_dtype + ) + box_target_bounding_box_max = np.empty( + (local_tree.dimensions, local_tree.aligned_nboxes), + dtype=local_tree.coord_dtype + ) + comm.Bcast(box_target_bounding_box_min, root=0) + comm.Bcast(box_target_bounding_box_max, root=0) + box_bounding_box = { + "min": box_target_bounding_box_min, + "max": box_target_bounding_box_max + } + + if current_rank != 0: + box_to_user_starts = None + box_to_user_lists = None + + box_to_user_starts = comm.bcast(box_to_user_starts, root=0) + box_to_user_lists = comm.bcast(box_to_user_lists, root=0) + + local_tree.box_to_user_starts = box_to_user_starts + local_tree.box_to_user_lists = box_to_user_lists + + if current_rank == 0: + logger.info("Distribute local tree in {} sec.".format( + str(time.time() - start_time)) + ) + + return local_tree, local_data, box_bounding_box diff --git a/boxtree/partition.py b/boxtree/distributed/partition.py similarity index 99% rename from boxtree/partition.py rename to boxtree/distributed/partition.py index f7dc353..a21a078 100644 --- a/boxtree/partition.py +++ b/boxtree/distributed/partition.py @@ -1,7 
+1,4 @@ -import numpy as np -import pyopencl as cl -from pyopencl.tools import dtype_to_ctype -from mako.template import Template +from __future__ import division __copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ Copyright (C) 2018 Hao Gao" @@ -26,6 +23,11 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ +import numpy as np +import pyopencl as cl +from pyopencl.tools import dtype_to_ctype +from mako.template import Template + def partition_work(traversal, total_rank, workload_weight): """ This function assigns responsible boxes of each process. diff --git a/test/test_distributed.py b/test/test_distributed.py index d2c1827..d92f479 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -1,7 +1,8 @@ import numpy as np +import pyopencl as cl from mpi4py import MPI -from boxtree.distributed import ( - DistributedFMMInfo, DistributedFMMLibExpansionWrangler) +from boxtree.distributed.calculation import DistributedFMMLibExpansionWrangler +from boxtree.distributed import DistributedFMMInfo import numpy.linalg as la from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler import logging @@ -26,6 +27,13 @@ trav = None sources_weights = None HELMHOLTZ_K = 0 +# Configure PyOpenCL +# from boxtree.distributed_old import queue +# ctx = queue.context +# print(queue.context.devices) +ctx = cl.create_some_context() +queue = cl.CommandQueue(ctx) + def fmm_level_to_nterms(tree, level): return max(level, 3) @@ -33,11 +41,6 @@ def fmm_level_to_nterms(tree, level): # Generate particles and run shared-memory parallelism on rank 0 if rank == 0: - # Configure PyOpenCL - import pyopencl as cl - ctx = cl.create_some_context() - queue = cl.CommandQueue(ctx) - print(queue.context.devices) # Generate random particles and source weights from boxtree.tools import make_normal_particle_array as p_normal @@ -75,17 +78,16 @@ if rank == 0: from boxtree.fmm import drive_fmm pot_fmm = drive_fmm(trav, wrangler, sources_weights) * 2 * np.pi -# Compute FMM using distributed memory parallelism -from boxtree.distributed import queue +# Compute FMM using distributed memory parallelism def distributed_expansion_wrangler_factory(tree): return DistributedFMMLibExpansionWrangler( - tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) + queue, tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) distribued_fmm_info = DistributedFMMInfo( - trav, distributed_expansion_wrangler_factory, comm=comm) + queue, trav, distributed_expansion_wrangler_factory, comm=comm) pot_dfmm = distribued_fmm_info.drive_dfmm(sources_weights) if rank == 0: -- GitLab From 751d19022994bab41c456d441d933d3d4927b55a Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 5 Jul 2018 14:05:16 +0800 Subject: [PATCH 113/260] Add more documentation --- .gitignore | 5 +++++ boxtree/distributed/__init__.py | 28 ++++++++++++++++++++++++++++ test/test_distributed.py | 5 +---- 3 files changed, 34 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index e271a14..6bd925c 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,8 @@ distribute*tar.gz a.out .cache +.pytest_cache + +.DS_Store +.idea +.vscode diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 8efa5e5..79eb295 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -72,6 +72,8 @@ class DistributedFMMInfo(object): self.comm = comm current_rank = comm.Get_rank() + # {{{ Broadcast well_sep_is_n_away + if current_rank == 0: well_sep_is_n_away = 
global_trav.well_sep_is_n_away else: @@ -79,6 +81,10 @@ class DistributedFMMInfo(object): well_sep_is_n_away = comm.bcast(well_sep_is_n_away, root=0) + # }}} + + # {{{ Partiton work + if current_rank == 0: from boxtree.distributed.partition import partition_work workload_weight = WorkloadWeight( @@ -90,6 +96,10 @@ class DistributedFMMInfo(object): else: responsible_boxes_list = None + # }}} + + # {{{ Compute and distribute local tree + if current_rank == 0: from boxtree.distributed.partition import ResponsibleBoxesQuery responsible_box_query = ResponsibleBoxesQuery(queue, global_trav) @@ -101,11 +111,27 @@ class DistributedFMMInfo(object): generate_local_tree(queue, self.global_trav, responsible_boxes_list, responsible_box_query) + # }}} + + # {{{ Compute traversal object on each process + from boxtree.distributed.local_traversal import generate_local_travs self.local_trav = generate_local_travs( queue, self.local_tree, self.box_bounding_box, well_sep_is_n_away=well_sep_is_n_away) + # }}} + + # {{{ Get local and global wrangler + + """ + Note: The difference between "local wrangler" and "global wrangler" is that + they reference different tree object. "local wrangler" uses local tree + object on each worker process for FMM computation, whereas "global wrangler" + is only valid on root process used for assembling results from worker + processes. + """ + self.local_wrangler = self.distributed_expansion_wrangler_factory( self.local_tree) @@ -115,6 +141,8 @@ class DistributedFMMInfo(object): else: self.global_wrangler = None + # }}} + def drive_dfmm(self, source_weights): from boxtree.distributed.calculation import calculate_pot return calculate_pot( diff --git a/test/test_distributed.py b/test/test_distributed.py index d92f479..817b8d5 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -22,15 +22,12 @@ dtype = np.float64 comm = MPI.COMM_WORLD rank = comm.Get_rank() -# Initialization +# Initialize arguments for worker processes trav = None sources_weights = None HELMHOLTZ_K = 0 # Configure PyOpenCL -# from boxtree.distributed_old import queue -# ctx = queue.context -# print(queue.context.devices) ctx = cl.create_some_context() queue = cl.CommandQueue(ctx) -- GitLab From 659fee556646e23f81b2a8134dfbe4b9bd158344 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 5 Jul 2018 02:58:01 -0500 Subject: [PATCH 114/260] Remove mask and scan from local_data to save device memory --- boxtree/distributed/calculation.py | 40 +++++--------------------- boxtree/distributed/local_tree.py | 46 ++++++++++++++++++++---------- 2 files changed, 38 insertions(+), 48 deletions(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index e45cd19..7e64944 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -309,15 +309,9 @@ def distribute_source_weights(queue, source_weights, global_tree, local_data, local_src_weights = np.empty((total_rank,), dtype=object) # Generate local_weights - source_weights = cl.array.to_device(queue, source_weights) - gen_local_weights_helper = get_gen_local_weights_helper( - queue, global_tree.particle_id_dtype, source_weights.dtype) for rank in range(total_rank): - local_src_weights[rank] = gen_local_weights_helper( - source_weights, - local_data[rank]["src_mask"], - local_data[rank]["src_scan"] - ) + local_src_weights[rank] = source_weights[local_data[rank].src_idx] + weight_req[rank] = comm.isend(local_src_weights[rank], dest=rank, tag=MPITags["DIST_WEIGHT"]) @@ -490,7 +484,7 @@ def 
calculate_pot(queue, wrangler, global_wrangler, local_trav, source_weights, potentials_all_ranks[0] = potentials for i in range(1, total_rank): potentials_all_ranks[i] = np.empty( - (local_data[i]["ntargets"],), dtype=potentials.dtype) + (local_data[i].ntargets,), dtype=potentials.dtype) comm.Recv([potentials_all_ranks[i], potentials_mpi_type], source=i, tag=MPITags["GATHER_POTENTIALS"]) else: @@ -498,32 +492,12 @@ def calculate_pot(queue, wrangler, global_wrangler, local_trav, source_weights, dest=0, tag=MPITags["GATHER_POTENTIALS"]) if current_rank == 0: - d_potentials = cl.array.empty(queue, (global_wrangler.tree.ntargets,), - dtype=potentials.dtype) - fill_potentials_knl = cl.elementwise.ElementwiseKernel( - queue.context, - Template(""" - __global ${particle_id_t} *particle_mask, - __global ${particle_id_t} *particle_scan, - __global ${potential_t} *local_potentials, - __global ${potential_t} *potentials - """).render( - particle_id_t=dtype_to_ctype(global_wrangler.tree.particle_id_dtype), - potential_t=dtype_to_ctype(potentials.dtype)), - r""" - if(particle_mask[i]) { - potentials[i] = local_potentials[particle_scan[i]]; - } - """ - ) - for i in range(total_rank): - local_potentials = cl.array.to_device(queue, potentials_all_ranks[i]) - fill_potentials_knl( - local_data[i]["tgt_mask"], local_data[i]["tgt_scan"], - local_potentials, d_potentials) + potentials = np.empty((global_wrangler.tree.ntargets,), + dtype=potentials.dtype) - potentials = d_potentials.get() + for irank in range(total_rank): + potentials[local_data[irank].tgt_idx] = potentials_all_ranks[irank] logger.debug("reorder potentials") result = global_wrangler.reorder_potentials(potentials) diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 9bfb2ed..11a96e9 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -205,8 +205,19 @@ def get_fetch_local_particles_knls(context, global_tree): ) +LocalData = namedtuple( + 'LocalData', + [ + 'nsources', + 'ntargets', + 'src_idx', + 'tgt_idx' + ] +) + + def fetch_local_particles(queue, global_tree, src_box_mask, tgt_box_mask, local_tree, - local_data, knls): + knls): """ This helper function fetches particles needed for worker processes, and reconstruct list of lists indexing. 
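The hunk above swaps the per-particle mask/scan pair formerly carried in local_data for the plain integer index arrays src_idx and tgt_idx of the new LocalData namedtuple, so that picking out the local source weights and scattering computed potentials back into the global ordering reduce to ordinary NumPy fancy indexing on the host. Below is a minimal sketch of that equivalence; it uses only NumPy, the names particle_mask and src_idx mirror the code above, and the concrete values are made up purely for illustration.

import numpy as np

# Boolean ownership mask over all global sources: nonzero where this rank
# owns the particle (illustrative values).
particle_mask = np.array([1, 0, 1, 1, 0, 1], dtype=np.int8)

# The index-array form stored in LocalData: positions of the owned particles.
src_idx = np.arange(particle_mask.shape[0])[particle_mask.astype(bool)]

# Distributing source weights -- previously an elementwise kernel doing
# "local_weights[scan[i]] = src_weights[i]" -- becomes a single gather:
source_weights = np.array([2.0, 3.0, 5.0, 7.0, 11.0, 13.0])
local_src_weights = source_weights[src_idx]    # [2., 5., 7., 13.]

# Assembling potentials on the root -- previously
# "potentials[i] = local_potentials[scan[i]]" -- becomes the matching scatter:
potentials = np.zeros(6)
local_potentials = np.array([1.0, 2.0, 3.0, 4.0])
potentials[src_idx] = local_potentials

Keeping only the integer indices also avoids holding a mask and a scan of length nsources and ntargets in device memory, which is the saving this patch is after.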
@@ -460,18 +471,29 @@ def fetch_local_particles(queue, global_tree, src_box_mask, tgt_box_mask, local_ # }}} + # {{{ src_idx and tgt_idx + + src_particle_mask = src_particle_mask.get(queue=queue).astype(bool) + src_idx = np.arange(nsources)[src_particle_mask] + + tgt_particle_mask = tgt_particle_mask.get(queue=queue).astype(bool) + tgt_idx = np.arange(ntargets)[tgt_particle_mask] + + # }}} + # {{{ Fetch fields to local_data - local_data["src_mask"] = src_particle_mask - local_data["src_scan"] = src_particle_scan - local_data["nsources"] = local_nsources - local_data["tgt_mask"] = tgt_particle_mask - local_data["tgt_scan"] = tgt_particle_scan - local_data["ntargets"] = local_ntargets - local_data["tgt_box_mask"] = tgt_box_mask + local_data = LocalData( + nsources=local_nsources, + ntargets=local_ntargets, + src_idx=src_idx, + tgt_idx=tgt_idx + ) # }}} + return local_tree, local_data + class LocalTreeBuilder: @@ -496,18 +518,12 @@ class LocalTreeBuilder: local_tree.user_source_ids = None local_tree.sorted_target_ids = None - local_data = { - "src_mask": None, "src_scan": None, "nsources": None, - "tgt_mask": None, "tgt_scan": None, "ntargets": None - } - - fetch_local_particles( + local_tree, local_data = fetch_local_particles( self.queue, self.global_tree, src_boxes_mask, responsible_boxes_mask, local_tree, - local_data, self.knls ) -- GitLab From 18b72120959b42dee1900f8b7a8f1ce3ef3e7406 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 5 Jul 2018 07:40:27 -0500 Subject: [PATCH 115/260] Improve code quality --- boxtree/distributed/__init__.py | 7 ++- boxtree/distributed/calculation.py | 77 ++++++++++-------------------- 2 files changed, 27 insertions(+), 57 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 79eb295..ce4ae04 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -63,8 +63,6 @@ class DistributedFMMInfo(object): def __init__(self, queue, global_trav, distributed_expansion_wrangler_factory, comm=MPI.COMM_WORLD): - self.queue = queue - self.global_trav = global_trav self.distributed_expansion_wrangler_factory = \ distributed_expansion_wrangler_factory @@ -146,5 +144,6 @@ class DistributedFMMInfo(object): def drive_dfmm(self, source_weights): from boxtree.distributed.calculation import calculate_pot return calculate_pot( - self.queue, self.local_wrangler, self.global_wrangler, self.local_trav, - source_weights, self.local_data) + self.local_wrangler, self.global_wrangler, self.local_trav, + source_weights, self.local_data + ) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 7e64944..842d7aa 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -27,8 +27,6 @@ import numpy as np import pyopencl as cl from boxtree.distributed import MPITags from mpi4py import MPI -from mako.template import Template -from pyopencl.tools import dtype_to_ctype import time from boxtree.distributed import dtype_to_mpi from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler @@ -40,7 +38,7 @@ import logging logger = logging.getLogger(__name__) -# {{{ distributed fmm wrangler +# {{{ Distributed FMM wrangler class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): @@ -264,59 +262,35 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): # }}} -def get_gen_local_weights_helper(queue, particle_dtype, weight_dtype): - gen_local_source_weights_knl = cl.elementwise.ElementwiseKernel( - queue.context, - 
arguments=Template(""" - __global ${weight_t} *src_weights, - __global ${particle_id_t} *particle_mask, - __global ${particle_id_t} *particle_scan, - __global ${weight_t} *local_weights - """, strict_undefined=True).render( - weight_t=dtype_to_ctype(weight_dtype), - particle_id_t=dtype_to_ctype(particle_dtype) - ), - operation=""" - if(particle_mask[i]) { - local_weights[particle_scan[i]] = src_weights[i]; - } - """ - ) - - def gen_local_weights(global_weights, source_mask, source_scan): - local_nsources = source_scan[-1].get(queue) - local_weights = cl.array.empty(queue, (local_nsources,), - dtype=weight_dtype) - gen_local_source_weights_knl(global_weights, source_mask, source_scan, - local_weights) - return local_weights.get(queue) - - return gen_local_weights +def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD): + """ This function transfers needed source_weights from root process to each + worker process in communicator :arg comm. + This function needs to be called by all processes in the :arg comm communicator. -def distribute_source_weights(queue, source_weights, global_tree, local_data, - comm=MPI.COMM_WORLD): - """ - source_weights: source weights in tree order - global_tree: complete tree structure on root, None otherwise. - local_data: returned from *generate_local_tree* + :param source_weights: Source weights in tree order on root, None on worker + processes. + :param local_data: Returned from *generate_local_tree*. None on worker processes. + :return Source weights needed for the current process. """ current_rank = comm.Get_rank() total_rank = comm.Get_size() if current_rank == 0: - weight_req = np.empty((total_rank,), dtype=object) + weight_req = [] local_src_weights = np.empty((total_rank,), dtype=object) - # Generate local_weights - for rank in range(total_rank): - local_src_weights[rank] = source_weights[local_data[rank].src_idx] + for irank in range(total_rank): + local_src_weights[irank] = source_weights[local_data[irank].src_idx] - weight_req[rank] = comm.isend(local_src_weights[rank], dest=rank, - tag=MPITags["DIST_WEIGHT"]) + if irank != 0: + weight_req.append( + comm.isend(local_src_weights[irank], dest=irank, + tag=MPITags["DIST_WEIGHT"]) + ) + + MPI.Request.Waitall(weight_req) - for rank in range(1, total_rank): - weight_req[rank].wait() local_src_weights = local_src_weights[0] else: local_src_weights = comm.recv(source=0, tag=MPITags["DIST_WEIGHT"]) @@ -324,9 +298,8 @@ def distribute_source_weights(queue, source_weights, global_tree, local_data, return local_src_weights -def calculate_pot(queue, wrangler, global_wrangler, local_trav, source_weights, - local_data, comm=MPI.COMM_WORLD, - _communicate_mpoles_via_allreduce=False): +def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, local_data, + comm=MPI.COMM_WORLD, _communicate_mpoles_via_allreduce=False): # Get MPI information current_rank = comm.Get_rank() @@ -338,14 +311,12 @@ def calculate_pot(queue, wrangler, global_wrangler, local_trav, source_weights, # {{{ Distribute source weights if current_rank == 0: - global_tree = global_wrangler.tree # Convert src_weights to tree order - source_weights = source_weights[global_tree.user_source_ids] - else: - global_tree = None + source_weights = source_weights[global_wrangler.tree.user_source_ids] local_src_weights = distribute_source_weights( - queue, source_weights, global_tree, local_data, comm=comm) + source_weights, local_data, comm=comm + ) # }}} -- GitLab From 726bd2480913d68bc4cd4440218a6e140fb26550 Mon Sep 17 
00:00:00 2001 From: Hao Gao Date: Fri, 6 Jul 2018 07:25:14 -0500 Subject: [PATCH 116/260] Add documentation for FMM calculation --- boxtree/distributed/calculation.py | 82 +++++++++++++++++++++++------- 1 file changed, 63 insertions(+), 19 deletions(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 842d7aa..e0a9d02 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -142,7 +142,7 @@ class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): # }}} -# {{{ communicate mpoles +# {{{ Communicate mpoles def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): """Based on Algorithm 3: Reduce and Scatter in [1]. @@ -151,6 +151,8 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): decrease the bandwidth cost by sending only information that is relevant to the processes receiving the message. + This function needs to be called collectively by all processes in :arg comm. + .. [1] Lashuk, Ilya, Aparna Chandramowlishwaran, Harper Langston, Tuan-Anh Nguyen, Rahul Sampath, Aashay Shringarpure, Richard Vuduc, Lexing Ying, Denis Zorin, and George Biros. “A massively parallel adaptive fast @@ -262,6 +264,8 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): # }}} +# {{{ Distribute source weights + def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD): """ This function transfers needed source_weights from root process to each worker process in communicator :arg comm. @@ -297,9 +301,33 @@ def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD): return local_src_weights +# }}} -def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, local_data, - comm=MPI.COMM_WORLD, _communicate_mpoles_via_allreduce=False): + +# {{{ FMM driver for calculating potentials + +def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, + local_data, comm=MPI.COMM_WORLD, + _communicate_mpoles_via_allreduce=False): + """ Calculate potentials for targets on distributed memory machines. + + This function needs to be called collectively by all process in :arg comm. + + :param local_wrangler: Expansion wranglers for each worker process for FMM. + :param global_wrangler: Expansion wrangler on root process for assembling partial + results from worker processes together. This argument differs from + :arg local_wrangler by referening the global tree instead of local trees. + This argument is None on worker process. + :param local_trav: Local traversal object returned from generate_local_travs. + :param source_weights: Source weights for FMM. None on worker processes. + :param local_data: LocalData object returned from generate_local_tree. + :param comm: MPI communicator. + :param _communicate_mpoles_via_allreduce: Use MPI allreduce for communicating + multipole expressions. Using MPI allreduce is slower but might be helpful for + debugging purpose. + :return: On the root process, this function returns calculated potentials. On + worker processes, this function returns None. 
+ """ # Get MPI information current_rank = comm.Get_rank() @@ -323,7 +351,7 @@ def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, local_d # {{{ "Step 2.1:" Construct local multipoles logger.debug("construct local multipoles") - mpole_exps = wrangler.form_multipoles( + mpole_exps = local_wrangler.form_multipoles( local_trav.level_start_source_box_nrs, local_trav.source_boxes, local_src_weights) @@ -333,7 +361,7 @@ def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, local_d # {{{ "Step 2.2:" Propagate multipoles upward logger.debug("propagate multipoles upward") - wrangler.coarsen_multipoles( + local_wrangler.coarsen_multipoles( local_trav.level_start_source_parent_box_nrs, local_trav.source_parent_boxes, mpole_exps) @@ -349,14 +377,14 @@ def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, local_d comm.Allreduce(mpole_exps, mpole_exps_all) mpole_exps = mpole_exps_all else: - communicate_mpoles(wrangler, comm, local_trav, mpole_exps) + communicate_mpoles(local_wrangler, comm, local_trav, mpole_exps) # }}} # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") logger.debug("direct evaluation from neighbor source boxes ('list 1')") - potentials = wrangler.eval_direct( + potentials = local_wrangler.eval_direct( local_trav.target_boxes, local_trav.neighbor_source_boxes_starts, local_trav.neighbor_source_boxes_lists, @@ -369,7 +397,7 @@ def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, local_d # {{{ "Stage 4:" translate separated siblings' ("list 2") mpoles to local logger.debug("translate separated siblings' ('list 2') mpoles to local") - local_exps = wrangler.multipole_to_local( + local_exps = local_wrangler.multipole_to_local( local_trav.level_start_target_or_target_parent_box_nrs, local_trav.target_or_target_parent_boxes, local_trav.from_sep_siblings_starts, @@ -387,7 +415,7 @@ def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, local_d # (the point of aiming this stage at particles is specifically to keep its # contribution *out* of the downward-propagating local expansions) - potentials = potentials + wrangler.eval_multipoles( + potentials = potentials + local_wrangler.eval_multipoles( local_trav.target_boxes_sep_smaller_by_source_level, local_trav.from_sep_smaller_by_level, mpole_exps) @@ -397,7 +425,7 @@ def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, local_d if local_trav.from_sep_close_smaller_starts is not None: logger.debug("evaluate separated close smaller interactions directly " "('list 3 close')") - potentials = potentials + wrangler.eval_direct( + potentials = potentials + local_wrangler.eval_direct( local_trav.target_boxes, local_trav.from_sep_close_smaller_starts, local_trav.from_sep_close_smaller_lists, @@ -409,7 +437,7 @@ def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, local_d logger.debug("form locals for separated bigger source boxes ('list 4 far')") - local_exps = local_exps + wrangler.form_locals( + local_exps = local_exps + local_wrangler.form_locals( local_trav.level_start_target_or_target_parent_box_nrs, local_trav.target_or_target_parent_boxes, local_trav.from_sep_bigger_starts, @@ -420,7 +448,7 @@ def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, local_d logger.debug("evaluate separated close bigger interactions directly " "('list 4 close')") - potentials = potentials + wrangler.eval_direct( + potentials = potentials + local_wrangler.eval_direct( 
local_trav.target_or_target_parent_boxes, local_trav.from_sep_close_bigger_starts, local_trav.from_sep_close_bigger_lists, @@ -432,7 +460,7 @@ def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, local_d logger.debug("propagate local_exps downward") - wrangler.refine_locals( + local_wrangler.refine_locals( local_trav.level_start_target_or_target_parent_box_nrs, local_trav.target_or_target_parent_boxes, local_exps) @@ -442,26 +470,36 @@ def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, local_d # {{{ "Stage 8:" evaluate locals logger.debug("evaluate locals") - potentials = potentials + wrangler.eval_locals( + potentials = potentials + local_wrangler.eval_locals( local_trav.level_start_target_box_nrs, local_trav.target_boxes, local_exps) # }}} + # {{{ Worker processes send calculated potentials to the root process + potentials_mpi_type = dtype_to_mpi(potentials.dtype) + if current_rank == 0: + potentials_all_ranks = np.empty((total_rank,), dtype=object) potentials_all_ranks[0] = potentials - for i in range(1, total_rank): - potentials_all_ranks[i] = np.empty( - (local_data[i].ntargets,), dtype=potentials.dtype) - comm.Recv([potentials_all_ranks[i], potentials_mpi_type], - source=i, tag=MPITags["GATHER_POTENTIALS"]) + + for irank in range(1, total_rank): + potentials_all_ranks[irank] = np.empty( + (local_data[irank].ntargets,), dtype=potentials.dtype) + + comm.Recv([potentials_all_ranks[irank], potentials_mpi_type], + source=irank, tag=MPITags["GATHER_POTENTIALS"]) else: comm.Send([potentials, potentials_mpi_type], dest=0, tag=MPITags["GATHER_POTENTIALS"]) + # }}} + + # {{{ Assemble potentials from worker processes together on the root process + if current_rank == 0: potentials = np.empty((global_wrangler.tree.ntargets,), @@ -476,8 +514,14 @@ def calculate_pot(wrangler, global_wrangler, local_trav, source_weights, local_d logger.debug("finalize potentials") result = global_wrangler.finalize_potentials(result) + # }}} + + if current_rank == 0: + logger.info("Distributed FMM evaluation completes in {} sec.".format( str(time.time() - start_time) )) return result + +# }}} -- GitLab From 1cfeb4514bde283e7ce662a3f7b5370ee58b4578 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 9 Jul 2018 04:04:10 -0500 Subject: [PATCH 117/260] Integrate test_distributed with pytest --- test/test_distributed.py | 151 ++++++++++++++++++++++++--------------- 1 file changed, 94 insertions(+), 57 deletions(-) diff --git a/test/test_distributed.py b/test/test_distributed.py index 817b8d5..6152146 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -7,86 +7,123 @@ import numpy.linalg as la from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler import logging import os +import pytest # Configure logging logging.basicConfig(level=os.environ.get("LOGLEVEL", "WARNING")) logging.getLogger("boxtree.distributed").setLevel(logging.INFO) -# Parameters -dims = 3 -nsources = 10000 -ntargets = 10000 -dtype = np.float64 -# Get the current rank -comm = MPI.COMM_WORLD -rank = comm.Get_rank() +def _test_distributed(dims, nsources, ntargets, dtype): -# Initialize arguments for worker processes -trav = None -sources_weights = None -HELMHOLTZ_K = 0 + # Get the current rank + comm = MPI.COMM_WORLD + rank = comm.Get_rank() -# Configure PyOpenCL -ctx = cl.create_some_context() -queue = cl.CommandQueue(ctx) + # Initialize arguments for worker processes + trav = None + sources_weights = None + HELMHOLTZ_K = 0 + # Configure PyOpenCL + ctx = cl.create_some_context() + queue = 
cl.CommandQueue(ctx) -def fmm_level_to_nterms(tree, level): - return max(level, 3) + def fmm_level_to_nterms(tree, level): + return max(level, 3) + # Generate particles and run shared-memory parallelism on rank 0 + if rank == 0: -# Generate particles and run shared-memory parallelism on rank 0 -if rank == 0: + # Generate random particles and source weights + from boxtree.tools import make_normal_particle_array as p_normal + sources = p_normal(queue, nsources, dims, dtype, seed=15) + targets = p_normal(queue, ntargets, dims, dtype, seed=18) - # Generate random particles and source weights - from boxtree.tools import make_normal_particle_array as p_normal - sources = p_normal(queue, nsources, dims, dtype, seed=15) - targets = p_normal(queue, ntargets, dims, dtype, seed=18) + from pyopencl.clrandom import PhiloxGenerator + rng = PhiloxGenerator(queue.context, seed=20) + sources_weights = rng.uniform(queue, nsources, dtype=np.float64).get() - from boxtree.tools import particle_array_to_host - sources_host = particle_array_to_host(sources) - targets_host = particle_array_to_host(targets) + from pyopencl.clrandom import PhiloxGenerator + rng = PhiloxGenerator(queue.context, seed=22) + target_radii = rng.uniform( + queue, ntargets, a=0, b=0.05, dtype=np.float64).get() - from pyopencl.clrandom import PhiloxGenerator - rng = PhiloxGenerator(queue.context, seed=20) - sources_weights = rng.uniform(queue, nsources, dtype=np.float64).get() + # Build the tree and interaction lists + from boxtree import TreeBuilder + tb = TreeBuilder(ctx) + tree, _ = tb(queue, sources, targets=targets, target_radii=target_radii, + stick_out_factor=0.25, max_particles_in_box=30, debug=True) - from pyopencl.clrandom import PhiloxGenerator - rng = PhiloxGenerator(queue.context, seed=22) - target_radii = rng.uniform(queue, ntargets, a=0, b=0.05, dtype=np.float64).get() + from boxtree.traversal import FMMTraversalBuilder + tg = FMMTraversalBuilder(ctx, well_sep_is_n_away=2) + d_trav, _ = tg(queue, tree, debug=True) + trav = d_trav.get(queue=queue) - # Build the tree and interaction lists - from boxtree import TreeBuilder - tb = TreeBuilder(ctx) - tree, _ = tb(queue, sources, targets=targets, target_radii=target_radii, - stick_out_factor=0.25, max_particles_in_box=30, debug=True) + # Get pyfmmlib expansion wrangler + wrangler = FMMLibExpansionWrangler( + trav.tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) - from boxtree.traversal import FMMTraversalBuilder - tg = FMMTraversalBuilder(ctx, well_sep_is_n_away=2) - d_trav, _ = tg(queue, tree, debug=True) - trav = d_trav.get(queue=queue) + # Compute FMM using shared memory parallelism + from boxtree.fmm import drive_fmm + pot_fmm = drive_fmm(trav, wrangler, sources_weights) * 2 * np.pi - # Get pyfmmlib expansion wrangler - wrangler = FMMLibExpansionWrangler( - trav.tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) + # Compute FMM using distributed memory parallelism - # Compute FMM using shared memory parallelism - from boxtree.fmm import drive_fmm - pot_fmm = drive_fmm(trav, wrangler, sources_weights) * 2 * np.pi + def distributed_expansion_wrangler_factory(tree): + return DistributedFMMLibExpansionWrangler( + queue, tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) + distribued_fmm_info = DistributedFMMInfo( + queue, trav, distributed_expansion_wrangler_factory, comm=comm) + pot_dfmm = distribued_fmm_info.drive_dfmm(sources_weights) -# Compute FMM using distributed memory parallelism + if rank == 0: + error = (la.norm(pot_fmm - pot_dfmm * 2 * 
np.pi, ord=np.inf) / + la.norm(pot_fmm, ord=np.inf)) + print(error) + assert error < 1e-14 -def distributed_expansion_wrangler_factory(tree): - return DistributedFMMLibExpansionWrangler( - queue, tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) +@pytest.mark.mpi +@pytest.mark.parametrize("num_processes, dims, nsources, ntargets", [ + (4, 3, 10000, 10000) +]) +def test_distributed(num_processes, dims, nsources, ntargets): + pytest.importorskip("mpi4py") -distribued_fmm_info = DistributedFMMInfo( - queue, trav, distributed_expansion_wrangler_factory, comm=comm) -pot_dfmm = distribued_fmm_info.drive_dfmm(sources_weights) + newenv = os.environ.copy() + newenv["PYTEST"] = "1" + newenv["dims"] = str(dims) + newenv["nsources"] = str(nsources) + newenv["ntargets"] = str(ntargets) -if rank == 0: - print((la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=np.inf) / - la.norm(pot_fmm, ord=np.inf))) + import subprocess + import sys + subprocess.run([ + "mpiexec", "-np", str(num_processes), + "-x", "PYTEST", "-x", "dims", "-x", "nsources", "-x", "ntargets", + sys.executable, __file__], + env=newenv, + check=True + ) + + +if __name__ == "__main__": + + dtype = np.float64 + + if "PYTEST" in os.environ: + # Run pytest test case + dims = int(os.environ["dims"]) + nsources = int(os.environ["nsources"]) + ntargets = int(os.environ["ntargets"]) + + _test_distributed(dims, nsources, ntargets, dtype) + else: + + dims = 3 + nsources = 10000 + ntargets = 10000 + + _test_distributed(dims, nsources, ntargets, dtype) -- GitLab From 2c601d9622524ae59f5e6a3f26f5a6da6502d709 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 9 Jul 2018 10:32:25 -0500 Subject: [PATCH 118/260] Integrate constantone test case into test_distributed --- boxtree/distributed/__init__.py | 5 +- boxtree/distributed/local_tree.py | 5 +- test/test_constantone.py | 240 ------------------------- test/test_distributed.py | 286 ++++++++++++++++++++++++++++-- 4 files changed, 277 insertions(+), 259 deletions(-) delete mode 100644 test/test_constantone.py diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index ce4ae04..1989bd9 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -141,9 +141,10 @@ class DistributedFMMInfo(object): # }}} - def drive_dfmm(self, source_weights): + def drive_dfmm(self, source_weights, _communicate_mpoles_via_allreduce=False): from boxtree.distributed.calculation import calculate_pot return calculate_pot( self.local_wrangler, self.global_wrangler, self.local_trav, - source_weights, self.local_data + source_weights, self.local_data, + _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce ) diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 11a96e9..9dfbae9 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -626,8 +626,9 @@ def generate_local_tree(queue, traversal, responsible_boxes_list, local_sources[irank] = local_tree[irank].sources local_tree[irank].sources = None - local_target_radii[irank] = local_tree[irank].target_radii - local_tree[irank].target_radii = None + if tree.targets_have_extent: + local_target_radii[irank] = local_tree[irank].target_radii + local_tree[irank].target_radii = None # }}} diff --git a/test/test_constantone.py b/test/test_constantone.py deleted file mode 100644 index 8eca7c7..0000000 --- a/test/test_constantone.py +++ /dev/null @@ -1,240 +0,0 @@ -import numpy as np -from mpi4py import MPI -from boxtree.distributed import generate_local_tree, 
generate_local_travs, drive_dfmm - - -class ConstantOneExpansionWrangler(object): - """This implements the 'analytical routines' for a Green's function that is - constant 1 everywhere. For 'charges' of 'ones', this should get every particle - a copy of the particle count. - """ - - def __init__(self, tree): - self.tree = tree - - def multipole_expansion_zeros(self): - return np.zeros(self.tree.nboxes, dtype=np.float64) - - local_expansion_zeros = multipole_expansion_zeros - - def potential_zeros(self): - return np.zeros(self.tree.ntargets, dtype=np.float64) - - def _get_source_slice(self, ibox): - pstart = self.tree.box_source_starts[ibox] - return slice( - pstart, pstart + self.tree.box_source_counts_nonchild[ibox]) - - def _get_target_slice(self, ibox): - pstart = self.tree.box_target_starts[ibox] - return slice( - pstart, pstart + self.tree.box_target_counts_nonchild[ibox]) - - def reorder_sources(self, source_array): - return source_array[self.tree.user_source_ids] - - def reorder_potentials(self, potentials): - return potentials[self.tree.sorted_target_ids] - - def form_multipoles(self, level_start_source_box_nrs, source_boxes, src_weights): - mpoles = self.multipole_expansion_zeros() - for ibox in source_boxes: - pslice = self._get_source_slice(ibox) - mpoles[ibox] += np.sum(src_weights[pslice]) - - return mpoles - - def coarsen_multipoles(self, level_start_source_parent_box_nrs, - source_parent_boxes, mpoles): - tree = self.tree - - # nlevels-1 is the last valid level index - # nlevels-2 is the last valid level that could have children - # - # 3 is the last relevant source_level. - # 2 is the last relevant target_level. - # (because no level 1 box will be well-separated from another) - for source_level in range(tree.nlevels-1, 2, -1): - target_level = source_level - 1 - start, stop = level_start_source_parent_box_nrs[ - target_level:target_level+2] - for ibox in source_parent_boxes[start:stop]: - for child in tree.box_child_ids[:, ibox]: - if child: - mpoles[ibox] += mpoles[child] - - def eval_direct(self, target_boxes, neighbor_sources_starts, - neighbor_sources_lists, src_weights): - pot = self.potential_zeros() - - for itgt_box, tgt_ibox in enumerate(target_boxes): - tgt_pslice = self._get_target_slice(tgt_ibox) - - src_sum = 0 - start, end = neighbor_sources_starts[itgt_box:itgt_box+2] - #print "DIR: %s <- %s" % (tgt_ibox, neighbor_sources_lists[start:end]) - for src_ibox in neighbor_sources_lists[start:end]: - src_pslice = self._get_source_slice(src_ibox) - - src_sum += np.sum(src_weights[src_pslice]) - - pot[tgt_pslice] = src_sum - - return pot - - def multipole_to_local(self, - level_start_target_or_target_parent_box_nrs, - target_or_target_parent_boxes, - starts, lists, mpole_exps): - local_exps = self.local_expansion_zeros() - - for itgt_box, tgt_ibox in enumerate(target_or_target_parent_boxes): - start, end = starts[itgt_box:itgt_box+2] - - contrib = 0 - #print tgt_ibox, "<-", lists[start:end] - for src_ibox in lists[start:end]: - contrib += mpole_exps[src_ibox] - - local_exps[tgt_ibox] += contrib - - return local_exps - - def eval_multipoles(self, level_start_target_box_nrs, target_boxes, - from_sep_smaller_nonsiblings_by_level, mpole_exps): - pot = self.potential_zeros() - - for ssn in from_sep_smaller_nonsiblings_by_level: - for itgt_box, tgt_ibox in enumerate(target_boxes): - tgt_pslice = self._get_target_slice(tgt_ibox) - - contrib = 0 - - start, end = ssn.starts[itgt_box:itgt_box+2] - for src_ibox in ssn.lists[start:end]: - contrib += mpole_exps[src_ibox] - - 
pot[tgt_pslice] += contrib - - return pot - - def form_locals(self, - level_start_target_or_target_parent_box_nrs, - target_or_target_parent_boxes, starts, lists, src_weights): - local_exps = self.local_expansion_zeros() - - for itgt_box, tgt_ibox in enumerate(target_or_target_parent_boxes): - start, end = starts[itgt_box:itgt_box+2] - - #print "LIST 4", tgt_ibox, "<-", lists[start:end] - contrib = 0 - for src_ibox in lists[start:end]: - src_pslice = self._get_source_slice(src_ibox) - - contrib += np.sum(src_weights[src_pslice]) - - local_exps[tgt_ibox] += contrib - - return local_exps - - def refine_locals(self, level_start_target_or_target_parent_box_nrs, - target_or_target_parent_boxes, local_exps): - - for target_lev in range(1, self.tree.nlevels): - start, stop = level_start_target_or_target_parent_box_nrs[ - target_lev:target_lev+2] - for ibox in target_or_target_parent_boxes[start:stop]: - local_exps[ibox] += local_exps[self.tree.box_parent_ids[ibox]] - - return local_exps - - def eval_locals(self, level_start_target_box_nrs, target_boxes, local_exps): - pot = self.potential_zeros() - - for ibox in target_boxes: - tgt_pslice = self._get_target_slice(ibox) - pot[tgt_pslice] += local_exps[ibox] - - return pot - - def finalize_potentials(self, potentials): - return potentials - - -# Parameters -dims = 2 -nsources = 100000 -ntargets = 100000 -dtype = np.float64 - -# Get the current rank -comm = MPI.COMM_WORLD -rank = comm.Get_rank() - -# Initialization -trav = None -sources_weights = None -wrangler = None - -# Generate particles and run shared-memory parallelism on rank 0 -if rank == 0: - # Configure PyOpenCL - import pyopencl as cl - ctx = cl.create_some_context() - queue = cl.CommandQueue(ctx) - print(queue.context.devices) - - # Generate random particles and source weights - from boxtree.tools import make_normal_particle_array as p_normal - sources = p_normal(queue, nsources, dims, dtype, seed=15) - targets = (p_normal(queue, ntargets, dims, dtype, seed=18) + - np.array([2, 0, 0])[:dims]) - - from boxtree.tools import particle_array_to_host - sources_host = particle_array_to_host(sources) - targets_host = particle_array_to_host(targets) - - from pyopencl.clrandom import PhiloxGenerator - rng = PhiloxGenerator(queue.context, seed=20) - # sources_weights = rng.uniform(queue, nsources, dtype=np.float64).get() - sources_weights = np.ones((nsources,)) - - # Build the tree and interaction lists - from boxtree import TreeBuilder - tb = TreeBuilder(ctx) - tree, _ = tb(queue, sources, targets=targets, max_particles_in_box=30, - debug=True) - - from boxtree.traversal import FMMTraversalBuilder - tg = FMMTraversalBuilder(ctx) - d_trav, _ = tg(queue, tree, debug=True) - trav = d_trav.get(queue=queue) - - wrangler = ConstantOneExpansionWrangler(trav.tree) - - # Compute FMM using shared memory parallelism - from boxtree.fmm import drive_fmm - pot_fmm = drive_fmm(trav, wrangler, sources_weights) - -local_tree, local_src_weights, local_target, box_bounding_box = \ - generate_local_tree(trav, sources_weights) - -trav_local, trav_global = generate_local_travs(local_tree, local_src_weights, - box_bounding_box) - -local_wrangler = ConstantOneExpansionWrangler(local_tree) - -if rank == 0: - global_wrangler = ConstantOneExpansionWrangler(trav.tree) -else: - global_wrangler = None - -pot_dfmm = drive_dfmm( - local_wrangler, trav_local, trav_global, local_src_weights, global_wrangler, - local_target["mask"], local_target["scan"], local_target["size"], - _communicate_mpoles_via_allreduce=True -) - -if rank 
== 0: - assert(np.all(pot_fmm == nsources)) - assert(np.all(pot_dfmm == nsources)) diff --git a/test/test_distributed.py b/test/test_distributed.py index 6152146..6044dbf 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -14,7 +14,7 @@ logging.basicConfig(level=os.environ.get("LOGLEVEL", "WARNING")) logging.getLogger("boxtree.distributed").setLevel(logging.INFO) -def _test_distributed(dims, nsources, ntargets, dtype): +def _test_against_shared(dims, nsources, ntargets, dtype): # Get the current rank comm = MPI.COMM_WORLD @@ -23,7 +23,7 @@ def _test_distributed(dims, nsources, ntargets, dtype): # Initialize arguments for worker processes trav = None sources_weights = None - HELMHOLTZ_K = 0 + helmholtz_k = 0 # Configure PyOpenCL ctx = cl.create_some_context() @@ -62,7 +62,7 @@ def _test_distributed(dims, nsources, ntargets, dtype): # Get pyfmmlib expansion wrangler wrangler = FMMLibExpansionWrangler( - trav.tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) + trav.tree, helmholtz_k, fmm_level_to_nterms=fmm_level_to_nterms) # Compute FMM using shared memory parallelism from boxtree.fmm import drive_fmm @@ -72,7 +72,7 @@ def _test_distributed(dims, nsources, ntargets, dtype): def distributed_expansion_wrangler_factory(tree): return DistributedFMMLibExpansionWrangler( - queue, tree, HELMHOLTZ_K, fmm_level_to_nterms=fmm_level_to_nterms) + queue, tree, helmholtz_k, fmm_level_to_nterms=fmm_level_to_nterms) distribued_fmm_info = DistributedFMMInfo( queue, trav, distributed_expansion_wrangler_factory, comm=comm) @@ -89,7 +89,7 @@ def _test_distributed(dims, nsources, ntargets, dtype): @pytest.mark.parametrize("num_processes, dims, nsources, ntargets", [ (4, 3, 10000, 10000) ]) -def test_distributed(num_processes, dims, nsources, ntargets): +def test_against_shared(num_processes, dims, nsources, ntargets): pytest.importorskip("mpi4py") newenv = os.environ.copy() @@ -109,21 +109,277 @@ def test_distributed(num_processes, dims, nsources, ntargets): ) +# {{{ Constantone expansion wrangler + +class ConstantOneExpansionWrangler(object): + """This implements the 'analytical routines' for a Green's function that is + constant 1 everywhere. For 'charges' of 'ones', this should get every particle + a copy of the particle count. 
+ """ + + def __init__(self, tree): + self.tree = tree + + def multipole_expansion_zeros(self): + return np.zeros(self.tree.nboxes, dtype=np.float64) + + local_expansion_zeros = multipole_expansion_zeros + + def potential_zeros(self): + return np.zeros(self.tree.ntargets, dtype=np.float64) + + def _get_source_slice(self, ibox): + pstart = self.tree.box_source_starts[ibox] + return slice( + pstart, pstart + self.tree.box_source_counts_nonchild[ibox]) + + def _get_target_slice(self, ibox): + pstart = self.tree.box_target_starts[ibox] + return slice( + pstart, pstart + self.tree.box_target_counts_nonchild[ibox]) + + def reorder_sources(self, source_array): + return source_array[self.tree.user_source_ids] + + def reorder_potentials(self, potentials): + return potentials[self.tree.sorted_target_ids] + + def form_multipoles(self, level_start_source_box_nrs, source_boxes, src_weights): + mpoles = self.multipole_expansion_zeros() + for ibox in source_boxes: + pslice = self._get_source_slice(ibox) + mpoles[ibox] += np.sum(src_weights[pslice]) + + return mpoles + + def coarsen_multipoles(self, level_start_source_parent_box_nrs, + source_parent_boxes, mpoles): + tree = self.tree + + # nlevels-1 is the last valid level index + # nlevels-2 is the last valid level that could have children + # + # 3 is the last relevant source_level. + # 2 is the last relevant target_level. + # (because no level 1 box will be well-separated from another) + for source_level in range(tree.nlevels-1, 2, -1): + target_level = source_level - 1 + start, stop = level_start_source_parent_box_nrs[ + target_level:target_level+2] + for ibox in source_parent_boxes[start:stop]: + for child in tree.box_child_ids[:, ibox]: + if child: + mpoles[ibox] += mpoles[child] + + def eval_direct(self, target_boxes, neighbor_sources_starts, + neighbor_sources_lists, src_weights): + pot = self.potential_zeros() + + for itgt_box, tgt_ibox in enumerate(target_boxes): + tgt_pslice = self._get_target_slice(tgt_ibox) + + src_sum = 0 + start, end = neighbor_sources_starts[itgt_box:itgt_box+2] + #print "DIR: %s <- %s" % (tgt_ibox, neighbor_sources_lists[start:end]) + for src_ibox in neighbor_sources_lists[start:end]: + src_pslice = self._get_source_slice(src_ibox) + + src_sum += np.sum(src_weights[src_pslice]) + + pot[tgt_pslice] = src_sum + + return pot + + def multipole_to_local(self, + level_start_target_or_target_parent_box_nrs, + target_or_target_parent_boxes, + starts, lists, mpole_exps): + local_exps = self.local_expansion_zeros() + + for itgt_box, tgt_ibox in enumerate(target_or_target_parent_boxes): + start, end = starts[itgt_box:itgt_box+2] + + contrib = 0 + #print tgt_ibox, "<-", lists[start:end] + for src_ibox in lists[start:end]: + contrib += mpole_exps[src_ibox] + + local_exps[tgt_ibox] += contrib + + return local_exps + + def eval_multipoles(self, + target_boxes_by_source_level, from_sep_smaller_nonsiblings_by_level, + mpole_exps): + pot = self.potential_zeros() + + for level, ssn in enumerate(from_sep_smaller_nonsiblings_by_level): + for itgt_box, tgt_ibox in \ + enumerate(target_boxes_by_source_level[level]): + tgt_pslice = self._get_target_slice(tgt_ibox) + + contrib = 0 + + start, end = ssn.starts[itgt_box:itgt_box+2] + for src_ibox in ssn.lists[start:end]: + contrib += mpole_exps[src_ibox] + + pot[tgt_pslice] += contrib + + return pot + + def form_locals(self, + level_start_target_or_target_parent_box_nrs, + target_or_target_parent_boxes, starts, lists, src_weights): + local_exps = self.local_expansion_zeros() + + for itgt_box, 
tgt_ibox in enumerate(target_or_target_parent_boxes): + start, end = starts[itgt_box:itgt_box+2] + + #print "LIST 4", tgt_ibox, "<-", lists[start:end] + contrib = 0 + for src_ibox in lists[start:end]: + src_pslice = self._get_source_slice(src_ibox) + + contrib += np.sum(src_weights[src_pslice]) + + local_exps[tgt_ibox] += contrib + + return local_exps + + def refine_locals(self, level_start_target_or_target_parent_box_nrs, + target_or_target_parent_boxes, local_exps): + + for target_lev in range(1, self.tree.nlevels): + start, stop = level_start_target_or_target_parent_box_nrs[ + target_lev:target_lev+2] + for ibox in target_or_target_parent_boxes[start:stop]: + local_exps[ibox] += local_exps[self.tree.box_parent_ids[ibox]] + + return local_exps + + def eval_locals(self, level_start_target_box_nrs, target_boxes, local_exps): + pot = self.potential_zeros() + + for ibox in target_boxes: + tgt_pslice = self._get_target_slice(ibox) + pot[tgt_pslice] += local_exps[ibox] + + return pot + + def finalize_potentials(self, potentials): + return potentials + +# }}} + + +def _test_constantone(dims, nsources, ntargets, dtype): + + # Get the current rank + comm = MPI.COMM_WORLD + rank = comm.Get_rank() + + # Initialization + trav = None + sources_weights = None + + # Configure PyOpenCL + import pyopencl as cl + ctx = cl.create_some_context() + queue = cl.CommandQueue(ctx) + print(queue.context.devices) + + if rank == 0: + + # Generate random particles + from boxtree.tools import make_normal_particle_array as p_normal + sources = p_normal(queue, nsources, dims, dtype, seed=15) + targets = (p_normal(queue, ntargets, dims, dtype, seed=18) + + np.array([2, 0, 0])[:dims]) + + # Constant one source weights + sources_weights = np.ones((nsources,), dtype=dtype) + + # Build the global tree + from boxtree import TreeBuilder + tb = TreeBuilder(ctx) + tree, _ = tb(queue, sources, targets=targets, max_particles_in_box=30, + debug=True) + + # Build global interaction lists + from boxtree.traversal import FMMTraversalBuilder + tg = FMMTraversalBuilder(ctx) + d_trav, _ = tg(queue, tree, debug=True) + trav = d_trav.get(queue=queue) + + def constantone_expansion_wrangler_factory(tree): + return ConstantOneExpansionWrangler(tree) + + from boxtree.distributed import DistributedFMMInfo + distributed_fmm_info = DistributedFMMInfo( + queue, trav, constantone_expansion_wrangler_factory, comm=MPI.COMM_WORLD + ) + + pot_dfmm = distributed_fmm_info.drive_dfmm( + sources_weights, _communicate_mpoles_via_allreduce=True + ) + + if rank == 0: + assert (np.all(pot_dfmm == nsources)) + + +@pytest.mark.mpi +@pytest.mark.parametrize("num_processes, dims, nsources, ntargets", [ + (4, 3, 10000, 10000) +]) +def test_constantone(num_processes, dims, nsources, ntargets): + pytest.importorskip("mpi4py") + + newenv = os.environ.copy() + newenv["PYTEST"] = "2" + newenv["dims"] = str(dims) + newenv["nsources"] = str(nsources) + newenv["ntargets"] = str(ntargets) + + import subprocess + import sys + subprocess.run([ + "mpiexec", "-np", str(num_processes), + "-x", "PYTEST", "-x", "dims", "-x", "nsources", "-x", "ntargets", + sys.executable, __file__], + env=newenv, + check=True + ) + + if __name__ == "__main__": dtype = np.float64 if "PYTEST" in os.environ: - # Run pytest test case - dims = int(os.environ["dims"]) - nsources = int(os.environ["nsources"]) - ntargets = int(os.environ["ntargets"]) + if os.environ["PYTEST"] == "1": + # Run "test_against_shared" test case + dims = int(os.environ["dims"]) + nsources = int(os.environ["nsources"]) + 
ntargets = int(os.environ["ntargets"]) - _test_distributed(dims, nsources, ntargets, dtype) - else: + _test_against_shared(dims, nsources, ntargets, dtype) - dims = 3 - nsources = 10000 - ntargets = 10000 + elif os.environ["PYTEST"] == "2": + # Run "test_constantone" test case + dims = int(os.environ["dims"]) + nsources = int(os.environ["nsources"]) + ntargets = int(os.environ["ntargets"]) + + _test_constantone(dims, nsources, ntargets, dtype) + + else: + import sys + if len(sys.argv) > 1: + exec(sys.argv[1]) + elif len(sys.argv) == 1: + dims = 3 + nsources = 10000 + ntargets = 10000 - _test_distributed(dims, nsources, ntargets, dtype) + _test_against_shared(dims, nsources, ntargets, dtype) -- GitLab From 835f9aff546eac51ec22856b3bfabfb1affbf285 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 10 Jul 2018 03:00:16 -0500 Subject: [PATCH 119/260] Improve doc --- boxtree/distributed/calculation.py | 2 +- test/test_distributed.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index e0a9d02..d16050e 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -317,7 +317,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, :param global_wrangler: Expansion wrangler on root process for assembling partial results from worker processes together. This argument differs from :arg local_wrangler by referening the global tree instead of local trees. - This argument is None on worker process. + This argument is None on worker processes. :param local_trav: Local traversal object returned from generate_local_travs. :param source_weights: Source weights for FMM. None on worker processes. :param local_data: LocalData object returned from generate_local_tree. 
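The pytest wrappers added in the two patches above share one launch pattern: the test re-executes this very file under mpiexec through subprocess, passes its parameters across as environment variables, and the spawned MPI ranks dispatch on the PYTEST variable in the __main__ block. The following stripped-down sketch shows just that mechanism, assuming an mpiexec that accepts Open MPI's -x export flag (the form used in the patches); the file name runner.py and the dims parameter handling are illustrative and not part of boxtree.

# runner.py -- self-contained sketch of the mpiexec re-launch pattern
import os
import subprocess
import sys


def launch_under_mpi(num_processes, **params):
    env = os.environ.copy()
    env["PYTEST"] = "1"
    for key, value in params.items():
        env[key] = str(value)

    # "-x VAR" asks mpiexec to export VAR into the environment of each rank.
    export_flags = []
    for key in ["PYTEST", *params]:
        export_flags += ["-x", key]

    subprocess.run(
        ["mpiexec", "-np", str(num_processes), *export_flags,
         sys.executable, __file__],
        env=env, check=True)


if __name__ == "__main__":
    if "PYTEST" in os.environ:
        # This copy runs as one of the MPI ranks: recover the parameters and
        # run the actual test body here.
        dims = int(os.environ["dims"])
        print("worker rank sees dims =", dims)
    else:
        launch_under_mpi(4, dims=3)

MPI launchers other than Open MPI may spell the environment-export option differently; the -x form here follows what the patches use.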
diff --git a/test/test_distributed.py b/test/test_distributed.py index 6044dbf..b320c0c 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -375,9 +375,16 @@ if __name__ == "__main__": else: import sys + if len(sys.argv) > 1: + + # You can test individual routines by typing + # $ python test_distributed.py 'test_constantone(4, 3, 10000, 10000)' exec(sys.argv[1]) + elif len(sys.argv) == 1: + + # Run against_shared test case with default parameter dims = 3 nsources = 10000 ntargets = 10000 -- GitLab From 87cb89efe5eda39cf4c07d082b4a5911b248dcc9 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 12 Jul 2018 18:04:18 -0500 Subject: [PATCH 120/260] Add performance model for form_multipole --- boxtree/fmm.py | 89 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 54a1649..ee03a3c 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -26,6 +26,8 @@ import logging logger = logging.getLogger(__name__) from pytools import ProcessLogger, Record +import pyopencl as cl +import numpy as np def drive_fmm(traversal, expansion_wrangler, src_weights, timing_data=None): @@ -430,4 +432,91 @@ class TimingRecorder(object): # }}} +def calculate_nsources_by_level(tree): + nsources_by_level = np.empty((tree.nlevels,), dtype=np.int32) + + for ilevel in range(tree.nlevels): + start_ibox = tree.level_start_box_nrs[ilevel] + end_ibox = tree.level_start_box_nrs[ilevel + 1] + count = 0 + + for ibox in range(start_ibox, end_ibox): + count += tree.box_source_counts_nonchild[ibox] + + nsources_by_level[ilevel] = count + + return nsources_by_level + + +class PerformanceModel: + + def __init__(self, cl_context, wrangler_factory, uses_pde_expansions): + self.cl_context = cl_context + self.wrangler_factory = wrangler_factory + self.uses_pde_expansions = uses_pde_expansions + + self.time_result = [] + + from pyopencl.clrandom import PhiloxGenerator + self.rng = PhiloxGenerator(cl_context) + + def time_performance(self, traversal): + # Calculate "nterms_fmm_total" + dimensions = traversal.tree.dimensions + wrangler = self.wrangler_factory(traversal.tree) + nsources_by_level = calculate_nsources_by_level(traversal.tree) + + level_nterms = wrangler.level_nterms + + if self.uses_pde_expansions: + ncoeffs_fmm_by_level = level_nterms ** (dimensions - 1) + else: + ncoeffs_fmm_by_level = level_nterms ** dimensions + + nterms_fmm_total = np.sum(nsources_by_level * ncoeffs_fmm_by_level) + + # Record useful metadata for assembling performance data + timing_data = { + "nterms_fmm_total": nterms_fmm_total + } + + # Generate random source weights + with cl.CommandQueue(self.cl_context) as queue: + source_weights = self.rng.uniform( + queue, + traversal.tree.nsources, + traversal.tree.coord_dtype + ).get() + + # Time a FMM run + drive_fmm(traversal, wrangler, source_weights, timing_data=timing_data) + + self.time_result.append(timing_data) + + def form_multipole_model(self): + nresult = len(self.time_result) + + if nresult < 1: + raise RuntimeError("Please run FMM at lease once using time_performance" + "before forming models.") + elif nresult == 1: + result = self.time_result[0] + wall_elapsed_time = result["form_multipoles"].wall_elapsed + nterm_fmm_total = result["nterms_fmm_total"] + return wall_elapsed_time / nterm_fmm_total, 0.0 + else: + wall_elapsed_time = np.empty((nresult,), dtype=float) + coeff_matrix = np.empty((nresult, 2), dtype=float) + + for iresult, result in enumerate(self.time_result): + wall_elapsed_time[iresult] = 
result["form_multipoles"].wall_elapsed + coeff_matrix[iresult, 0] = result["nterms_fmm_total"] + + coeff_matrix[:, 1] = 1 + + from numpy.linalg import lstsq + coeff = lstsq(coeff_matrix, wall_elapsed_time, rcond=-1)[0] + + return coeff[0], coeff[1] + # vim: filetype=pyopencl:fdm=marker -- GitLab From 53882035911a19867b0f8610359cf5b2160a9482 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 13 Jul 2018 05:32:42 -0500 Subject: [PATCH 121/260] Correction for timing API --- boxtree/distributed/calculation.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index d16050e..80beef7 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -354,7 +354,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, mpole_exps = local_wrangler.form_multipoles( local_trav.level_start_source_box_nrs, local_trav.source_boxes, - local_src_weights) + local_src_weights)[0] # }}} @@ -388,7 +388,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, local_trav.target_boxes, local_trav.neighbor_source_boxes_starts, local_trav.neighbor_source_boxes_lists, - local_src_weights) + local_src_weights)[0] # these potentials are called alpha in [1] @@ -402,7 +402,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, local_trav.target_or_target_parent_boxes, local_trav.from_sep_siblings_starts, local_trav.from_sep_siblings_lists, - mpole_exps) + mpole_exps)[0] # local_exps represents both Gamma and Delta in [1] @@ -418,7 +418,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, potentials = potentials + local_wrangler.eval_multipoles( local_trav.target_boxes_sep_smaller_by_source_level, local_trav.from_sep_smaller_by_level, - mpole_exps) + mpole_exps)[0] # these potentials are called beta in [1] @@ -429,7 +429,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, local_trav.target_boxes, local_trav.from_sep_close_smaller_starts, local_trav.from_sep_close_smaller_lists, - local_src_weights) + local_src_weights)[0] # }}} @@ -442,7 +442,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, local_trav.target_or_target_parent_boxes, local_trav.from_sep_bigger_starts, local_trav.from_sep_bigger_lists, - local_src_weights) + local_src_weights)[0] if local_trav.from_sep_close_bigger_starts is not None: logger.debug("evaluate separated close bigger interactions directly " @@ -452,7 +452,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, local_trav.target_or_target_parent_boxes, local_trav.from_sep_close_bigger_starts, local_trav.from_sep_close_bigger_lists, - local_src_weights) + local_src_weights)[0] # }}} @@ -473,7 +473,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, potentials = potentials + local_wrangler.eval_locals( local_trav.level_start_target_box_nrs, local_trav.target_boxes, - local_exps) + local_exps)[0] # }}} -- GitLab From ca451a87a3a4e4a95022cb23929a47f9b3ab4524 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 16 Jul 2018 15:01:13 -0500 Subject: [PATCH 122/260] Add no_targets option --- boxtree/distributed/local_tree.py | 46 +++++++++++++++++++------------ 1 file changed, 28 insertions(+), 18 deletions(-) diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 9dfbae9..19c4423 100644 --- 
a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -569,7 +569,8 @@ class LocalTree(Tree): def generate_local_tree(queue, traversal, responsible_boxes_list, - responsible_box_query, comm=MPI.COMM_WORLD): + responsible_box_query, comm=MPI.COMM_WORLD, + no_targets=False): # Get MPI information current_rank = comm.Get_rank() @@ -640,13 +641,15 @@ def generate_local_tree(queue, traversal, responsible_boxes_list, particles_req.append(comm.Isend( local_sources[irank], dest=irank, tag=MPITags["DIST_SOURCES"])) - particles_req.append(comm.Isend( - local_targets[irank], dest=irank, tag=MPITags["DIST_TARGETS"])) - - if tree.targets_have_extent: + if not no_targets: particles_req.append(comm.Isend( - local_target_radii[irank], dest=irank, tag=MPITags["DIST_RADII"]) - ) + local_targets[irank], dest=irank, tag=MPITags["DIST_TARGETS"])) + + if tree.targets_have_extent: + particles_req.append(comm.Isend( + local_target_radii[irank], dest=irank, + tag=MPITags["DIST_RADII"]) + ) from boxtree.tools import MaskCompressorKernel matcompr = MaskCompressorKernel(queue.context) @@ -682,20 +685,27 @@ def generate_local_tree(queue, traversal, responsible_boxes_list, reqs.append(comm.Irecv( local_tree.sources, source=0, tag=MPITags["DIST_SOURCES"])) - local_tree.targets = np.empty( - (local_tree.dimensions, local_tree.ntargets), - dtype=local_tree.coord_dtype - ) - reqs.append(comm.Irecv( - local_tree.targets, source=0, tag=MPITags["DIST_TARGETS"])) - - if local_tree.targets_have_extent: - local_tree.target_radii = np.empty( - (local_tree.ntargets,), + if no_targets: + local_tree.targets = None + if local_tree.targets_have_extent: + local_tree.target_radii = None + else: + local_tree.targets = np.empty( + (local_tree.dimensions, local_tree.ntargets), dtype=local_tree.coord_dtype ) + reqs.append(comm.Irecv( - local_tree.target_radii, source=0, tag=MPITags["DIST_RADII"])) + local_tree.targets, source=0, tag=MPITags["DIST_TARGETS"])) + + if local_tree.targets_have_extent: + local_tree.target_radii = np.empty( + (local_tree.ntargets,), + dtype=local_tree.coord_dtype + ) + + reqs.append(comm.Irecv( + local_tree.target_radii, source=0, tag=MPITags["DIST_RADII"])) MPI.Request.Waitall(reqs) -- GitLab From 40bbd4b81a11d48d7f74a46f4e176d2e11d3bd41 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 17 Jul 2018 09:24:46 -0500 Subject: [PATCH 123/260] Refactor total FMM terms computation --- boxtree/fmm.py | 37 ++++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index ee03a3c..3a18afb 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -433,6 +433,10 @@ class TimingRecorder(object): def calculate_nsources_by_level(tree): + """ + :return: A numpy array of share (tree.nlevels,) such that the ith index documents + the number of sources on level i. 
+ """ nsources_by_level = np.empty((tree.nlevels,), dtype=np.int32) for ilevel in range(tree.nlevels): @@ -461,23 +465,11 @@ class PerformanceModel: self.rng = PhiloxGenerator(cl_context) def time_performance(self, traversal): - # Calculate "nterms_fmm_total" - dimensions = traversal.tree.dimensions wrangler = self.wrangler_factory(traversal.tree) - nsources_by_level = calculate_nsources_by_level(traversal.tree) - - level_nterms = wrangler.level_nterms - - if self.uses_pde_expansions: - ncoeffs_fmm_by_level = level_nterms ** (dimensions - 1) - else: - ncoeffs_fmm_by_level = level_nterms ** dimensions - - nterms_fmm_total = np.sum(nsources_by_level * ncoeffs_fmm_by_level) # Record useful metadata for assembling performance data timing_data = { - "nterms_fmm_total": nterms_fmm_total + "nterms_fmm_total": self._calculate_nters_fmm_total(wrangler) } # Generate random source weights @@ -519,4 +511,23 @@ class PerformanceModel: return coeff[0], coeff[1] + def _calculate_nters_fmm_total(self, wrangler): + """ + :return: total number of terms formed during form_multipole + """ + dimensions = wrangler.tree.dimensions + + # Calculate "nterms_fmm_total" + nsources_by_level = calculate_nsources_by_level(wrangler.tree) + level_nterms = wrangler.level_nterms + + if self.uses_pde_expansions: + ncoeffs_fmm_by_level = level_nterms ** (dimensions - 1) + else: + ncoeffs_fmm_by_level = level_nterms ** dimensions + + nterms_fmm_total = np.sum(nsources_by_level * ncoeffs_fmm_by_level) + + return nterms_fmm_total + # vim: filetype=pyopencl:fdm=marker -- GitLab From 689a2a3af95a86ad584cfaa73024ce17c873ad6d Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 17 Jul 2018 10:45:25 -0500 Subject: [PATCH 124/260] Count the workload of direct evaluation --- boxtree/fmm.py | 95 ++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 76 insertions(+), 19 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 3a18afb..5963656 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -432,24 +432,80 @@ class TimingRecorder(object): # }}} -def calculate_nsources_by_level(tree): - """ - :return: A numpy array of share (tree.nlevels,) such that the ith index documents - the number of sources on level i. - """ - nsources_by_level = np.empty((tree.nlevels,), dtype=np.int32) +class PerformanceCounter: + + def __init__(self, traversal): + self.traversal = traversal + + def count_nsources_by_level(self): + """ + :return: A numpy array of share (tree.nlevels,) such that the ith index + documents the number of sources on level i. + """ + tree = self.traversal.tree + + nsources_by_level = np.empty((tree.nlevels,), dtype=np.int32) + + for ilevel in range(tree.nlevels): + start_ibox = tree.level_start_box_nrs[ilevel] + end_ibox = tree.level_start_box_nrs[ilevel + 1] + count = 0 + + for ibox in range(start_ibox, end_ibox): + count += tree.box_source_counts_nonchild[ibox] + + nsources_by_level[ilevel] = count + + return nsources_by_level + + def count_direct(self, use_global_idx=False): + """ + :return: If *use_global_idx* is True, return a numpy array of shape + (tree.nboxes,) such that the ith entry represents the workload from + direct evaluation on box i. If *use_global_idx* is False, return a numpy + array of shape (ntarget_boxes,) such that the ith entry represents the + workload on *target_boxes* i. 
+ """ + traversal = self.traversal + tree = traversal.tree + + if use_global_idx: + direct_workload = np.zeros((tree.nboxes,), dtype=np.int64) + else: + ntarget_boxes = len(traversal.target_boxes) + direct_workload = np.zeros((ntarget_boxes,), dtype=np.int64) + + for itgt_box, tgt_ibox in enumerate(traversal.target_boxes): + ntargets = traversal.box_target_counts_nonchild[tgt_ibox] + nsources = 0 + + start, end = traversal.neighbor_source_boxes_starts[itgt_box:itgt_box+2] + + for src_ibox in traversal.neighbor_source_boxes_lists[start:end]: + nsources += tree.box_source_counts_nonchild[src_ibox] + + if traversal.from_sep_close_smaller_starts is not None: + start, end = ( + traversal.from_sep_close_smaller_starts[itgt_box:itgt_box+2]) + + for src_ibox in traversal.from_sep_close_smaller_lists[start:end]: + nsources += tree.box_source_counts_nonchild[src_ibox] + + if traversal.from_sep_close_bigger_starts is not None: + start, end = ( + traversal.from_sep_close_bigger_starts[itgt_box:itgt_box+2]) - for ilevel in range(tree.nlevels): - start_ibox = tree.level_start_box_nrs[ilevel] - end_ibox = tree.level_start_box_nrs[ilevel + 1] - count = 0 + for src_ibox in traversal.from_sep_close_bigger_lists[start:end]: + nsources += tree.box_source_counts_nonchild[src_ibox] - for ibox in range(start_ibox, end_ibox): - count += tree.box_source_counts_nonchild[ibox] + count = nsources * ntargets - nsources_by_level[ilevel] = count + if use_global_idx: + direct_workload[tgt_ibox] = count + else: + direct_workload[itgt_box] = count - return nsources_by_level + return direct_workload class PerformanceModel: @@ -466,10 +522,12 @@ class PerformanceModel: def time_performance(self, traversal): wrangler = self.wrangler_factory(traversal.tree) + counter = PerformanceCounter(traversal) # Record useful metadata for assembling performance data timing_data = { - "nterms_fmm_total": self._calculate_nters_fmm_total(wrangler) + "nterms_fmm_total": self._calculate_nters_fmm_total(wrangler, counter), + "direct_workload": np.sum(counter.count_direct()) } # Generate random source weights @@ -511,14 +569,13 @@ class PerformanceModel: return coeff[0], coeff[1] - def _calculate_nters_fmm_total(self, wrangler): + def _calculate_nters_fmm_total(self, wrangler, counter): """ - :return: total number of terms formed during form_multipole + :return: total number of terms formed across all levels during form_multipole """ dimensions = wrangler.tree.dimensions - # Calculate "nterms_fmm_total" - nsources_by_level = calculate_nsources_by_level(wrangler.tree) + nsources_by_level = counter.count_nsources_by_level(wrangler.tree) level_nterms = wrangler.level_nterms if self.uses_pde_expansions: -- GitLab From 2d79680b4b54275b20846b9097d3922ebc5973d4 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 17 Jul 2018 17:10:46 -0500 Subject: [PATCH 125/260] Refactor linear regression, add eval_direct model --- boxtree/fmm.py | 73 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 47 insertions(+), 26 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 5963656..cb0fa66 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -476,7 +476,7 @@ class PerformanceCounter: direct_workload = np.zeros((ntarget_boxes,), dtype=np.int64) for itgt_box, tgt_ibox in enumerate(traversal.target_boxes): - ntargets = traversal.box_target_counts_nonchild[tgt_ibox] + ntargets = tree.box_target_counts_nonchild[tgt_ibox] nsources = 0 start, end = traversal.neighbor_source_boxes_starts[itgt_box:itgt_box+2] @@ -543,31 +543,13 @@ class 
PerformanceModel: self.time_result.append(timing_data) - def form_multipole_model(self): - nresult = len(self.time_result) - - if nresult < 1: - raise RuntimeError("Please run FMM at lease once using time_performance" - "before forming models.") - elif nresult == 1: - result = self.time_result[0] - wall_elapsed_time = result["form_multipoles"].wall_elapsed - nterm_fmm_total = result["nterms_fmm_total"] - return wall_elapsed_time / nterm_fmm_total, 0.0 - else: - wall_elapsed_time = np.empty((nresult,), dtype=float) - coeff_matrix = np.empty((nresult, 2), dtype=float) - - for iresult, result in enumerate(self.time_result): - wall_elapsed_time[iresult] = result["form_multipoles"].wall_elapsed - coeff_matrix[iresult, 0] = result["nterms_fmm_total"] - - coeff_matrix[:, 1] = 1 - - from numpy.linalg import lstsq - coeff = lstsq(coeff_matrix, wall_elapsed_time, rcond=-1)[0] + def form_multipoles_model(self, wall_time=True): + return self._linear_regression("nterms_fmm_total", "form_multipoles", + wall_time=wall_time) - return coeff[0], coeff[1] + def eval_direct_model(self, wall_time=True): + return self._linear_regression("direct_workload", "eval_direct", + wall_time=wall_time) def _calculate_nters_fmm_total(self, wrangler, counter): """ @@ -575,7 +557,7 @@ class PerformanceModel: """ dimensions = wrangler.tree.dimensions - nsources_by_level = counter.count_nsources_by_level(wrangler.tree) + nsources_by_level = counter.count_nsources_by_level() level_nterms = wrangler.level_nterms if self.uses_pde_expansions: @@ -587,4 +569,43 @@ class PerformanceModel: return nterms_fmm_total + def _linear_regression(self, x_name, y_name, wall_time=True): + nresult = len(self.time_result) + + if nresult < 1: + raise RuntimeError("Please run FMM at lease once using time_performance" + "before forming models.") + elif nresult == 1: + result = self.time_result[0] + + if wall_time: + dependent_value = result[y_name].wall_elapsed + else: + dependent_value = result[y_name].process_elapsed + + independent_value = result[x_name] + return dependent_value / independent_value, 0.0 + else: + dependent_value = np.empty((nresult,), dtype=float) + coeff_matrix = np.empty((nresult, 2), dtype=float) + + for iresult, result in enumerate(self.time_result): + if wall_time: + dependent_value[iresult] = result[y_name].wall_elapsed + else: + dependent_value[iresult] = result[y_name].process_elapsed + + coeff_matrix[iresult, 0] = result[x_name] + + coeff_matrix[:, 1] = 1 + + from numpy.linalg import lstsq + coeff = lstsq(coeff_matrix, dependent_value, rcond=-1)[0] + + print(coeff_matrix) + print(dependent_value) + + return coeff[0], coeff[1] + + # vim: filetype=pyopencl:fdm=marker -- GitLab From b3853b2ed5975f9f5868bb705b593cdc18c07ab3 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 18 Jul 2018 10:35:07 -0500 Subject: [PATCH 126/260] Extend linear regression to multiple variables --- boxtree/fmm.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index cb0fa66..3521711 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -544,11 +544,11 @@ class PerformanceModel: self.time_result.append(timing_data) def form_multipoles_model(self, wall_time=True): - return self._linear_regression("nterms_fmm_total", "form_multipoles", + return self._linear_regression("form_multipoles", ["nterms_fmm_total"], wall_time=wall_time) def eval_direct_model(self, wall_time=True): - return self._linear_regression("direct_workload", "eval_direct", + return 
self._linear_regression("eval_direct", ["direct_workload"], wall_time=wall_time) def _calculate_nters_fmm_total(self, wrangler, counter): @@ -569,8 +569,13 @@ class PerformanceModel: return nterms_fmm_total - def _linear_regression(self, x_name, y_name, wall_time=True): + def _linear_regression(self, y_name, x_name, wall_time=True): + """ + :arg y_name: Name of the depedent variable + :arg x_name: A list of names of independent variables + """ nresult = len(self.time_result) + nvariables = len(x_name) if nresult < 1: raise RuntimeError("Please run FMM at lease once using time_performance" @@ -583,11 +588,13 @@ class PerformanceModel: else: dependent_value = result[y_name].process_elapsed - independent_value = result[x_name] - return dependent_value / independent_value, 0.0 + independent_value = result[x_name[0]] + coeff = dependent_value / independent_value + + return (coeff,) + tuple(0.0 for _ in range(nvariables - 1)) else: dependent_value = np.empty((nresult,), dtype=float) - coeff_matrix = np.empty((nresult, 2), dtype=float) + coeff_matrix = np.empty((nresult, nvariables + 1), dtype=float) for iresult, result in enumerate(self.time_result): if wall_time: @@ -595,9 +602,10 @@ class PerformanceModel: else: dependent_value[iresult] = result[y_name].process_elapsed - coeff_matrix[iresult, 0] = result[x_name] + for icol, variable_name in enumerate(x_name): + coeff_matrix[iresult, icol] = result[variable_name] - coeff_matrix[:, 1] = 1 + coeff_matrix[:, -1] = 1 from numpy.linalg import lstsq coeff = lstsq(coeff_matrix, dependent_value, rcond=-1)[0] @@ -605,7 +613,7 @@ class PerformanceModel: print(coeff_matrix) print(dependent_value) - return coeff[0], coeff[1] + return coeff # vim: filetype=pyopencl:fdm=marker -- GitLab From 5077f8cb6852df261b93660fcecc0b49be23f5a2 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 19 Jul 2018 11:05:59 -0500 Subject: [PATCH 127/260] Refactor FMM parameters --- boxtree/fmm.py | 71 +++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 59 insertions(+), 12 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 3521711..ed24c02 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -28,6 +28,7 @@ logger = logging.getLogger(__name__) from pytools import ProcessLogger, Record import pyopencl as cl import numpy as np +from collections import namedtuple def drive_fmm(traversal, expansion_wrangler, src_weights, timing_data=None): @@ -508,6 +509,15 @@ class PerformanceCounter: return direct_workload +FMMParameters = namedtuple( + "FMMParameters", + ['ncoeffs_fmm_by_level', + 'translation_source_power', + 'translation_target_power', + 'translation_max_power'] +) + + class PerformanceModel: def __init__(self, cl_context, wrangler_factory, uses_pde_expansions): @@ -522,12 +532,20 @@ class PerformanceModel: def time_performance(self, traversal): wrangler = self.wrangler_factory(traversal.tree) + counter = PerformanceCounter(traversal) + parameters = self.get_fmm_parameters( + traversal.tree.dimensions, + self.uses_pde_expansions, + wrangler.level_nterms + ) + # Record useful metadata for assembling performance data timing_data = { - "nterms_fmm_total": self._calculate_nters_fmm_total(wrangler, counter), - "direct_workload": np.sum(counter.count_direct()) + "nterms_fmm_total": self.calculate_nters_fmm_total(counter, parameters), + "direct_workload": np.sum(counter.count_direct()), + "direct_nsource_boxes": traversal.neighbor_source_boxes_starts[-1] } # Generate random source weights @@ -548,22 +566,51 @@ class PerformanceModel: wall_time=wall_time) def 
eval_direct_model(self, wall_time=True): - return self._linear_regression("eval_direct", ["direct_workload"], - wall_time=wall_time) + return self._linear_regression( + "eval_direct", + ["direct_workload", "direct_nsource_boxes"], + wall_time=wall_time) + + @staticmethod + def get_fmm_parameters(dimensions, use_pde_expansions, level_nterms): + if use_pde_expansions: + ncoeffs_fmm_by_level = level_nterms ** (dimensions - 1) + + if dimensions == 2: + translation_source_power = 1 + translation_target_power = 1 + translation_max_power = 0 + elif dimensions == 3: + # Based on a reading of FMMlib, i.e. a point-and-shoot FMM. + translation_source_power = 0 + translation_target_power = 0 + translation_max_power = 3 + else: + raise ValueError("Don't know how to estimate expansion complexities " + "for dimension %d" % dimensions) - def _calculate_nters_fmm_total(self, wrangler, counter): + else: + ncoeffs_fmm_by_level = level_nterms ** dimensions + + translation_source_power = dimensions + translation_target_power = dimensions + translation_max_power = 0 + + return FMMParameters( + ncoeffs_fmm_by_level=ncoeffs_fmm_by_level, + translation_source_power=translation_source_power, + translation_target_power=translation_target_power, + translation_max_power=translation_max_power + ) + + @staticmethod + def calculate_nters_fmm_total(counter, parameters): """ :return: total number of terms formed across all levels during form_multipole """ - dimensions = wrangler.tree.dimensions - nsources_by_level = counter.count_nsources_by_level() - level_nterms = wrangler.level_nterms - if self.uses_pde_expansions: - ncoeffs_fmm_by_level = level_nterms ** (dimensions - 1) - else: - ncoeffs_fmm_by_level = level_nterms ** dimensions + ncoeffs_fmm_by_level = parameters.ncoeffs_fmm_by_level nterms_fmm_total = np.sum(nsources_by_level * ncoeffs_fmm_by_level) -- GitLab From 81a3bb10bf83acc626d6978820415ab6039653e6 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 20 Jul 2018 17:25:49 -0500 Subject: [PATCH 128/260] Count m2l operations --- boxtree/fmm.py | 188 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 123 insertions(+), 65 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index ed24c02..ef6c82e 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -433,10 +433,71 @@ class TimingRecorder(object): # }}} +FMMParameters = namedtuple( + "FMMParameters", + ['ncoeffs_fmm_by_level', + 'translation_source_power', + 'translation_target_power', + 'translation_max_power'] +) + + class PerformanceCounter: - def __init__(self, traversal): + def __init__(self, traversal, wrangler, uses_pde_expansions): self.traversal = traversal + self.wrangler = wrangler + self.uses_pde_expansions = uses_pde_expansions + + self.parameters = self.get_fmm_parameters( + traversal.tree.dimensions, + uses_pde_expansions, + wrangler.level_nterms + ) + + @staticmethod + def xlat_cost(p_source, p_target, parameters): + """ + :param p_source: A numpy array of numbers of source terms + :return: The same shape as *p_source* + """ + return ( + p_source ** parameters.translation_source_power + * p_target ** parameters.translation_target_power + * np.maximum(p_source, p_target) ** parameters.translation_max_power + ) + + @staticmethod + def get_fmm_parameters(dimensions, use_pde_expansions, level_nterms): + if use_pde_expansions: + ncoeffs_fmm_by_level = level_nterms ** (dimensions - 1) + + if dimensions == 2: + translation_source_power = 1 + translation_target_power = 1 + translation_max_power = 0 + elif dimensions == 3: + # Based on a 
reading of FMMlib, i.e. a point-and-shoot FMM. + translation_source_power = 0 + translation_target_power = 0 + translation_max_power = 3 + else: + raise ValueError("Don't know how to estimate expansion complexities " + "for dimension %d" % dimensions) + + else: + ncoeffs_fmm_by_level = level_nterms ** dimensions + + translation_source_power = dimensions + translation_target_power = dimensions + translation_max_power = 0 + + return FMMParameters( + ncoeffs_fmm_by_level=ncoeffs_fmm_by_level, + translation_source_power=translation_source_power, + translation_target_power=translation_target_power, + translation_max_power=translation_max_power + ) def count_nsources_by_level(self): """ @@ -459,6 +520,18 @@ class PerformanceCounter: return nsources_by_level + def count_nters_fmm_total(self): + """ + :return: total number of terms formed across all levels during form_multipole + """ + nsources_by_level = self.count_nsources_by_level() + + ncoeffs_fmm_by_level = self.parameters.ncoeffs_fmm_by_level + + nterms_fmm_total = np.sum(nsources_by_level * ncoeffs_fmm_by_level) + + return nterms_fmm_total + def count_direct(self, use_global_idx=False): """ :return: If *use_global_idx* is True, return a numpy array of shape @@ -508,14 +581,49 @@ class PerformanceCounter: return direct_workload + def count_m2l(self, use_global_idx=False): + """ + :return: If *use_global_idx* is True, return a numpy array of shape + (tree.nboxes,) such that the ith entry represents the workload from + multipole to local expansion on box i. If *use_global_idx* is False, + return a numpy array of shape (ntarget_or_target_parent_boxes,) such that + the ith entry represents the workload on *target_or_target_parent_boxes* + i. + """ + trav = self.traversal + wrangler = self.wrangler + parameters = self.parameters -FMMParameters = namedtuple( - "FMMParameters", - ['ncoeffs_fmm_by_level', - 'translation_source_power', - 'translation_target_power', - 'translation_max_power'] -) + ntarget_or_target_parent_boxes = len(trav.target_or_target_parent_boxes) + + if use_global_idx: + nm2l = np.zeros((trav.tree.nboxes,), dtype=np.intp) + else: + nm2l = np.zeros((ntarget_or_target_parent_boxes,), dtype=np.intp) + + for itgt_box, tgt_ibox in enumerate(trav.target_or_target_parent_boxes): + start, end = trav.from_sep_siblings_starts[itgt_box:itgt_box+2] + from_sep_siblings_level = trav.tree.box_levels[ + trav.from_sep_siblings_lists[start:end] + ] + + if start == end: + continue + + tgt_box_level = trav.tree.box_levels[tgt_ibox] + + from_sep_siblings_nterms = wrangler.level_nterms[from_sep_siblings_level] + tgt_box_nterms = wrangler.level_nterms[tgt_box_level] + + from_sep_siblings_costs = self.xlat_cost( + from_sep_siblings_nterms, tgt_box_nterms, parameters) + + if use_global_idx: + nm2l[tgt_ibox] += np.sum(from_sep_siblings_costs) + else: + nm2l[itgt_box] += np.sum(from_sep_siblings_costs) + + return nm2l class PerformanceModel: @@ -533,19 +641,14 @@ class PerformanceModel: def time_performance(self, traversal): wrangler = self.wrangler_factory(traversal.tree) - counter = PerformanceCounter(traversal) - - parameters = self.get_fmm_parameters( - traversal.tree.dimensions, - self.uses_pde_expansions, - wrangler.level_nterms - ) + counter = PerformanceCounter(traversal, wrangler, self.uses_pde_expansions) # Record useful metadata for assembling performance data timing_data = { - "nterms_fmm_total": self.calculate_nters_fmm_total(counter, parameters), + "nterms_fmm_total": counter.count_nters_fmm_total(), "direct_workload": 
np.sum(counter.count_direct()), - "direct_nsource_boxes": traversal.neighbor_source_boxes_starts[-1] + "direct_nsource_boxes": traversal.neighbor_source_boxes_starts[-1], + "m2l_workload": np.sum(counter.count_m2l()) } # Generate random source weights @@ -562,61 +665,16 @@ class PerformanceModel: self.time_result.append(timing_data) def form_multipoles_model(self, wall_time=True): - return self._linear_regression("form_multipoles", ["nterms_fmm_total"], + return self.linear_regression("form_multipoles", ["nterms_fmm_total"], wall_time=wall_time) def eval_direct_model(self, wall_time=True): - return self._linear_regression( + return self.linear_regression( "eval_direct", ["direct_workload", "direct_nsource_boxes"], wall_time=wall_time) - @staticmethod - def get_fmm_parameters(dimensions, use_pde_expansions, level_nterms): - if use_pde_expansions: - ncoeffs_fmm_by_level = level_nterms ** (dimensions - 1) - - if dimensions == 2: - translation_source_power = 1 - translation_target_power = 1 - translation_max_power = 0 - elif dimensions == 3: - # Based on a reading of FMMlib, i.e. a point-and-shoot FMM. - translation_source_power = 0 - translation_target_power = 0 - translation_max_power = 3 - else: - raise ValueError("Don't know how to estimate expansion complexities " - "for dimension %d" % dimensions) - - else: - ncoeffs_fmm_by_level = level_nterms ** dimensions - - translation_source_power = dimensions - translation_target_power = dimensions - translation_max_power = 0 - - return FMMParameters( - ncoeffs_fmm_by_level=ncoeffs_fmm_by_level, - translation_source_power=translation_source_power, - translation_target_power=translation_target_power, - translation_max_power=translation_max_power - ) - - @staticmethod - def calculate_nters_fmm_total(counter, parameters): - """ - :return: total number of terms formed across all levels during form_multipole - """ - nsources_by_level = counter.count_nsources_by_level() - - ncoeffs_fmm_by_level = parameters.ncoeffs_fmm_by_level - - nterms_fmm_total = np.sum(nsources_by_level * ncoeffs_fmm_by_level) - - return nterms_fmm_total - - def _linear_regression(self, y_name, x_name, wall_time=True): + def linear_regression(self, y_name, x_name, wall_time=True): """ :arg y_name: Name of the depedent variable :arg x_name: A list of names of independent variables -- GitLab From b855e88810ef64678efe9d703c6b9e26bf089c27 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 22 Jul 2018 21:54:31 -0500 Subject: [PATCH 129/260] Add script for testing performance model --- boxtree/fmm.py | 3 - examples/performance_model.py | 103 ++++++++++++++++++++++++++++++++++ 2 files changed, 103 insertions(+), 3 deletions(-) create mode 100644 examples/performance_model.py diff --git a/boxtree/fmm.py b/boxtree/fmm.py index ef6c82e..bae2346 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -715,9 +715,6 @@ class PerformanceModel: from numpy.linalg import lstsq coeff = lstsq(coeff_matrix, dependent_value, rcond=-1)[0] - print(coeff_matrix) - print(dependent_value) - return coeff diff --git a/examples/performance_model.py b/examples/performance_model.py new file mode 100644 index 0000000..3ea75b9 --- /dev/null +++ b/examples/performance_model.py @@ -0,0 +1,103 @@ +from __future__ import division +import pyopencl as cl +import numpy as np +from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler +import functools +from boxtree.fmm import PerformanceModel, PerformanceCounter +from boxtree.fmm import drive_fmm +from pyopencl.clrandom import PhiloxGenerator + +context = 
cl.create_some_context() +queue = cl.CommandQueue(context) +dtype = np.float64 +helmholtz_k = 0 + + +def fmm_level_to_nterms(tree, level): + return max(level, 3) + + +# {{{ Generate traversal objects for forming models and verification + +traversals = [] + +for nsources, ntargets, dims in [(6000, 6000, 3), + (9000, 9000, 3), + (12000, 12000, 3), + (15000, 15000, 3), + (20000, 20000, 3)]: + + from boxtree.tools import make_normal_particle_array as p_normal + sources = p_normal(queue, nsources, dims, dtype, seed=15) + targets = p_normal(queue, ntargets, dims, dtype, seed=18) + + rng = PhiloxGenerator(context, seed=22) + target_radii = rng.uniform( + queue, ntargets, a=0, b=0.05, dtype=np.float64).get() + + from boxtree import TreeBuilder + tb = TreeBuilder(context) + tree, _ = tb(queue, sources, targets=targets, target_radii=target_radii, + stick_out_factor=0.25, max_particles_in_box=30, debug=True) + + from boxtree.traversal import FMMTraversalBuilder + tg = FMMTraversalBuilder(context, well_sep_is_n_away=2) + d_trav, _ = tg(queue, tree, debug=True) + trav = d_trav.get(queue=queue) + + traversals.append(trav) + +# }}} + +wrangler_factory = functools.partial( + FMMLibExpansionWrangler, helmholtz_k=0, fmm_level_to_nterms=fmm_level_to_nterms) + +ntraversals = len(traversals) +model = PerformanceModel(context, wrangler_factory, True) +for i in range(ntraversals - 1): + model.time_performance(traversals[i]) + +eval_traversal = traversals[-1] +eval_wrangler = wrangler_factory(eval_traversal.tree) +dimensions = eval_traversal.tree.dimensions +eval_counter = PerformanceCounter(eval_traversal, eval_wrangler, True) + +predict_timing = {} +wall_time = True + +# {{{ Predict eval_direct + +param = model.eval_direct_model(wall_time=wall_time) + +direct_workload = np.sum(eval_counter.count_direct()) +direct_nsource_boxes = eval_traversal.neighbor_source_boxes_starts[-1] + +predict_timing["eval_direct"] = ( + direct_workload * param[0] + direct_nsource_boxes * param[1] + param[2]) + +# }}} + +# {{{ Actual timing + +true_timing = {} + +rng = PhiloxGenerator(context) +source_weights = rng.uniform( + queue, eval_traversal.tree.nsources, eval_traversal.tree.coord_dtype).get() + +_ = drive_fmm(eval_traversal, eval_wrangler, source_weights, timing_data=true_timing) + +# }}} + + +for field in ["eval_direct"]: + wall_time_field = predict_timing[field] + + if wall_time: + true_time_field = true_timing[field].wall_elapsed + else: + true_time_field = true_timing[field].process_elapsed + + diff = abs(wall_time_field - true_time_field) + + print(field + " error: " + str(diff / true_time_field)) -- GitLab From 535032d04f0a04072fa39b5659d688ca10358402 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 22 Jul 2018 22:12:02 -0500 Subject: [PATCH 130/260] Add m2l model --- boxtree/fmm.py | 6 ++++++ examples/performance_model.py | 12 +++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index bae2346..d4147f9 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -674,6 +674,12 @@ class PerformanceModel: ["direct_workload", "direct_nsource_boxes"], wall_time=wall_time) + def multipole_to_local_model(self, wall_time=True): + return self.linear_regression( + "multipole_to_local", ["m2l_workload"], + wall_time=wall_time + ) + def linear_regression(self, y_name, x_name, wall_time=True): """ :arg y_name: Name of the depedent variable diff --git a/examples/performance_model.py b/examples/performance_model.py index 3ea75b9..075b250 100644 --- a/examples/performance_model.py +++ 
b/examples/performance_model.py @@ -77,6 +77,16 @@ predict_timing["eval_direct"] = ( # }}} +# {{{ Predict multipole_to_local + +param = model.multipole_to_local_model(wall_time=wall_time) + +m2l_workload = np.sum(eval_counter.count_m2l()) + +predict_timing["multipole_to_local"] = m2l_workload * param[0] + param[1] + +# }}} + # {{{ Actual timing true_timing = {} @@ -90,7 +100,7 @@ _ = drive_fmm(eval_traversal, eval_wrangler, source_weights, timing_data=true_ti # }}} -for field in ["eval_direct"]: +for field in ["eval_direct", "multipole_to_local"]: wall_time_field = predict_timing[field] if wall_time: -- GitLab From 00f47b3e02c7fc7a2904d74d49266a30a4af059a Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 23 Jul 2018 13:58:28 -0500 Subject: [PATCH 131/260] Add eval_multipoles model --- boxtree/fmm.py | 41 ++++++++++++++++++++++++++++++++--- examples/performance_model.py | 12 +++++++++- 2 files changed, 49 insertions(+), 4 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index d4147f9..0b65688 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -625,6 +625,33 @@ class PerformanceCounter: return nm2l + def count_m2p(self, use_global_idx=False): + trav = self.traversal + tree = trav.tree + + if use_global_idx: + nm2p = np.zeros((tree.nboxes,), dtype=np.intp) + else: + nm2p = np.zeros((len(trav.target_boxes),), dtype=np.intp) + + for ilevel, sep_smaller_list in enumerate(trav.from_sep_smaller_by_level): + ncoeffs_fmm_cur_level = self.parameters.ncoeffs_fmm_by_level[ilevel] + tgt_box_list = trav.target_boxes_sep_smaller_by_source_level[ilevel] + + for itgt_box, tgt_ibox in enumerate(tgt_box_list): + ntargets = tree.box_target_counts_nonchild[tgt_ibox] + + start, end = sep_smaller_list.starts[itgt_box:itgt_box + 2] + + workload = (end - start) * ntargets * ncoeffs_fmm_cur_level + + if use_global_idx: + nm2p[tgt_ibox] += workload + else: + nm2p[sep_smaller_list.nonempty_indices[itgt_box]] += workload + + return nm2p + class PerformanceModel: @@ -648,7 +675,8 @@ class PerformanceModel: "nterms_fmm_total": counter.count_nters_fmm_total(), "direct_workload": np.sum(counter.count_direct()), "direct_nsource_boxes": traversal.neighbor_source_boxes_starts[-1], - "m2l_workload": np.sum(counter.count_m2l()) + "m2l_workload": np.sum(counter.count_m2l()), + "m2p_workload": np.sum(counter.count_m2p()) } # Generate random source weights @@ -665,8 +693,9 @@ class PerformanceModel: self.time_result.append(timing_data) def form_multipoles_model(self, wall_time=True): - return self.linear_regression("form_multipoles", ["nterms_fmm_total"], - wall_time=wall_time) + return self.linear_regression( + "form_multipoles", ["nterms_fmm_total"], + wall_time=wall_time) def eval_direct_model(self, wall_time=True): return self.linear_regression( @@ -680,6 +709,12 @@ class PerformanceModel: wall_time=wall_time ) + def eval_multipoles_model(self, wall_time=True): + return self.linear_regression( + "eval_multipoles", ["m2p_workload"], + wall_time=wall_time + ) + def linear_regression(self, y_name, x_name, wall_time=True): """ :arg y_name: Name of the depedent variable diff --git a/examples/performance_model.py b/examples/performance_model.py index 075b250..4ca8872 100644 --- a/examples/performance_model.py +++ b/examples/performance_model.py @@ -87,6 +87,16 @@ predict_timing["multipole_to_local"] = m2l_workload * param[0] + param[1] # }}} +# {{{ Predict eval_multipoles + +param = model.eval_multipoles_model(wall_time=wall_time) + +m2p_workload = np.sum(eval_counter.count_m2p()) + +predict_timing["eval_multipoles"] = 
m2p_workload * param[0] + param[1] + +# }}} + # {{{ Actual timing true_timing = {} @@ -100,7 +110,7 @@ _ = drive_fmm(eval_traversal, eval_wrangler, source_weights, timing_data=true_ti # }}} -for field in ["eval_direct", "multipole_to_local"]: +for field in ["eval_direct", "multipole_to_local", "eval_multipoles"]: wall_time_field = predict_timing[field] if wall_time: -- GitLab From 311f8fa9f5e44342bd0fdfcd7294b433667204f2 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 23 Jul 2018 17:57:07 -0500 Subject: [PATCH 132/260] Add form_locals model --- boxtree/fmm.py | 36 ++++++++++++++++++++++++++++++++++- examples/performance_model.py | 19 ++++++++++++++---- 2 files changed, 50 insertions(+), 5 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 0b65688..2000225 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -652,6 +652,33 @@ class PerformanceCounter: return nm2p + def count_p2l(self, use_global_idx=False): + trav = self.traversal + tree = trav.tree + parameters = self.parameters + + if use_global_idx: + np2l = np.zeros((tree.nboxes,), dtype=np.intp) + else: + np2l = np.zeros(len(trav.target_or_target_parent_boxes), dtype=np.intp) + + for itgt_box, tgt_ibox in enumerate(trav.target_or_target_parent_boxes): + tgt_box_level = trav.tree.box_levels[tgt_ibox] + ncoeffs = parameters.ncoeffs_fmm_by_level[tgt_box_level] + + start, end = trav.from_sep_bigger_starts[itgt_box:itgt_box + 2] + + np2l_sources = 0 + for src_ibox in trav.from_sep_bigger_lists[start:end]: + np2l_sources += tree.box_source_counts_nonchild[src_ibox] + + if use_global_idx: + np2l[tgt_ibox] = np2l_sources * ncoeffs + else: + np2l[itgt_box] = np2l_sources * ncoeffs + + return np2l + class PerformanceModel: @@ -676,7 +703,8 @@ class PerformanceModel: "direct_workload": np.sum(counter.count_direct()), "direct_nsource_boxes": traversal.neighbor_source_boxes_starts[-1], "m2l_workload": np.sum(counter.count_m2l()), - "m2p_workload": np.sum(counter.count_m2p()) + "m2p_workload": np.sum(counter.count_m2p()), + "p2l_workload": np.sum(counter.count_p2l()) } # Generate random source weights @@ -715,6 +743,12 @@ class PerformanceModel: wall_time=wall_time ) + def form_locals_model(self, wall_time=True): + return self.linear_regression( + "form_locals", ["p2l_workload"], + wall_time=wall_time + ) + def linear_regression(self, y_name, x_name, wall_time=True): """ :arg y_name: Name of the depedent variable diff --git a/examples/performance_model.py b/examples/performance_model.py index 4ca8872..a139f99 100644 --- a/examples/performance_model.py +++ b/examples/performance_model.py @@ -97,6 +97,16 @@ predict_timing["eval_multipoles"] = m2p_workload * param[0] + param[1] # }}} +# {{{ Predict form_locals + +param = model.form_locals_model(wall_time=wall_time) + +p2l_workload = np.sum(eval_counter.count_p2l()) + +predict_timing["form_locals"] = p2l_workload * param[0] + param[1] + +# }}} + # {{{ Actual timing true_timing = {} @@ -110,14 +120,15 @@ _ = drive_fmm(eval_traversal, eval_wrangler, source_weights, timing_data=true_ti # }}} -for field in ["eval_direct", "multipole_to_local", "eval_multipoles"]: - wall_time_field = predict_timing[field] +for field in ["eval_direct", "multipole_to_local", "eval_multipoles", "form_locals"]: + predict_time_field = predict_timing[field] if wall_time: true_time_field = true_timing[field].wall_elapsed else: true_time_field = true_timing[field].process_elapsed - diff = abs(wall_time_field - true_time_field) + diff = abs(predict_time_field - true_time_field) - print(field + " error: " + str(diff / 
true_time_field)) + print(field + ": predict " + str(predict_time_field) + " actual " + + str(true_time_field) + " error " + str(diff / true_time_field)) -- GitLab From 27f73be20c6b07836cfeff90237406f4de439681 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 24 Jul 2018 11:34:53 -0500 Subject: [PATCH 133/260] Add eval_locals model --- boxtree/fmm.py | 33 +++++++++++++++++++++++++++++++-- examples/performance_model.py | 15 +++++++++++++-- 2 files changed, 44 insertions(+), 4 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 2000225..b79e849 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -679,6 +679,28 @@ class PerformanceCounter: return np2l + def count_eval_part(self, use_global_idx=False): + trav = self.traversal + tree = trav.tree + parameters = self.parameters + + if use_global_idx: + neval_part = np.zeros(tree.nboxes, dtype=np.intp) + else: + neval_part = np.zeros(len(trav.target_boxes), dtype=np.intp) + + for itgt_box, tgt_ibox in enumerate(trav.target_boxes): + ntargets = tree.box_target_counts_nonchild[tgt_ibox] + tgt_box_level = trav.tree.box_levels[tgt_ibox] + ncoeffs_fmm = parameters.ncoeffs_fmm_by_level[tgt_box_level] + + if use_global_idx: + neval_part[tgt_ibox] = ntargets * ncoeffs_fmm + else: + neval_part[itgt_box] = ntargets * ncoeffs_fmm + + return neval_part + class PerformanceModel: @@ -692,7 +714,7 @@ class PerformanceModel: from pyopencl.clrandom import PhiloxGenerator self.rng = PhiloxGenerator(cl_context) - def time_performance(self, traversal): + def time_performance(self, traversal, drive_fmm): wrangler = self.wrangler_factory(traversal.tree) counter = PerformanceCounter(traversal, wrangler, self.uses_pde_expansions) @@ -704,7 +726,8 @@ class PerformanceModel: "direct_nsource_boxes": traversal.neighbor_source_boxes_starts[-1], "m2l_workload": np.sum(counter.count_m2l()), "m2p_workload": np.sum(counter.count_m2p()), - "p2l_workload": np.sum(counter.count_p2l()) + "p2l_workload": np.sum(counter.count_p2l()), + "eval_part_workload": np.sum(counter.count_eval_part()) } # Generate random source weights @@ -749,6 +772,12 @@ class PerformanceModel: wall_time=wall_time ) + def eval_locals_model(self, wall_time=True): + return self.linear_regression( + "eval_locals", ["eval_part_workload"], + wall_time=wall_time + ) + def linear_regression(self, y_name, x_name, wall_time=True): """ :arg y_name: Name of the depedent variable diff --git a/examples/performance_model.py b/examples/performance_model.py index a139f99..24fc6db 100644 --- a/examples/performance_model.py +++ b/examples/performance_model.py @@ -55,7 +55,7 @@ wrangler_factory = functools.partial( ntraversals = len(traversals) model = PerformanceModel(context, wrangler_factory, True) for i in range(ntraversals - 1): - model.time_performance(traversals[i]) + model.time_performance(traversals[i], drive_fmm) eval_traversal = traversals[-1] eval_wrangler = wrangler_factory(eval_traversal.tree) @@ -107,6 +107,16 @@ predict_timing["form_locals"] = p2l_workload * param[0] + param[1] # }}} +# {{{ + +param = model.eval_locals_model(wall_time=wall_time) + +eval_part_workload = np.sum(eval_counter.count_eval_part()) + +predict_timing["eval_locals"] = eval_part_workload * param[0] + param[1] + +# }}} + # {{{ Actual timing true_timing = {} @@ -120,7 +130,8 @@ _ = drive_fmm(eval_traversal, eval_wrangler, source_weights, timing_data=true_ti # }}} -for field in ["eval_direct", "multipole_to_local", "eval_multipoles", "form_locals"]: +for field in ["eval_direct", "multipole_to_local", "eval_multipoles", 
"form_locals", + "eval_locals"]: predict_time_field = predict_timing[field] if wall_time: -- GitLab From d7f90bda7870e8f143a6011167e05d506012d0f0 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 24 Jul 2018 17:24:27 -0500 Subject: [PATCH 134/260] Move code around and refactoring --- boxtree/distributed/perf_model.py | 519 ++++++++++++++++++++++++++++++ boxtree/fmm.py | 393 ---------------------- examples/demo_perf_model.py | 80 +++++ examples/performance_model.py | 145 --------- 4 files changed, 599 insertions(+), 538 deletions(-) create mode 100644 boxtree/distributed/perf_model.py create mode 100644 examples/demo_perf_model.py delete mode 100644 examples/performance_model.py diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py new file mode 100644 index 0000000..dc344bb --- /dev/null +++ b/boxtree/distributed/perf_model.py @@ -0,0 +1,519 @@ +from __future__ import division + +__copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ + Copyright (C) 2018 Hao Gao" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+""" + +import pyopencl as cl +import numpy as np +from collections import namedtuple +from pyopencl.clrandom import PhiloxGenerator + + +def generate_random_traversal(context, nsources, ntargets, dims, dtype): + with cl.CommandQueue(context) as queue: + from boxtree.tools import make_normal_particle_array as p_normal + sources = p_normal(queue, nsources, dims, dtype, seed=15) + targets = p_normal(queue, ntargets, dims, dtype, seed=18) + + rng = PhiloxGenerator(context, seed=22) + target_radii = rng.uniform( + queue, ntargets, a=0, b=0.05, dtype=np.float64).get() + + from boxtree import TreeBuilder + tb = TreeBuilder(context) + tree, _ = tb(queue, sources, targets=targets, target_radii=target_radii, + stick_out_factor=0.25, max_particles_in_box=30, debug=True) + + from boxtree.traversal import FMMTraversalBuilder + tg = FMMTraversalBuilder(context, well_sep_is_n_away=2) + d_trav, _ = tg(queue, tree, debug=True) + trav = d_trav.get(queue=queue) + + return trav + + +FMMParameters = namedtuple( + "FMMParameters", + ['ncoeffs_fmm_by_level', + 'translation_source_power', + 'translation_target_power', + 'translation_max_power'] +) + + +class PerformanceCounter: + + def __init__(self, traversal, wrangler, uses_pde_expansions): + self.traversal = traversal + self.wrangler = wrangler + self.uses_pde_expansions = uses_pde_expansions + + self.parameters = self.get_fmm_parameters( + traversal.tree.dimensions, + uses_pde_expansions, + wrangler.level_nterms + ) + + @staticmethod + def xlat_cost(p_source, p_target, parameters): + """ + :param p_source: A numpy array of numbers of source terms + :return: The same shape as *p_source* + """ + return ( + p_source ** parameters.translation_source_power + * p_target ** parameters.translation_target_power + * np.maximum(p_source, p_target) ** parameters.translation_max_power + ) + + @staticmethod + def get_fmm_parameters(dimensions, use_pde_expansions, level_nterms): + if use_pde_expansions: + ncoeffs_fmm_by_level = level_nterms ** (dimensions - 1) + + if dimensions == 2: + translation_source_power = 1 + translation_target_power = 1 + translation_max_power = 0 + elif dimensions == 3: + # Based on a reading of FMMlib, i.e. a point-and-shoot FMM. + translation_source_power = 0 + translation_target_power = 0 + translation_max_power = 3 + else: + raise ValueError("Don't know how to estimate expansion complexities " + "for dimension %d" % dimensions) + + else: + ncoeffs_fmm_by_level = level_nterms ** dimensions + + translation_source_power = dimensions + translation_target_power = dimensions + translation_max_power = 0 + + return FMMParameters( + ncoeffs_fmm_by_level=ncoeffs_fmm_by_level, + translation_source_power=translation_source_power, + translation_target_power=translation_target_power, + translation_max_power=translation_max_power + ) + + def count_nsources_by_level(self): + """ + :return: A numpy array of share (tree.nlevels,) such that the ith index + documents the number of sources on level i. 
+ """ + tree = self.traversal.tree + + nsources_by_level = np.empty((tree.nlevels,), dtype=np.int32) + + for ilevel in range(tree.nlevels): + start_ibox = tree.level_start_box_nrs[ilevel] + end_ibox = tree.level_start_box_nrs[ilevel + 1] + count = 0 + + for ibox in range(start_ibox, end_ibox): + count += tree.box_source_counts_nonchild[ibox] + + nsources_by_level[ilevel] = count + + return nsources_by_level + + def count_nters_fmm_total(self): + """ + :return: total number of terms formed across all levels during form_multipole + """ + nsources_by_level = self.count_nsources_by_level() + + ncoeffs_fmm_by_level = self.parameters.ncoeffs_fmm_by_level + + nterms_fmm_total = np.sum(nsources_by_level * ncoeffs_fmm_by_level) + + return nterms_fmm_total + + def count_direct(self, use_global_idx=False): + """ + :return: If *use_global_idx* is True, return a numpy array of shape + (tree.nboxes,) such that the ith entry represents the workload from + direct evaluation on box i. If *use_global_idx* is False, return a numpy + array of shape (ntarget_boxes,) such that the ith entry represents the + workload on *target_boxes* i. + """ + traversal = self.traversal + tree = traversal.tree + + if use_global_idx: + direct_workload = np.zeros((tree.nboxes,), dtype=np.int64) + else: + ntarget_boxes = len(traversal.target_boxes) + direct_workload = np.zeros((ntarget_boxes,), dtype=np.int64) + + for itgt_box, tgt_ibox in enumerate(traversal.target_boxes): + ntargets = tree.box_target_counts_nonchild[tgt_ibox] + nsources = 0 + + start, end = traversal.neighbor_source_boxes_starts[itgt_box:itgt_box+2] + + for src_ibox in traversal.neighbor_source_boxes_lists[start:end]: + nsources += tree.box_source_counts_nonchild[src_ibox] + + if traversal.from_sep_close_smaller_starts is not None: + start, end = ( + traversal.from_sep_close_smaller_starts[itgt_box:itgt_box+2]) + + for src_ibox in traversal.from_sep_close_smaller_lists[start:end]: + nsources += tree.box_source_counts_nonchild[src_ibox] + + if traversal.from_sep_close_bigger_starts is not None: + start, end = ( + traversal.from_sep_close_bigger_starts[itgt_box:itgt_box+2]) + + for src_ibox in traversal.from_sep_close_bigger_lists[start:end]: + nsources += tree.box_source_counts_nonchild[src_ibox] + + count = nsources * ntargets + + if use_global_idx: + direct_workload[tgt_ibox] = count + else: + direct_workload[itgt_box] = count + + return direct_workload + + def count_m2l(self, use_global_idx=False): + """ + :return: If *use_global_idx* is True, return a numpy array of shape + (tree.nboxes,) such that the ith entry represents the workload from + multipole to local expansion on box i. If *use_global_idx* is False, + return a numpy array of shape (ntarget_or_target_parent_boxes,) such that + the ith entry represents the workload on *target_or_target_parent_boxes* + i. 
+ """ + trav = self.traversal + wrangler = self.wrangler + parameters = self.parameters + + ntarget_or_target_parent_boxes = len(trav.target_or_target_parent_boxes) + + if use_global_idx: + nm2l = np.zeros((trav.tree.nboxes,), dtype=np.intp) + else: + nm2l = np.zeros((ntarget_or_target_parent_boxes,), dtype=np.intp) + + for itgt_box, tgt_ibox in enumerate(trav.target_or_target_parent_boxes): + start, end = trav.from_sep_siblings_starts[itgt_box:itgt_box+2] + from_sep_siblings_level = trav.tree.box_levels[ + trav.from_sep_siblings_lists[start:end] + ] + + if start == end: + continue + + tgt_box_level = trav.tree.box_levels[tgt_ibox] + + from_sep_siblings_nterms = wrangler.level_nterms[from_sep_siblings_level] + tgt_box_nterms = wrangler.level_nterms[tgt_box_level] + + from_sep_siblings_costs = self.xlat_cost( + from_sep_siblings_nterms, tgt_box_nterms, parameters) + + if use_global_idx: + nm2l[tgt_ibox] += np.sum(from_sep_siblings_costs) + else: + nm2l[itgt_box] += np.sum(from_sep_siblings_costs) + + return nm2l + + def count_m2p(self, use_global_idx=False): + trav = self.traversal + tree = trav.tree + + if use_global_idx: + nm2p = np.zeros((tree.nboxes,), dtype=np.intp) + else: + nm2p = np.zeros((len(trav.target_boxes),), dtype=np.intp) + + for ilevel, sep_smaller_list in enumerate(trav.from_sep_smaller_by_level): + ncoeffs_fmm_cur_level = self.parameters.ncoeffs_fmm_by_level[ilevel] + tgt_box_list = trav.target_boxes_sep_smaller_by_source_level[ilevel] + + for itgt_box, tgt_ibox in enumerate(tgt_box_list): + ntargets = tree.box_target_counts_nonchild[tgt_ibox] + + start, end = sep_smaller_list.starts[itgt_box:itgt_box + 2] + + workload = (end - start) * ntargets * ncoeffs_fmm_cur_level + + if use_global_idx: + nm2p[tgt_ibox] += workload + else: + nm2p[sep_smaller_list.nonempty_indices[itgt_box]] += workload + + return nm2p + + def count_p2l(self, use_global_idx=False): + trav = self.traversal + tree = trav.tree + parameters = self.parameters + + if use_global_idx: + np2l = np.zeros((tree.nboxes,), dtype=np.intp) + else: + np2l = np.zeros(len(trav.target_or_target_parent_boxes), dtype=np.intp) + + for itgt_box, tgt_ibox in enumerate(trav.target_or_target_parent_boxes): + tgt_box_level = trav.tree.box_levels[tgt_ibox] + ncoeffs = parameters.ncoeffs_fmm_by_level[tgt_box_level] + + start, end = trav.from_sep_bigger_starts[itgt_box:itgt_box + 2] + + np2l_sources = 0 + for src_ibox in trav.from_sep_bigger_lists[start:end]: + np2l_sources += tree.box_source_counts_nonchild[src_ibox] + + if use_global_idx: + np2l[tgt_ibox] = np2l_sources * ncoeffs + else: + np2l[itgt_box] = np2l_sources * ncoeffs + + return np2l + + def count_eval_part(self, use_global_idx=False): + trav = self.traversal + tree = trav.tree + parameters = self.parameters + + if use_global_idx: + neval_part = np.zeros(tree.nboxes, dtype=np.intp) + else: + neval_part = np.zeros(len(trav.target_boxes), dtype=np.intp) + + for itgt_box, tgt_ibox in enumerate(trav.target_boxes): + ntargets = tree.box_target_counts_nonchild[tgt_ibox] + tgt_box_level = trav.tree.box_levels[tgt_ibox] + ncoeffs_fmm = parameters.ncoeffs_fmm_by_level[tgt_box_level] + + if use_global_idx: + neval_part[tgt_ibox] = ntargets * ncoeffs_fmm + else: + neval_part[itgt_box] = ntargets * ncoeffs_fmm + + return neval_part + + +class PerformanceModel: + + def __init__(self, cl_context, wrangler_factory, uses_pde_expansions, drive_fmm): + self.cl_context = cl_context + self.wrangler_factory = wrangler_factory + self.uses_pde_expansions = uses_pde_expansions + 
self.drive_fmm = drive_fmm + + self.time_result = [] + + from pyopencl.clrandom import PhiloxGenerator + self.rng = PhiloxGenerator(cl_context) + + def time_performance(self, traversal): + wrangler = self.wrangler_factory(traversal.tree) + + counter = PerformanceCounter(traversal, wrangler, self.uses_pde_expansions) + + # Record useful metadata for assembling performance data + timing_data = { + "nterms_fmm_total": counter.count_nters_fmm_total(), + "direct_workload": np.sum(counter.count_direct()), + "direct_nsource_boxes": traversal.neighbor_source_boxes_starts[-1], + "m2l_workload": np.sum(counter.count_m2l()), + "m2p_workload": np.sum(counter.count_m2p()), + "p2l_workload": np.sum(counter.count_p2l()), + "eval_part_workload": np.sum(counter.count_eval_part()) + } + + # Generate random source weights + with cl.CommandQueue(self.cl_context) as queue: + source_weights = self.rng.uniform( + queue, + traversal.tree.nsources, + traversal.tree.coord_dtype + ).get() + + # Time a FMM run + self.drive_fmm(traversal, wrangler, source_weights, timing_data=timing_data) + + self.time_result.append(timing_data) + + def form_multipoles_model(self, wall_time=True): + return self.linear_regression( + "form_multipoles", ["nterms_fmm_total"], + wall_time=wall_time) + + def eval_direct_model(self, wall_time=True): + return self.linear_regression( + "eval_direct", + ["direct_workload", "direct_nsource_boxes"], + wall_time=wall_time) + + def multipole_to_local_model(self, wall_time=True): + return self.linear_regression( + "multipole_to_local", ["m2l_workload"], + wall_time=wall_time + ) + + def eval_multipoles_model(self, wall_time=True): + return self.linear_regression( + "eval_multipoles", ["m2p_workload"], + wall_time=wall_time + ) + + def form_locals_model(self, wall_time=True): + return self.linear_regression( + "form_locals", ["p2l_workload"], + wall_time=wall_time + ) + + def eval_locals_model(self, wall_time=True): + return self.linear_regression( + "eval_locals", ["eval_part_workload"], + wall_time=wall_time + ) + + def linear_regression(self, y_name, x_name, wall_time=True): + """ + :arg y_name: Name of the depedent variable + :arg x_name: A list of names of independent variables + """ + nresult = len(self.time_result) + nvariables = len(x_name) + + if nresult < 1: + raise RuntimeError("Please run FMM at lease once using time_performance" + "before forming models.") + elif nresult == 1: + result = self.time_result[0] + + if wall_time: + dependent_value = result[y_name].wall_elapsed + else: + dependent_value = result[y_name].process_elapsed + + independent_value = result[x_name[0]] + coeff = dependent_value / independent_value + + return (coeff,) + tuple(0.0 for _ in range(nvariables - 1)) + else: + dependent_value = np.empty((nresult,), dtype=float) + coeff_matrix = np.empty((nresult, nvariables + 1), dtype=float) + + for iresult, result in enumerate(self.time_result): + if wall_time: + dependent_value[iresult] = result[y_name].wall_elapsed + else: + dependent_value[iresult] = result[y_name].process_elapsed + + for icol, variable_name in enumerate(x_name): + coeff_matrix[iresult, icol] = result[variable_name] + + coeff_matrix[:, -1] = 1 + + from numpy.linalg import lstsq + coeff = lstsq(coeff_matrix, dependent_value, rcond=-1)[0] + + return coeff + + def time_random_traversals(self): + context = self.cl_context + dtype = np.float64 + + traversals = [] + + for nsources, ntargets, dims in [(9000, 9000, 3), + (12000, 12000, 3), + (15000, 15000, 3), + (18000, 18000, 3), + (21000, 21000, 3)]: + 
generated_traversal = generate_random_traversal( + context, nsources, ntargets, dims, dtype + ) + + traversals.append(generated_traversal) + + for trav in traversals: + self.time_performance(trav) + + def predict_time(self, eval_traversal, eval_counter, wall_time=True): + predict_timing = {} + + # {{{ Predict eval_direct + + param = self.eval_direct_model(wall_time=wall_time) + + direct_workload = np.sum(eval_counter.count_direct()) + direct_nsource_boxes = eval_traversal.neighbor_source_boxes_starts[-1] + + predict_timing["eval_direct"] = ( + direct_workload * param[0] + direct_nsource_boxes * param[1] + param[2]) + + # }}} + + # {{{ Predict multipole_to_local + + param = self.multipole_to_local_model(wall_time=wall_time) + + m2l_workload = np.sum(eval_counter.count_m2l()) + + predict_timing["multipole_to_local"] = m2l_workload * param[0] + param[1] + + # }}} + + # {{{ Predict eval_multipoles + + param = self.eval_multipoles_model(wall_time=wall_time) + + m2p_workload = np.sum(eval_counter.count_m2p()) + + predict_timing["eval_multipoles"] = m2p_workload * param[0] + param[1] + + # }}} + + # {{{ Predict form_locals + + param = self.form_locals_model(wall_time=wall_time) + + p2l_workload = np.sum(eval_counter.count_p2l()) + + predict_timing["form_locals"] = p2l_workload * param[0] + param[1] + + # }}} + + # {{{ + + param = self.eval_locals_model(wall_time=wall_time) + + eval_part_workload = np.sum(eval_counter.count_eval_part()) + + predict_timing["eval_locals"] = eval_part_workload * param[0] + param[1] + + # }}} + + return predict_timing diff --git a/boxtree/fmm.py b/boxtree/fmm.py index b79e849..e4f494a 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -26,9 +26,6 @@ import logging logger = logging.getLogger(__name__) from pytools import ProcessLogger, Record -import pyopencl as cl -import numpy as np -from collections import namedtuple def drive_fmm(traversal, expansion_wrangler, src_weights, timing_data=None): @@ -432,394 +429,4 @@ class TimingRecorder(object): # }}} - -FMMParameters = namedtuple( - "FMMParameters", - ['ncoeffs_fmm_by_level', - 'translation_source_power', - 'translation_target_power', - 'translation_max_power'] -) - - -class PerformanceCounter: - - def __init__(self, traversal, wrangler, uses_pde_expansions): - self.traversal = traversal - self.wrangler = wrangler - self.uses_pde_expansions = uses_pde_expansions - - self.parameters = self.get_fmm_parameters( - traversal.tree.dimensions, - uses_pde_expansions, - wrangler.level_nterms - ) - - @staticmethod - def xlat_cost(p_source, p_target, parameters): - """ - :param p_source: A numpy array of numbers of source terms - :return: The same shape as *p_source* - """ - return ( - p_source ** parameters.translation_source_power - * p_target ** parameters.translation_target_power - * np.maximum(p_source, p_target) ** parameters.translation_max_power - ) - - @staticmethod - def get_fmm_parameters(dimensions, use_pde_expansions, level_nterms): - if use_pde_expansions: - ncoeffs_fmm_by_level = level_nterms ** (dimensions - 1) - - if dimensions == 2: - translation_source_power = 1 - translation_target_power = 1 - translation_max_power = 0 - elif dimensions == 3: - # Based on a reading of FMMlib, i.e. a point-and-shoot FMM. 
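With the parameters defined here, one multipole-to-local translation is charged

    p_source**translation_source_power
        * p_target**translation_target_power
        * max(p_source, p_target)**translation_max_power

so the 2D PDE case above costs p_source * p_target per translation, and the 3D point-and-shoot case that follows costs max(p_source, p_target)**3. A minimal standalone sketch of the same formula, using scalars instead of the numpy arrays that xlat_cost operates on:

    def xlat_cost_scalar(p_source, p_target, src_pow, tgt_pow, max_pow):
        # Cost of one multipole-to-local translation under the model above.
        return (p_source ** src_pow
                * p_target ** tgt_pow
                * max(p_source, p_target) ** max_pow)

    print(xlat_cost_scalar(10, 12, 1, 1, 0))  # 2D PDE expansions: 10 * 12 = 120
    print(xlat_cost_scalar(10, 12, 0, 0, 3))  # 3D point-and-shoot: 12**3 = 1728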
- translation_source_power = 0 - translation_target_power = 0 - translation_max_power = 3 - else: - raise ValueError("Don't know how to estimate expansion complexities " - "for dimension %d" % dimensions) - - else: - ncoeffs_fmm_by_level = level_nterms ** dimensions - - translation_source_power = dimensions - translation_target_power = dimensions - translation_max_power = 0 - - return FMMParameters( - ncoeffs_fmm_by_level=ncoeffs_fmm_by_level, - translation_source_power=translation_source_power, - translation_target_power=translation_target_power, - translation_max_power=translation_max_power - ) - - def count_nsources_by_level(self): - """ - :return: A numpy array of share (tree.nlevels,) such that the ith index - documents the number of sources on level i. - """ - tree = self.traversal.tree - - nsources_by_level = np.empty((tree.nlevels,), dtype=np.int32) - - for ilevel in range(tree.nlevels): - start_ibox = tree.level_start_box_nrs[ilevel] - end_ibox = tree.level_start_box_nrs[ilevel + 1] - count = 0 - - for ibox in range(start_ibox, end_ibox): - count += tree.box_source_counts_nonchild[ibox] - - nsources_by_level[ilevel] = count - - return nsources_by_level - - def count_nters_fmm_total(self): - """ - :return: total number of terms formed across all levels during form_multipole - """ - nsources_by_level = self.count_nsources_by_level() - - ncoeffs_fmm_by_level = self.parameters.ncoeffs_fmm_by_level - - nterms_fmm_total = np.sum(nsources_by_level * ncoeffs_fmm_by_level) - - return nterms_fmm_total - - def count_direct(self, use_global_idx=False): - """ - :return: If *use_global_idx* is True, return a numpy array of shape - (tree.nboxes,) such that the ith entry represents the workload from - direct evaluation on box i. If *use_global_idx* is False, return a numpy - array of shape (ntarget_boxes,) such that the ith entry represents the - workload on *target_boxes* i. - """ - traversal = self.traversal - tree = traversal.tree - - if use_global_idx: - direct_workload = np.zeros((tree.nboxes,), dtype=np.int64) - else: - ntarget_boxes = len(traversal.target_boxes) - direct_workload = np.zeros((ntarget_boxes,), dtype=np.int64) - - for itgt_box, tgt_ibox in enumerate(traversal.target_boxes): - ntargets = tree.box_target_counts_nonchild[tgt_ibox] - nsources = 0 - - start, end = traversal.neighbor_source_boxes_starts[itgt_box:itgt_box+2] - - for src_ibox in traversal.neighbor_source_boxes_lists[start:end]: - nsources += tree.box_source_counts_nonchild[src_ibox] - - if traversal.from_sep_close_smaller_starts is not None: - start, end = ( - traversal.from_sep_close_smaller_starts[itgt_box:itgt_box+2]) - - for src_ibox in traversal.from_sep_close_smaller_lists[start:end]: - nsources += tree.box_source_counts_nonchild[src_ibox] - - if traversal.from_sep_close_bigger_starts is not None: - start, end = ( - traversal.from_sep_close_bigger_starts[itgt_box:itgt_box+2]) - - for src_ibox in traversal.from_sep_close_bigger_lists[start:end]: - nsources += tree.box_source_counts_nonchild[src_ibox] - - count = nsources * ntargets - - if use_global_idx: - direct_workload[tgt_ibox] = count - else: - direct_workload[itgt_box] = count - - return direct_workload - - def count_m2l(self, use_global_idx=False): - """ - :return: If *use_global_idx* is True, return a numpy array of shape - (tree.nboxes,) such that the ith entry represents the workload from - multipole to local expansion on box i. 
If *use_global_idx* is False, - return a numpy array of shape (ntarget_or_target_parent_boxes,) such that - the ith entry represents the workload on *target_or_target_parent_boxes* - i. - """ - trav = self.traversal - wrangler = self.wrangler - parameters = self.parameters - - ntarget_or_target_parent_boxes = len(trav.target_or_target_parent_boxes) - - if use_global_idx: - nm2l = np.zeros((trav.tree.nboxes,), dtype=np.intp) - else: - nm2l = np.zeros((ntarget_or_target_parent_boxes,), dtype=np.intp) - - for itgt_box, tgt_ibox in enumerate(trav.target_or_target_parent_boxes): - start, end = trav.from_sep_siblings_starts[itgt_box:itgt_box+2] - from_sep_siblings_level = trav.tree.box_levels[ - trav.from_sep_siblings_lists[start:end] - ] - - if start == end: - continue - - tgt_box_level = trav.tree.box_levels[tgt_ibox] - - from_sep_siblings_nterms = wrangler.level_nterms[from_sep_siblings_level] - tgt_box_nterms = wrangler.level_nterms[tgt_box_level] - - from_sep_siblings_costs = self.xlat_cost( - from_sep_siblings_nterms, tgt_box_nterms, parameters) - - if use_global_idx: - nm2l[tgt_ibox] += np.sum(from_sep_siblings_costs) - else: - nm2l[itgt_box] += np.sum(from_sep_siblings_costs) - - return nm2l - - def count_m2p(self, use_global_idx=False): - trav = self.traversal - tree = trav.tree - - if use_global_idx: - nm2p = np.zeros((tree.nboxes,), dtype=np.intp) - else: - nm2p = np.zeros((len(trav.target_boxes),), dtype=np.intp) - - for ilevel, sep_smaller_list in enumerate(trav.from_sep_smaller_by_level): - ncoeffs_fmm_cur_level = self.parameters.ncoeffs_fmm_by_level[ilevel] - tgt_box_list = trav.target_boxes_sep_smaller_by_source_level[ilevel] - - for itgt_box, tgt_ibox in enumerate(tgt_box_list): - ntargets = tree.box_target_counts_nonchild[tgt_ibox] - - start, end = sep_smaller_list.starts[itgt_box:itgt_box + 2] - - workload = (end - start) * ntargets * ncoeffs_fmm_cur_level - - if use_global_idx: - nm2p[tgt_ibox] += workload - else: - nm2p[sep_smaller_list.nonempty_indices[itgt_box]] += workload - - return nm2p - - def count_p2l(self, use_global_idx=False): - trav = self.traversal - tree = trav.tree - parameters = self.parameters - - if use_global_idx: - np2l = np.zeros((tree.nboxes,), dtype=np.intp) - else: - np2l = np.zeros(len(trav.target_or_target_parent_boxes), dtype=np.intp) - - for itgt_box, tgt_ibox in enumerate(trav.target_or_target_parent_boxes): - tgt_box_level = trav.tree.box_levels[tgt_ibox] - ncoeffs = parameters.ncoeffs_fmm_by_level[tgt_box_level] - - start, end = trav.from_sep_bigger_starts[itgt_box:itgt_box + 2] - - np2l_sources = 0 - for src_ibox in trav.from_sep_bigger_lists[start:end]: - np2l_sources += tree.box_source_counts_nonchild[src_ibox] - - if use_global_idx: - np2l[tgt_ibox] = np2l_sources * ncoeffs - else: - np2l[itgt_box] = np2l_sources * ncoeffs - - return np2l - - def count_eval_part(self, use_global_idx=False): - trav = self.traversal - tree = trav.tree - parameters = self.parameters - - if use_global_idx: - neval_part = np.zeros(tree.nboxes, dtype=np.intp) - else: - neval_part = np.zeros(len(trav.target_boxes), dtype=np.intp) - - for itgt_box, tgt_ibox in enumerate(trav.target_boxes): - ntargets = tree.box_target_counts_nonchild[tgt_ibox] - tgt_box_level = trav.tree.box_levels[tgt_ibox] - ncoeffs_fmm = parameters.ncoeffs_fmm_by_level[tgt_box_level] - - if use_global_idx: - neval_part[tgt_ibox] = ntargets * ncoeffs_fmm - else: - neval_part[itgt_box] = ntargets * ncoeffs_fmm - - return neval_part - - -class PerformanceModel: - - def __init__(self, cl_context, 
wrangler_factory, uses_pde_expansions): - self.cl_context = cl_context - self.wrangler_factory = wrangler_factory - self.uses_pde_expansions = uses_pde_expansions - - self.time_result = [] - - from pyopencl.clrandom import PhiloxGenerator - self.rng = PhiloxGenerator(cl_context) - - def time_performance(self, traversal, drive_fmm): - wrangler = self.wrangler_factory(traversal.tree) - - counter = PerformanceCounter(traversal, wrangler, self.uses_pde_expansions) - - # Record useful metadata for assembling performance data - timing_data = { - "nterms_fmm_total": counter.count_nters_fmm_total(), - "direct_workload": np.sum(counter.count_direct()), - "direct_nsource_boxes": traversal.neighbor_source_boxes_starts[-1], - "m2l_workload": np.sum(counter.count_m2l()), - "m2p_workload": np.sum(counter.count_m2p()), - "p2l_workload": np.sum(counter.count_p2l()), - "eval_part_workload": np.sum(counter.count_eval_part()) - } - - # Generate random source weights - with cl.CommandQueue(self.cl_context) as queue: - source_weights = self.rng.uniform( - queue, - traversal.tree.nsources, - traversal.tree.coord_dtype - ).get() - - # Time a FMM run - drive_fmm(traversal, wrangler, source_weights, timing_data=timing_data) - - self.time_result.append(timing_data) - - def form_multipoles_model(self, wall_time=True): - return self.linear_regression( - "form_multipoles", ["nterms_fmm_total"], - wall_time=wall_time) - - def eval_direct_model(self, wall_time=True): - return self.linear_regression( - "eval_direct", - ["direct_workload", "direct_nsource_boxes"], - wall_time=wall_time) - - def multipole_to_local_model(self, wall_time=True): - return self.linear_regression( - "multipole_to_local", ["m2l_workload"], - wall_time=wall_time - ) - - def eval_multipoles_model(self, wall_time=True): - return self.linear_regression( - "eval_multipoles", ["m2p_workload"], - wall_time=wall_time - ) - - def form_locals_model(self, wall_time=True): - return self.linear_regression( - "form_locals", ["p2l_workload"], - wall_time=wall_time - ) - - def eval_locals_model(self, wall_time=True): - return self.linear_regression( - "eval_locals", ["eval_part_workload"], - wall_time=wall_time - ) - - def linear_regression(self, y_name, x_name, wall_time=True): - """ - :arg y_name: Name of the depedent variable - :arg x_name: A list of names of independent variables - """ - nresult = len(self.time_result) - nvariables = len(x_name) - - if nresult < 1: - raise RuntimeError("Please run FMM at lease once using time_performance" - "before forming models.") - elif nresult == 1: - result = self.time_result[0] - - if wall_time: - dependent_value = result[y_name].wall_elapsed - else: - dependent_value = result[y_name].process_elapsed - - independent_value = result[x_name[0]] - coeff = dependent_value / independent_value - - return (coeff,) + tuple(0.0 for _ in range(nvariables - 1)) - else: - dependent_value = np.empty((nresult,), dtype=float) - coeff_matrix = np.empty((nresult, nvariables + 1), dtype=float) - - for iresult, result in enumerate(self.time_result): - if wall_time: - dependent_value[iresult] = result[y_name].wall_elapsed - else: - dependent_value[iresult] = result[y_name].process_elapsed - - for icol, variable_name in enumerate(x_name): - coeff_matrix[iresult, icol] = result[variable_name] - - coeff_matrix[:, -1] = 1 - - from numpy.linalg import lstsq - coeff = lstsq(coeff_matrix, dependent_value, rcond=-1)[0] - - return coeff - - # vim: filetype=pyopencl:fdm=marker diff --git a/examples/demo_perf_model.py b/examples/demo_perf_model.py 
new file mode 100644 index 0000000..a5c85b9 --- /dev/null +++ b/examples/demo_perf_model.py @@ -0,0 +1,80 @@ +from __future__ import division +import pyopencl as cl +import numpy as np +from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler +import functools +from boxtree.distributed.perf_model import PerformanceModel, PerformanceCounter +from boxtree.fmm import drive_fmm +from pyopencl.clrandom import PhiloxGenerator + +context = cl.create_some_context() +queue = cl.CommandQueue(context) +dtype = np.float64 +helmholtz_k = 0 + + +def fmm_level_to_nterms(tree, level): + return max(level, 3) + + +# {{{ Generate traversal objects for forming models and verification + +traversals = [] + +for nsources, ntargets, dims in [(6000, 6000, 3), + (9000, 9000, 3), + (12000, 12000, 3), + (15000, 15000, 3), + (20000, 20000, 3)]: + + from boxtree.distributed.perf_model import generate_random_traversal + traversals.append(generate_random_traversal( + context, nsources, ntargets, dims, dtype + )) + +# }}} + +wrangler_factory = functools.partial( + FMMLibExpansionWrangler, helmholtz_k=0, fmm_level_to_nterms=fmm_level_to_nterms) + +ntraversals = len(traversals) +model = PerformanceModel(context, wrangler_factory, True, drive_fmm) +for i in range(ntraversals - 1): + model.time_performance(traversals[i]) + +eval_traversal = traversals[-1] +eval_wrangler = wrangler_factory(eval_traversal.tree) +dimensions = eval_traversal.tree.dimensions +eval_counter = PerformanceCounter(eval_traversal, eval_wrangler, True) + +wall_time = True + +predict_timing = model.predict_time(eval_traversal, eval_counter, + wall_time=wall_time) + +# {{{ Actual timing + +true_timing = {} + +rng = PhiloxGenerator(context) +source_weights = rng.uniform( + queue, eval_traversal.tree.nsources, eval_traversal.tree.coord_dtype).get() + +_ = drive_fmm(eval_traversal, eval_wrangler, source_weights, timing_data=true_timing) + +# }}} + + +for field in ["eval_direct", "multipole_to_local", "eval_multipoles", "form_locals", + "eval_locals"]: + predict_time_field = predict_timing[field] + + if wall_time: + true_time_field = true_timing[field].wall_elapsed + else: + true_time_field = true_timing[field].process_elapsed + + diff = abs(predict_time_field - true_time_field) + + print(field + ": predict " + str(predict_time_field) + " actual " + + str(true_time_field) + " error " + str(diff / true_time_field)) diff --git a/examples/performance_model.py b/examples/performance_model.py deleted file mode 100644 index 24fc6db..0000000 --- a/examples/performance_model.py +++ /dev/null @@ -1,145 +0,0 @@ -from __future__ import division -import pyopencl as cl -import numpy as np -from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler -import functools -from boxtree.fmm import PerformanceModel, PerformanceCounter -from boxtree.fmm import drive_fmm -from pyopencl.clrandom import PhiloxGenerator - -context = cl.create_some_context() -queue = cl.CommandQueue(context) -dtype = np.float64 -helmholtz_k = 0 - - -def fmm_level_to_nterms(tree, level): - return max(level, 3) - - -# {{{ Generate traversal objects for forming models and verification - -traversals = [] - -for nsources, ntargets, dims in [(6000, 6000, 3), - (9000, 9000, 3), - (12000, 12000, 3), - (15000, 15000, 3), - (20000, 20000, 3)]: - - from boxtree.tools import make_normal_particle_array as p_normal - sources = p_normal(queue, nsources, dims, dtype, seed=15) - targets = p_normal(queue, ntargets, dims, dtype, seed=18) - - rng = PhiloxGenerator(context, seed=22) - target_radii = 
rng.uniform( - queue, ntargets, a=0, b=0.05, dtype=np.float64).get() - - from boxtree import TreeBuilder - tb = TreeBuilder(context) - tree, _ = tb(queue, sources, targets=targets, target_radii=target_radii, - stick_out_factor=0.25, max_particles_in_box=30, debug=True) - - from boxtree.traversal import FMMTraversalBuilder - tg = FMMTraversalBuilder(context, well_sep_is_n_away=2) - d_trav, _ = tg(queue, tree, debug=True) - trav = d_trav.get(queue=queue) - - traversals.append(trav) - -# }}} - -wrangler_factory = functools.partial( - FMMLibExpansionWrangler, helmholtz_k=0, fmm_level_to_nterms=fmm_level_to_nterms) - -ntraversals = len(traversals) -model = PerformanceModel(context, wrangler_factory, True) -for i in range(ntraversals - 1): - model.time_performance(traversals[i], drive_fmm) - -eval_traversal = traversals[-1] -eval_wrangler = wrangler_factory(eval_traversal.tree) -dimensions = eval_traversal.tree.dimensions -eval_counter = PerformanceCounter(eval_traversal, eval_wrangler, True) - -predict_timing = {} -wall_time = True - -# {{{ Predict eval_direct - -param = model.eval_direct_model(wall_time=wall_time) - -direct_workload = np.sum(eval_counter.count_direct()) -direct_nsource_boxes = eval_traversal.neighbor_source_boxes_starts[-1] - -predict_timing["eval_direct"] = ( - direct_workload * param[0] + direct_nsource_boxes * param[1] + param[2]) - -# }}} - -# {{{ Predict multipole_to_local - -param = model.multipole_to_local_model(wall_time=wall_time) - -m2l_workload = np.sum(eval_counter.count_m2l()) - -predict_timing["multipole_to_local"] = m2l_workload * param[0] + param[1] - -# }}} - -# {{{ Predict eval_multipoles - -param = model.eval_multipoles_model(wall_time=wall_time) - -m2p_workload = np.sum(eval_counter.count_m2p()) - -predict_timing["eval_multipoles"] = m2p_workload * param[0] + param[1] - -# }}} - -# {{{ Predict form_locals - -param = model.form_locals_model(wall_time=wall_time) - -p2l_workload = np.sum(eval_counter.count_p2l()) - -predict_timing["form_locals"] = p2l_workload * param[0] + param[1] - -# }}} - -# {{{ - -param = model.eval_locals_model(wall_time=wall_time) - -eval_part_workload = np.sum(eval_counter.count_eval_part()) - -predict_timing["eval_locals"] = eval_part_workload * param[0] + param[1] - -# }}} - -# {{{ Actual timing - -true_timing = {} - -rng = PhiloxGenerator(context) -source_weights = rng.uniform( - queue, eval_traversal.tree.nsources, eval_traversal.tree.coord_dtype).get() - -_ = drive_fmm(eval_traversal, eval_wrangler, source_weights, timing_data=true_timing) - -# }}} - - -for field in ["eval_direct", "multipole_to_local", "eval_multipoles", "form_locals", - "eval_locals"]: - predict_time_field = predict_timing[field] - - if wall_time: - true_time_field = true_timing[field].wall_elapsed - else: - true_time_field = true_timing[field].process_elapsed - - diff = abs(predict_time_field - true_time_field) - - print(field + ": predict " + str(predict_time_field) + " actual " + - str(true_time_field) + " error " + str(diff / true_time_field)) -- GitLab From 949004ccc676796954777726314406baf65a05dd Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 25 Jul 2018 11:38:54 -0500 Subject: [PATCH 135/260] Integrate performance model into distributed implementation --- boxtree/distributed/__init__.py | 46 +++++++++------ boxtree/distributed/partition.py | 97 ++++++++------------------------ 2 files changed, 52 insertions(+), 91 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 1989bd9..2bfd2e2 100644 --- 
a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -24,8 +24,8 @@ THE SOFTWARE. """ from mpi4py import MPI -from collections import namedtuple import numpy as np +from boxtree.distributed.perf_model import PerformanceModel, PerformanceCounter MPITags = dict( DIST_TREE=0, @@ -38,11 +38,6 @@ MPITags = dict( REDUCE_INDICES=7 ) -WorkloadWeight = namedtuple( - 'Workload', - ['direct', 'm2l', 'm2p', 'p2l', 'multipole'] -) - def dtype_to_mpi(dtype): """ This function translates a numpy.dtype object into the corresponding type @@ -70,6 +65,17 @@ class DistributedFMMInfo(object): self.comm = comm current_rank = comm.Get_rank() + # {{{ Get global wrangler + + if current_rank == 0: + self.global_wrangler = distributed_expansion_wrangler_factory( + self.global_trav.tree + ) + else: + self.global_wrangler = None + + # }}} + # {{{ Broadcast well_sep_is_n_away if current_rank == 0: @@ -81,15 +87,27 @@ class DistributedFMMInfo(object): # }}} + # {{{ Get performance model and counter + + if current_rank == 0: + from boxtree.fmm import drive_fmm + model = PerformanceModel( + queue.context, + distributed_expansion_wrangler_factory, + True, drive_fmm + ) + model.time_random_traversals() + + counter = PerformanceCounter(global_trav, self.global_wrangler, True) + + # }}} + # {{{ Partiton work if current_rank == 0: from boxtree.distributed.partition import partition_work - workload_weight = WorkloadWeight( - direct=1, m2l=1, m2p=1, p2l=1, multipole=5 - ) responsible_boxes_list = partition_work( - global_trav, comm.Get_size(), workload_weight + model, counter, global_trav, comm.Get_size() ) else: responsible_boxes_list = None @@ -120,7 +138,7 @@ class DistributedFMMInfo(object): # }}} - # {{{ Get local and global wrangler + # {{{ Get local wrangler """ Note: The difference between "local wrangler" and "global wrangler" is that @@ -133,12 +151,6 @@ class DistributedFMMInfo(object): self.local_wrangler = self.distributed_expansion_wrangler_factory( self.local_tree) - if current_rank == 0: - self.global_wrangler = self.distributed_expansion_wrangler_factory( - self.global_trav.tree) - else: - self.global_wrangler = None - # }}} def drive_dfmm(self, source_weights, _communicate_mpoles_via_allreduce=False): diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index a21a078..c5c15bb 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -29,7 +29,7 @@ from pyopencl.tools import dtype_to_ctype from mako.template import Template -def partition_work(traversal, total_rank, workload_weight): +def partition_work(perf_model, perf_counter, traversal, total_rank): """ This function assigns responsible boxes of each process. Each process is responsible for calculating the multiple expansions as well as @@ -37,87 +37,36 @@ def partition_work(traversal, total_rank, workload_weight): :arg traversal: The traversal object built on root containing all particles. :arg total_rank: The total number of processes. - :arg workload_weight: Workload coefficients of various operations (e.g. direct - evaluations, multipole-to-local, etc.) used for load balacing. :return: A numpy array of shape (total_rank,), where the ith element is an numpy array containing the responsible boxes of process i. 
""" tree = traversal.tree - # store the workload of each box - workload = np.zeros((tree.nboxes,), dtype=np.float64) - - # add workload of list 1 - for itarget_box, box_idx in enumerate(traversal.target_boxes): - box_ntargets = tree.box_target_counts_nonchild[box_idx] - start = traversal.neighbor_source_boxes_starts[itarget_box] - end = traversal.neighbor_source_boxes_starts[itarget_box + 1] - list1 = traversal.neighbor_source_boxes_lists[start:end] - particle_count = 0 - for ibox in list1: - particle_count += tree.box_source_counts_nonchild[ibox] - workload[box_idx] += box_ntargets * particle_count * workload_weight.direct - - # add workload of list 2 - for itarget_or_target_parent_boxes, box_idx in enumerate( - traversal.target_or_target_parent_boxes): - start = traversal.from_sep_siblings_starts[itarget_or_target_parent_boxes] - end = traversal.from_sep_siblings_starts[itarget_or_target_parent_boxes + 1] - workload[box_idx] += (end - start) * workload_weight.m2l - - for ilevel in range(tree.nlevels): - # add workload of list 3 far - for itarget_box, box_idx in enumerate( - traversal.target_boxes_sep_smaller_by_source_level[ilevel]): - box_ntargets = tree.box_target_counts_nonchild[box_idx] - start = traversal.from_sep_smaller_by_level[ilevel].starts[itarget_box] - end = traversal.from_sep_smaller_by_level[ilevel].starts[ - itarget_box + 1] - workload[box_idx] += (end - start) * box_ntargets - - # add workload of list 3 near - if tree.targets_have_extent and \ - traversal.from_sep_close_smaller_starts is not None: - for itarget_box, box_idx in enumerate(traversal.target_boxes): - box_ntargets = tree.box_target_counts_nonchild[box_idx] - start = traversal.from_sep_close_smaller_starts[itarget_box] - end = traversal.from_sep_close_smaller_starts[itarget_box + 1] - particle_count = 0 - for near_box_id in traversal.from_sep_close_smaller_lists[start:end]: - particle_count += tree.box_source_counts_nonchild[near_box_id] - workload[box_idx] += ( - box_ntargets * particle_count * workload_weight.direct) - - # add workload of list 4 - for itarget_or_target_parent_boxes, box_idx in enumerate( - traversal.target_or_target_parent_boxes): - start = traversal.from_sep_bigger_starts[itarget_or_target_parent_boxes] - end = traversal.from_sep_bigger_starts[itarget_or_target_parent_boxes + 1] - particle_count = 0 - for far_box_id in traversal.from_sep_bigger_lists[start:end]: - particle_count += tree.box_source_counts_nonchild[far_box_id] - workload[box_idx] += particle_count * workload_weight.p2l - - if tree.targets_have_extent and \ - traversal.from_sep_close_bigger_starts is not None: - box_ntargets = tree.box_target_counts_nonchild[box_idx] - start = traversal.from_sep_close_bigger_starts[ - itarget_or_target_parent_boxes] - end = traversal.from_sep_close_bigger_starts[ - itarget_or_target_parent_boxes + 1] - particle_count = 0 - for direct_box_id in traversal.from_sep_close_bigger_lists[start:end]: - particle_count += tree.box_source_counts_nonchild[direct_box_id] - workload[box_idx] += ( - box_ntargets * particle_count * workload_weight.direct) + time_increment = np.zeros((tree.nboxes,), dtype=np.float64) - for i in range(tree.nboxes): - # add workload of multipole calculation - workload[i] += tree.box_source_counts_nonchild[i] * workload_weight.multipole + param = perf_model.eval_direct_model() + direct_workload = perf_counter.count_direct(use_global_idx=True) + time_increment += (direct_workload * param[0] + param[1]) + + param = perf_model.multipole_to_local_model() + m2l_workload = 
perf_counter.count_m2l(use_global_idx=True) + time_increment += (m2l_workload * param[0]) + + param = perf_model.eval_multipoles_model() + m2p_workload = perf_counter.count_m2p(use_global_idx=True) + time_increment += (m2p_workload * param[0]) + + param = perf_model.form_locals_model() + p2l_workload = perf_counter.count_p2l(use_global_idx=True) + time_increment += (p2l_workload * param[0]) + + param = perf_model.eval_locals_model() + eval_part_workload = perf_counter.count_eval_part(use_global_idx=True) + time_increment += (eval_part_workload * param[0]) total_workload = 0 for i in range(tree.nboxes): - total_workload += workload[i] + total_workload += time_increment[i] # transform tree from level order to dfs order dfs_order = np.empty((tree.nboxes,), dtype=tree.box_id_dtype) @@ -139,7 +88,7 @@ def partition_work(traversal, total_rank, workload_weight): workload_count = 0 for i in range(tree.nboxes): box_idx = dfs_order[i] - workload_count += workload[box_idx] + workload_count += time_increment[box_idx] if (workload_count > (rank + 1)*total_workload/total_rank or i == tree.nboxes - 1): responsible_boxes_list[rank] = dfs_order[start:i+1] -- GitLab From e8b8f1e5f2d990985973edf4dd5c7e258d7fbf5f Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 25 Jul 2018 22:46:46 -0500 Subject: [PATCH 136/260] Bug fix --- boxtree/distributed/partition.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index c5c15bb..45176c6 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -46,7 +46,12 @@ def partition_work(perf_model, perf_counter, traversal, total_rank): param = perf_model.eval_direct_model() direct_workload = perf_counter.count_direct(use_global_idx=True) - time_increment += (direct_workload * param[0] + param[1]) + ndirect_source_boxes = np.zeros((tree.nboxes,), dtype=np.intp) + ndirect_source_boxes[traversal.target_boxes] = ( + traversal.neighbor_source_boxes_starts[1:] + - traversal.neighbor_source_boxes_starts[:-1] + ) + time_increment += (direct_workload * param[0] + ndirect_source_boxes * param[1]) param = perf_model.multipole_to_local_model() m2l_workload = perf_counter.count_m2l(use_global_idx=True) @@ -89,8 +94,8 @@ def partition_work(perf_model, perf_counter, traversal, total_rank): for i in range(tree.nboxes): box_idx = dfs_order[i] workload_count += time_increment[box_idx] - if (workload_count > (rank + 1)*total_workload/total_rank or - i == tree.nboxes - 1): + if (workload_count > (rank + 1)*total_workload/total_rank + or i == tree.nboxes - 1): responsible_boxes_list[rank] = dfs_order[start:i+1] start = i + 1 rank += 1 -- GitLab From c676278126afaa12931483c507a8a20a3cda2a33 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 26 Jul 2018 00:13:18 -0500 Subject: [PATCH 137/260] Improve performance model of list 3 and 4 --- boxtree/distributed/partition.py | 7 +++-- boxtree/distributed/perf_model.py | 46 ++++++++++++++++++++++++++----- examples/demo_perf_model.py | 18 ++++++++---- 3 files changed, 56 insertions(+), 15 deletions(-) diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index 45176c6..b1b761d 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -58,12 +58,13 @@ def partition_work(perf_model, perf_counter, traversal, total_rank): time_increment += (m2l_workload * param[0]) param = perf_model.eval_multipoles_model() - m2p_workload = perf_counter.count_m2p(use_global_idx=True) - 
time_increment += (m2p_workload * param[0]) + m2p_workload, m2p_nboxes = perf_counter.count_m2p(use_global_idx=True) + time_increment += (m2p_workload * param[0] + m2p_nboxes * param[1]) param = perf_model.form_locals_model() p2l_workload = perf_counter.count_p2l(use_global_idx=True) - time_increment += (p2l_workload * param[0]) + p2l_nboxes = perf_counter.count_p2l_source_boxes(use_global_idx=True) + time_increment += (p2l_workload * param[0] + p2l_nboxes * param[1]) param = perf_model.eval_locals_model() eval_part_workload = perf_counter.count_eval_part(use_global_idx=True) diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index dc344bb..5796626 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -250,8 +250,10 @@ class PerformanceCounter: if use_global_idx: nm2p = np.zeros((tree.nboxes,), dtype=np.intp) + nm2p_boxes = np.zeros((tree.nboxes,), dtype=np.intp) else: nm2p = np.zeros((len(trav.target_boxes),), dtype=np.intp) + nm2p_boxes = np.zeros((len(trav.target_boxes),), dtype=np.intp) for ilevel, sep_smaller_list in enumerate(trav.from_sep_smaller_by_level): ncoeffs_fmm_cur_level = self.parameters.ncoeffs_fmm_by_level[ilevel] @@ -266,10 +268,14 @@ class PerformanceCounter: if use_global_idx: nm2p[tgt_ibox] += workload + nm2p_boxes[tgt_ibox] += (end - start) else: nm2p[sep_smaller_list.nonempty_indices[itgt_box]] += workload + nm2p_boxes[sep_smaller_list.nonempty_indices[itgt_box]] += ( + end - start + ) - return nm2p + return nm2p, nm2p_boxes def count_p2l(self, use_global_idx=False): trav = self.traversal @@ -298,6 +304,20 @@ class PerformanceCounter: return np2l + def count_p2l_source_boxes(self, use_global_idx=False): + trav = self.traversal + tree = trav.tree + + p2l_nsource_boxes = (trav.from_sep_bigger_starts[1:] + - trav.from_sep_bigger_starts[:-1]) + + if use_global_idx: + np2l = np.zeros((tree.nboxes,), dtype=np.intp) + np2l[trav.target_or_target_parent_boxes] = p2l_nsource_boxes + return np2l + else: + return p2l_nsource_boxes + def count_eval_part(self, use_global_idx=False): trav = self.traversal tree = trav.tree @@ -340,13 +360,17 @@ class PerformanceModel: counter = PerformanceCounter(traversal, wrangler, self.uses_pde_expansions) # Record useful metadata for assembling performance data + nm2p, nm2p_boxes = counter.count_m2p() + timing_data = { "nterms_fmm_total": counter.count_nters_fmm_total(), "direct_workload": np.sum(counter.count_direct()), "direct_nsource_boxes": traversal.neighbor_source_boxes_starts[-1], "m2l_workload": np.sum(counter.count_m2l()), - "m2p_workload": np.sum(counter.count_m2p()), + "m2p_workload": np.sum(nm2p), + "m2p_nboxes": np.sum(nm2p_boxes), "p2l_workload": np.sum(counter.count_p2l()), + "p2l_nboxes": np.sum(counter.count_p2l_source_boxes()), "eval_part_workload": np.sum(counter.count_eval_part()) } @@ -382,13 +406,13 @@ class PerformanceModel: def eval_multipoles_model(self, wall_time=True): return self.linear_regression( - "eval_multipoles", ["m2p_workload"], + "eval_multipoles", ["m2p_workload", "m2p_nboxes"], wall_time=wall_time ) def form_locals_model(self, wall_time=True): return self.linear_regression( - "form_locals", ["p2l_workload"], + "form_locals", ["p2l_workload", "p2l_nboxes"], wall_time=wall_time ) @@ -490,9 +514,14 @@ class PerformanceModel: param = self.eval_multipoles_model(wall_time=wall_time) - m2p_workload = np.sum(eval_counter.count_m2p()) + nm2p, nm2p_boxes = eval_counter.count_m2p() - predict_timing["eval_multipoles"] = m2p_workload * param[0] + 
param[1] + m2p_workload = np.sum(nm2p) + m2p_boxes = np.sum(nm2p_boxes) + + predict_timing["eval_multipoles"] = ( + m2p_workload * param[0] + m2p_boxes * param[1] + param[2] + ) # }}} @@ -501,8 +530,11 @@ class PerformanceModel: param = self.form_locals_model(wall_time=wall_time) p2l_workload = np.sum(eval_counter.count_p2l()) + p2l_nboxes = np.sum(eval_counter.count_p2l_source_boxes()) - predict_timing["form_locals"] = p2l_workload * param[0] + param[1] + predict_timing["form_locals"] = ( + p2l_workload * param[0] + p2l_nboxes * param[1] + param[2] + ) # }}} diff --git a/examples/demo_perf_model.py b/examples/demo_perf_model.py index a5c85b9..f4a7e4e 100644 --- a/examples/demo_perf_model.py +++ b/examples/demo_perf_model.py @@ -21,11 +21,19 @@ def fmm_level_to_nterms(tree, level): traversals = [] -for nsources, ntargets, dims in [(6000, 6000, 3), - (9000, 9000, 3), - (12000, 12000, 3), - (15000, 15000, 3), - (20000, 20000, 3)]: +test_cases = [ + (9000, 9000, 3), + (9000, 9000, 3), + (12000, 12000, 3), + (12000, 12000, 3), + (15000, 15000, 3), + (15000, 15000, 3), + (18000, 18000, 3), + (18000, 18000, 3), + (25000, 25000, 3) # this last test case is for evaluation +] + +for nsources, ntargets, dims in test_cases: from boxtree.distributed.perf_model import generate_random_traversal traversals.append(generate_random_traversal( -- GitLab From fafd186d832f74162045e4b7072338e16fd0b469 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 26 Jul 2018 09:28:18 -0500 Subject: [PATCH 138/260] Add save/load to performance model --- boxtree/distributed/perf_model.py | 17 ++++ examples/demo_perf_model.py | 125 ++++++++++++++++++------------ 2 files changed, 93 insertions(+), 49 deletions(-) diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index 5796626..12eba3a 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -27,6 +27,7 @@ import pyopencl as cl import numpy as np from collections import namedtuple from pyopencl.clrandom import PhiloxGenerator +import pickle def generate_random_traversal(context, nsources, ntargets, dims, dtype): @@ -549,3 +550,19 @@ class PerformanceModel: # }}} return predict_timing + + def save(self, filename): + with open(filename, 'wb') as f: + pickle.dump(self.time_result, f) + print("Save {} records to disk.".format(len(self.time_result))) + + def load(self, filename): + try: + with open(filename, 'rb') as f: + loaded_result = pickle.load(f) + self.time_result.extend(loaded_result) + print("Load {} records from disk.".format(len(loaded_result))) + except IOError: + print("Cannot open file '" + filename + "'") + except EOFError: + print("Nothing to read from file.") diff --git a/examples/demo_perf_model.py b/examples/demo_perf_model.py index f4a7e4e..7946d36 100644 --- a/examples/demo_perf_model.py +++ b/examples/demo_perf_model.py @@ -4,6 +4,7 @@ import numpy as np from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler import functools from boxtree.distributed.perf_model import PerformanceModel, PerformanceCounter +from boxtree.distributed.perf_model import generate_random_traversal from boxtree.fmm import drive_fmm from pyopencl.clrandom import PhiloxGenerator @@ -11,78 +12,104 @@ context = cl.create_some_context() queue = cl.CommandQueue(context) dtype = np.float64 helmholtz_k = 0 +dims = 3 def fmm_level_to_nterms(tree, level): return max(level, 3) -# {{{ Generate traversal objects for forming models and verification +wrangler_factory = functools.partial( + FMMLibExpansionWrangler, helmholtz_k=0, 
fmm_level_to_nterms=fmm_level_to_nterms) -traversals = [] -test_cases = [ - (9000, 9000, 3), - (9000, 9000, 3), - (12000, 12000, 3), - (12000, 12000, 3), - (15000, 15000, 3), - (15000, 15000, 3), - (18000, 18000, 3), - (18000, 18000, 3), - (25000, 25000, 3) # this last test case is for evaluation -] +def train_model(): + traversals = [] -for nsources, ntargets, dims in test_cases: + test_cases = [ + (9000, 9000), + (9000, 9000), + (12000, 12000), + (12000, 12000), + (15000, 15000), + (15000, 15000), + (18000, 18000), + (18000, 18000) + ] - from boxtree.distributed.perf_model import generate_random_traversal - traversals.append(generate_random_traversal( - context, nsources, ntargets, dims, dtype - )) + for nsources, ntargets in test_cases: + traversals.append(generate_random_traversal( + context, nsources, ntargets, dims, dtype + )) -# }}} + ntraversals = len(traversals) + model = PerformanceModel(context, wrangler_factory, True, drive_fmm) -wrangler_factory = functools.partial( - FMMLibExpansionWrangler, helmholtz_k=0, fmm_level_to_nterms=fmm_level_to_nterms) + model.load('model') -ntraversals = len(traversals) -model = PerformanceModel(context, wrangler_factory, True, drive_fmm) -for i in range(ntraversals - 1): - model.time_performance(traversals[i]) + for i in range(ntraversals - 1): + model.time_performance(traversals[i]) -eval_traversal = traversals[-1] -eval_wrangler = wrangler_factory(eval_traversal.tree) -dimensions = eval_traversal.tree.dimensions -eval_counter = PerformanceCounter(eval_traversal, eval_wrangler, True) + model.save('model') -wall_time = True -predict_timing = model.predict_time(eval_traversal, eval_counter, - wall_time=wall_time) +def eval_model(): + nsources = 25000 + ntargets = 25000 + wall_time = True -# {{{ Actual timing + eval_traversal = generate_random_traversal( + context, nsources, ntargets, dims, dtype) -true_timing = {} + eval_wrangler = wrangler_factory(eval_traversal.tree) -rng = PhiloxGenerator(context) -source_weights = rng.uniform( - queue, eval_traversal.tree.nsources, eval_traversal.tree.coord_dtype).get() + # {{{ Predict timing -_ = drive_fmm(eval_traversal, eval_wrangler, source_weights, timing_data=true_timing) + eval_counter = PerformanceCounter(eval_traversal, eval_wrangler, True) -# }}} + model = PerformanceModel(context, wrangler_factory, True, drive_fmm) + model.load('model') + predict_timing = model.predict_time(eval_traversal, eval_counter, + wall_time=wall_time) -for field in ["eval_direct", "multipole_to_local", "eval_multipoles", "form_locals", - "eval_locals"]: - predict_time_field = predict_timing[field] + # }}} - if wall_time: - true_time_field = true_timing[field].wall_elapsed - else: - true_time_field = true_timing[field].process_elapsed + # {{{ Actual timing + + true_timing = {} + + rng = PhiloxGenerator(context) + source_weights = rng.uniform( + queue, eval_traversal.tree.nsources, eval_traversal.tree.coord_dtype).get() + + drive_fmm(eval_traversal, eval_wrangler, source_weights, timing_data=true_timing) - diff = abs(predict_time_field - true_time_field) + # }}} - print(field + ": predict " + str(predict_time_field) + " actual " + - str(true_time_field) + " error " + str(diff / true_time_field)) + for field in ["eval_direct", "multipole_to_local", "eval_multipoles", + "form_locals", "eval_locals"]: + predict_time_field = predict_timing[field] + + if wall_time: + true_time_field = true_timing[field].wall_elapsed + else: + true_time_field = true_timing[field].process_elapsed + + diff = abs(predict_time_field - true_time_field) + 
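The print that follows completes a per-stage error report (predicted time, measured time, and relative error); the script itself is meant to be run first with 'train' and then with 'eval', as selected in the __main__ block at the end of this file. A standalone sketch of the same computation, with a made-up helper name and sample numbers:

    def report_stage_error(stage, predicted, measured):
        # Print predicted vs. measured time and the relative error for one
        # FMM stage, mirroring the loop body above.
        err = abs(predicted - measured)
        print("{0}: predict {1} actual {2} error {3}".format(
            stage, predicted, measured, err / measured))

    report_stage_error("eval_direct", 0.92, 1.00)  # made-up timings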
+ print(field + ": predict " + str(predict_time_field) + " actual " + + str(true_time_field) + " error " + str(diff / true_time_field)) + + +if __name__ == '__main__': + import sys + if len(sys.argv) != 2: + raise RuntimeError("Please provide exact 1 argument") + + if sys.argv[1] == 'train': + train_model() + elif sys.argv[1] == 'eval': + eval_model() + else: + raise RuntimeError("Do not recognize the argument") -- GitLab From 7595ffcce938275e67808fe7df43513fe69841c4 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 26 Jul 2018 11:10:59 -0500 Subject: [PATCH 139/260] Use robust linear regression --- boxtree/distributed/perf_model.py | 7 ++++++- examples/demo_perf_model.py | 16 ++++------------ 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index 12eba3a..60c9ed5 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -461,10 +461,15 @@ class PerformanceModel: coeff_matrix[:, -1] = 1 + """ from numpy.linalg import lstsq coeff = lstsq(coeff_matrix, dependent_value, rcond=-1)[0] + """ + import statsmodels.api as sm + rlm_model = sm.RLM(dependent_value, coeff_matrix) + rlm_result = rlm_model.fit() - return coeff + return rlm_result.params def time_random_traversals(self): context = self.cl_context diff --git a/examples/demo_perf_model.py b/examples/demo_perf_model.py index 7946d36..318938e 100644 --- a/examples/demo_perf_model.py +++ b/examples/demo_perf_model.py @@ -24,7 +24,8 @@ wrangler_factory = functools.partial( def train_model(): - traversals = [] + model = PerformanceModel(context, wrangler_factory, True, drive_fmm) + model.load('model') test_cases = [ (9000, 9000), @@ -38,17 +39,8 @@ def train_model(): ] for nsources, ntargets in test_cases: - traversals.append(generate_random_traversal( - context, nsources, ntargets, dims, dtype - )) - - ntraversals = len(traversals) - model = PerformanceModel(context, wrangler_factory, True, drive_fmm) - - model.load('model') - - for i in range(ntraversals - 1): - model.time_performance(traversals[i]) + trav = generate_random_traversal(context, nsources, ntargets, dims, dtype) + model.time_performance(trav) model.save('model') -- GitLab From 81070f67226515f74749f08e0b1eb039ef2cbb39 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 26 Jul 2018 14:03:09 -0500 Subject: [PATCH 140/260] Allow distributed fmm to use model on disk, bug fix --- boxtree/distributed/__init__.py | 8 ++++++-- boxtree/distributed/partition.py | 4 ++++ boxtree/distributed/perf_model.py | 2 +- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 2bfd2e2..917a8c0 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -56,7 +56,7 @@ def dtype_to_mpi(dtype): class DistributedFMMInfo(object): def __init__(self, queue, global_trav, distributed_expansion_wrangler_factory, - comm=MPI.COMM_WORLD): + model_filename=None, comm=MPI.COMM_WORLD): self.global_trav = global_trav self.distributed_expansion_wrangler_factory = \ @@ -96,7 +96,11 @@ class DistributedFMMInfo(object): distributed_expansion_wrangler_factory, True, drive_fmm ) - model.time_random_traversals() + if model_filename is not None: + model.load(model_filename) + + if len(model.time_result) == 0: + model.time_random_traversals() counter = PerformanceCounter(global_trav, self.global_wrangler, True) diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index b1b761d..0b24ecd 
100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -93,6 +93,10 @@ def partition_work(perf_model, perf_counter, traversal, total_rank): start = 0 workload_count = 0 for i in range(tree.nboxes): + if rank + 1 == total_rank: + responsible_boxes_list[rank] = dfs_order[start:tree.nboxes] + break + box_idx = dfs_order[i] workload_count += time_increment[box_idx] if (workload_count > (rank + 1)*total_workload/total_rank diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index 60c9ed5..f1920f1 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -356,7 +356,7 @@ class PerformanceModel: self.rng = PhiloxGenerator(cl_context) def time_performance(self, traversal): - wrangler = self.wrangler_factory(traversal.tree) + wrangler = self.wrangler_factory(tree=traversal.tree) counter = PerformanceCounter(traversal, wrangler, self.uses_pde_expansions) -- GitLab From fadb731c06ada6ab3a25cff5bdb66ec7bc104fa5 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 30 Jul 2018 16:27:54 -0500 Subject: [PATCH 141/260] Improve logging --- boxtree/distributed/calculation.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 80beef7..7b83bff 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -256,7 +256,9 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): comm_pattern.advance() stats["total_time"] = time() - t_start - logger.info("communicate multipoles: done in %.2f s" % stats["total_time"]) + logger.info("Communicate multipoles: done in {0:.4f} sec.".format( + stats["total_time"] + )) if return_stats: return stats @@ -281,6 +283,8 @@ def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD): total_rank = comm.Get_size() if current_rank == 0: + start_time = time.time() + weight_req = [] local_src_weights = np.empty((total_rank,), dtype=object) @@ -295,6 +299,10 @@ def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD): MPI.Request.Waitall(weight_req) + logger.info("Distribute source weights in {0:.4f} sec.".format( + time.time() - start_time + )) + local_src_weights = local_src_weights[0] else: local_src_weights = comm.recv(source=0, tag=MPITags["DIST_WEIGHT"]) @@ -381,6 +389,8 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, # }}} + fmm_eval_start_time = time.time() + # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") logger.debug("direct evaluation from neighbor source boxes ('list 1')") @@ -477,6 +487,10 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, # }}} + logger.info("FMM Evaluation finished on process {0} in {1:.4f} sec.".format( + current_rank, time.time() - fmm_eval_start_time + )) + # {{{ Worker processes send calculated potentials to the root process potentials_mpi_type = dtype_to_mpi(potentials.dtype) @@ -518,8 +532,8 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, if current_rank == 0: - logger.info("Distributed FMM evaluation completes in {} sec.".format( - str(time.time() - start_time) + logger.info("Distributed FMM evaluation completes in {0:.4f} sec.".format( + time.time() - start_time )) return result -- GitLab From 1bf67f8dd1979ce70821d52f7999b62edfca87b3 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 30 Jul 2018 16:34:52 -0500 Subject: [PATCH 
142/260] More logging --- boxtree/distributed/calculation.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 7b83bff..87db43d 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -496,6 +496,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, potentials_mpi_type = dtype_to_mpi(potentials.dtype) if current_rank == 0: + receive_pot_start_time = time.time() potentials_all_ranks = np.empty((total_rank,), dtype=object) potentials_all_ranks[0] = potentials @@ -506,6 +507,9 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, comm.Recv([potentials_all_ranks[irank], potentials_mpi_type], source=irank, tag=MPITags["GATHER_POTENTIALS"]) + + logger.info("Receive potentials from worker processes in {0:.4f} sec." + .format(time.time() - receive_pot_start_time)) else: comm.Send([potentials, potentials_mpi_type], dest=0, tag=MPITags["GATHER_POTENTIALS"]) @@ -515,6 +519,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, # {{{ Assemble potentials from worker processes together on the root process if current_rank == 0: + post_processing_start_time = time.time() potentials = np.empty((global_wrangler.tree.ntargets,), dtype=potentials.dtype) @@ -528,6 +533,10 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, logger.debug("finalize potentials") result = global_wrangler.finalize_potentials(result) + logger.info("Post processing in {0:.4f} sec.".format( + time.time() - post_processing_start_time + )) + # }}} if current_rank == 0: -- GitLab From afef2af749c576b2b698b4042d17828250aaef4a Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 30 Jul 2018 17:02:17 -0500 Subject: [PATCH 143/260] Add barriers for accurate timing --- boxtree/distributed/__init__.py | 6 +- boxtree/distributed/calculation.py | 90 +++++++++++++++++++----------- 2 files changed, 62 insertions(+), 34 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 917a8c0..b609022 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -157,10 +157,12 @@ class DistributedFMMInfo(object): # }}} - def drive_dfmm(self, source_weights, _communicate_mpoles_via_allreduce=False): + def drive_dfmm(self, source_weights, _communicate_mpoles_via_allreduce=False, + record_timing=False): from boxtree.distributed.calculation import calculate_pot return calculate_pot( self.local_wrangler, self.global_wrangler, self.local_trav, source_weights, self.local_data, - _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce + _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce, + record_timing=record_timing ) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 87db43d..107cb76 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -144,7 +144,8 @@ class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): # {{{ Communicate mpoles -def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): +def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False, + record_timing=False): """Based on Algorithm 3: Reduce and Scatter in [1]. 
The main idea is to mimic a allreduce as done on a hypercube network, but to @@ -164,9 +165,11 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): stats = {} - from time import time - t_start = time() - logger.debug("communicate multipoles: start") + if record_timing: + comm.Barrier() + from time import time + t_start = time() + logger.debug("communicate multipoles: start") # contributing_boxes: # @@ -255,10 +258,13 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): comm_pattern.advance() - stats["total_time"] = time() - t_start - logger.info("Communicate multipoles: done in {0:.4f} sec.".format( - stats["total_time"] - )) + if record_timing: + stats["total_time"] = time() - t_start + logger.info("Communicate multipoles: done in {0:.4f} sec.".format( + stats["total_time"] + )) + else: + stats["total_time"] = None if return_stats: return stats @@ -268,7 +274,8 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): # {{{ Distribute source weights -def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD): +def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD, + record_timing=False): """ This function transfers needed source_weights from root process to each worker process in communicator :arg comm. @@ -283,7 +290,8 @@ def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD): total_rank = comm.Get_size() if current_rank == 0: - start_time = time.time() + if record_timing: + start_time = time.time() weight_req = [] local_src_weights = np.empty((total_rank,), dtype=object) @@ -299,9 +307,10 @@ def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD): MPI.Request.Waitall(weight_req) - logger.info("Distribute source weights in {0:.4f} sec.".format( - time.time() - start_time - )) + if record_timing: + logger.info("Distribute source weights in {0:.4f} sec.".format( + time.time() - start_time + )) local_src_weights = local_src_weights[0] else: @@ -316,7 +325,8 @@ def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD): def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, local_data, comm=MPI.COMM_WORLD, - _communicate_mpoles_via_allreduce=False): + _communicate_mpoles_via_allreduce=False, + record_timing=False): """ Calculate potentials for targets on distributed memory machines. This function needs to be called collectively by all process in :arg comm. @@ -333,6 +343,9 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, :param _communicate_mpoles_via_allreduce: Use MPI allreduce for communicating multipole expressions. Using MPI allreduce is slower but might be helpful for debugging purpose. + :param record_timing: This argument controls whether to log various timing data. + Note setting this option to true will incur minor performance degradation due + to the usage of barriers. :return: On the root process, this function returns calculated potentials. On worker processes, this function returns None. 
""" @@ -341,8 +354,10 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, current_rank = comm.Get_rank() total_rank = comm.Get_size() - if current_rank == 0: - start_time = time.time() + if record_timing: + comm.Barrier() + if current_rank == 0: + start_time = time.time() # {{{ Distribute source weights @@ -351,7 +366,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, source_weights = source_weights[global_wrangler.tree.user_source_ids] local_src_weights = distribute_source_weights( - source_weights, local_data, comm=comm + source_weights, local_data, comm=comm, record_timing=record_timing ) # }}} @@ -385,11 +400,14 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, comm.Allreduce(mpole_exps, mpole_exps_all) mpole_exps = mpole_exps_all else: - communicate_mpoles(local_wrangler, comm, local_trav, mpole_exps) + communicate_mpoles(local_wrangler, comm, local_trav, mpole_exps, + record_timing=record_timing) # }}} - fmm_eval_start_time = time.time() + if record_timing: + comm.Barrier() + fmm_eval_start_time = time.time() # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") @@ -487,16 +505,21 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, # }}} - logger.info("FMM Evaluation finished on process {0} in {1:.4f} sec.".format( - current_rank, time.time() - fmm_eval_start_time - )) + if record_timing: + logger.info("FMM Evaluation finished on process {0} in {1:.4f} sec.".format( + current_rank, time.time() - fmm_eval_start_time + )) # {{{ Worker processes send calculated potentials to the root process potentials_mpi_type = dtype_to_mpi(potentials.dtype) + if record_timing: + comm.Barrier() + if current_rank == 0: - receive_pot_start_time = time.time() + if record_timing: + receive_pot_start_time = time.time() potentials_all_ranks = np.empty((total_rank,), dtype=object) potentials_all_ranks[0] = potentials @@ -508,8 +531,9 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, comm.Recv([potentials_all_ranks[irank], potentials_mpi_type], source=irank, tag=MPITags["GATHER_POTENTIALS"]) - logger.info("Receive potentials from worker processes in {0:.4f} sec." - .format(time.time() - receive_pot_start_time)) + if record_timing: + logger.info("Receive potentials from worker processes in {0:.4f} sec." 
+ .format(time.time() - receive_pot_start_time)) else: comm.Send([potentials, potentials_mpi_type], dest=0, tag=MPITags["GATHER_POTENTIALS"]) @@ -519,7 +543,8 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, # {{{ Assemble potentials from worker processes together on the root process if current_rank == 0: - post_processing_start_time = time.time() + if record_timing: + post_processing_start_time = time.time() potentials = np.empty((global_wrangler.tree.ntargets,), dtype=potentials.dtype) @@ -533,17 +558,18 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, logger.debug("finalize potentials") result = global_wrangler.finalize_potentials(result) - logger.info("Post processing in {0:.4f} sec.".format( - time.time() - post_processing_start_time - )) + if record_timing: + logger.info("Post processing in {0:.4f} sec.".format( + time.time() - post_processing_start_time + )) # }}} if current_rank == 0: - logger.info("Distributed FMM evaluation completes in {0:.4f} sec.".format( - time.time() - start_time - )) + if record_timing: + logger.info("Distributed FMM evaluation completes in {0:.4f} sec." + .format(time.time() - start_time)) return result -- GitLab From aabb48dec60bfcbf155877b1e5e470d0a1191cf0 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 31 Jul 2018 18:22:28 -0500 Subject: [PATCH 144/260] Save and load the perf model to json file --- boxtree/distributed/perf_model.py | 65 +++++++++++++++++++++++++++++-- 1 file changed, 62 insertions(+), 3 deletions(-) diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index f1920f1..c0619f1 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -28,6 +28,8 @@ import numpy as np from collections import namedtuple from pyopencl.clrandom import PhiloxGenerator import pickle +from boxtree.fmm import TimingResult +import json def generate_random_traversal(context, nsources, ntargets, dims, dtype): @@ -126,7 +128,7 @@ class PerformanceCounter: """ tree = self.traversal.tree - nsources_by_level = np.empty((tree.nlevels,), dtype=np.int32) + nsources_by_level = np.empty((tree.nlevels,), dtype=np.intp) for ilevel in range(tree.nlevels): start_ibox = tree.level_start_box_nrs[ilevel] @@ -164,10 +166,10 @@ class PerformanceCounter: tree = traversal.tree if use_global_idx: - direct_workload = np.zeros((tree.nboxes,), dtype=np.int64) + direct_workload = np.zeros((tree.nboxes,), dtype=np.intp) else: ntarget_boxes = len(traversal.target_boxes) - direct_workload = np.zeros((ntarget_boxes,), dtype=np.int64) + direct_workload = np.zeros((ntarget_boxes,), dtype=np.intp) for itgt_box, tgt_ibox in enumerate(traversal.target_boxes): ntargets = tree.box_target_counts_nonchild[tgt_ibox] @@ -571,3 +573,60 @@ class PerformanceModel: print("Cannot open file '" + filename + "'") except EOFError: print("Nothing to read from file.") + + def loadjson(self, filename): + try: + with open(filename, 'r') as f: + loaded_results = json.load(f) + for current_result in loaded_results: + converted_result = {} + + for field_name in current_result: + entry = current_result[field_name] + + if isinstance(entry, (int, np.integer)): + converted_result[field_name] = entry + + elif isinstance(entry, dict): + converted_result[field_name] = TimingResult( + entry['wall_elapsed'], + entry['process_elapsed'] + ) + + else: + raise RuntimeError("Unknown type loaded") + + self.time_result.append(converted_result) + + except IOError: + print("Cannot open file '" + filename + "'") 
+ except EOFError: + print("Nothing to read from file.") + + def savejson(self, filename): + output = [] + + for current_result in self.time_result: + current_output = {} + + for field_name in current_result: + entry = current_result[field_name] + + if isinstance(entry, (int, np.integer)): + current_output[field_name] = int(entry) + + elif isinstance(entry, TimingResult): + current_output[field_name] = { + 'wall_elapsed': entry.wall_elapsed, + 'process_elapsed': entry.process_elapsed + } + + else: + print(type(entry)) + raise RuntimeError("Unknown type in result") + + output.append(current_output) + + with open(filename, 'w') as f: + json.dump(output, f) + print("Save {} records to disk.".format(len(self.time_result))) -- GitLab From 62025c107aa7112b1323bd3322cf53ff96288c9d Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 31 Jul 2018 18:54:16 -0500 Subject: [PATCH 145/260] Add default performance model --- boxtree/distributed/__init__.py | 8 ++++++-- boxtree/distributed/default_perf_model.json | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 boxtree/distributed/default_perf_model.json diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index b609022..9a0861f 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -97,10 +97,14 @@ class DistributedFMMInfo(object): True, drive_fmm ) if model_filename is not None: - model.load(model_filename) + model.loadjson(model_filename) if len(model.time_result) == 0: - model.time_random_traversals() + import os + current_dir = os.path.dirname(os.path.abspath(__file__)) + default_perf_file_path = os.path.join( + current_dir, 'default_perf_model.json') + model.loadjson(default_perf_file_path) counter = PerformanceCounter(global_trav, self.global_wrangler, True) diff --git a/boxtree/distributed/default_perf_model.json b/boxtree/distributed/default_perf_model.json new file mode 100644 index 0000000..5c6a449 --- /dev/null +++ b/boxtree/distributed/default_perf_model.json @@ -0,0 +1 @@ +[{"nterms_fmm_total": 1000000, "direct_workload": 5064570, "direct_nsource_boxes": 56907, "m2l_workload": 879645000, "m2p_workload": 80054000, "m2p_nboxes": 148660, "p2l_workload": 58602800, "p2l_nboxes": 84384, "eval_part_workload": 1000000, "form_multipoles": {"wall_elapsed": 0.14289305033162236, "process_elapsed": 0.09281096999999994}, "coarsen_multipoles": {"wall_elapsed": 0.14258357882499695, "process_elapsed": 0.14258194200000007}, "eval_direct": {"wall_elapsed": 2.344248099718243, "process_elapsed": 2.378837522999998}, "multipole_to_local": {"wall_elapsed": 20.023932092823088, "process_elapsed": 19.927228304}, "eval_multipoles": {"wall_elapsed": 3.287798510864377, "process_elapsed": 3.2817736969999984}, "form_locals": {"wall_elapsed": 2.141686537768692, "process_elapsed": 2.1379926530000013}, "refine_locals": {"wall_elapsed": 0.10454159695655107, "process_elapsed": 0.10454057600000155}, "eval_locals": {"wall_elapsed": 0.05774546507745981, "process_elapsed": 0.05774528099999898}}, {"nterms_fmm_total": 2000000, "direct_workload": 14111527, "direct_nsource_boxes": 105337, "m2l_workload": 1702853000, "m2p_workload": 202584200, "m2p_nboxes": 311728, "p2l_workload": 120805600, "p2l_nboxes": 146698, "eval_part_workload": 2000000, "form_multipoles": {"wall_elapsed": 0.10452838707715273, "process_elapsed": 0.10452839599999919}, "coarsen_multipoles": {"wall_elapsed": 0.21106432611122727, "process_elapsed": 0.21548113200000074}, "eval_direct": {"wall_elapsed": 4.7187186549417675, "process_elapsed": 
4.702423059000012}, "multipole_to_local": {"wall_elapsed": 40.50634287390858, "process_elapsed": 40.512730489999996}, "eval_multipoles": {"wall_elapsed": 9.251218600198627, "process_elapsed": 9.216514626000006}, "form_locals": {"wall_elapsed": 4.5346991759724915, "process_elapsed": 4.524306796000005}, "refine_locals": {"wall_elapsed": 0.1888848263770342, "process_elapsed": 0.19806307300000014}, "eval_locals": {"wall_elapsed": 0.10716611426323652, "process_elapsed": 0.10716471899999647}}, {"nterms_fmm_total": 3000000, "direct_workload": 24428758, "direct_nsource_boxes": 223172, "m2l_workload": 4011675000, "m2p_workload": 670713600, "m2p_nboxes": 735176, "p2l_workload": 213069300, "p2l_nboxes": 259667, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.1347822081297636, "process_elapsed": 0.13477609299999926}, "coarsen_multipoles": {"wall_elapsed": 0.3603002396412194, "process_elapsed": 0.3602957989999993}, "eval_direct": {"wall_elapsed": 11.283871918916702, "process_elapsed": 11.367921355000007}, "multipole_to_local": {"wall_elapsed": 92.28659388935193, "process_elapsed": 92.02833609099999}, "eval_multipoles": {"wall_elapsed": 24.550387303810567, "process_elapsed": 24.45371944300001}, "form_locals": {"wall_elapsed": 7.136191665194929, "process_elapsed": 7.125686079000019}, "refine_locals": {"wall_elapsed": 0.42898024804890156, "process_elapsed": 0.42143030599999065}, "eval_locals": {"wall_elapsed": 0.27421190217137337, "process_elapsed": 0.2742086969999775}}, {"nterms_fmm_total": 4000000, "direct_workload": 29106936, "direct_nsource_boxes": 259688, "m2l_workload": 4679517000, "m2p_workload": 737745100, "m2p_nboxes": 887508, "p2l_workload": 262131700, "p2l_nboxes": 277715, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.2584445010870695, "process_elapsed": 0.25844339400001104}, "coarsen_multipoles": {"wall_elapsed": 0.578245738055557, "process_elapsed": 0.5796970419999923}, "eval_direct": {"wall_elapsed": 13.274465977214277, "process_elapsed": 13.333639903999995}, "multipole_to_local": {"wall_elapsed": 111.34071257105097, "process_elapsed": 110.98862347400001}, "eval_multipoles": {"wall_elapsed": 27.020350001286715, "process_elapsed": 27.001812247000032}, "form_locals": {"wall_elapsed": 9.117257341276854, "process_elapsed": 9.09303535500004}, "refine_locals": {"wall_elapsed": 0.4617841048166156, "process_elapsed": 0.45366404100002455}, "eval_locals": {"wall_elapsed": 0.29467571387067437, "process_elapsed": 0.29467265900001394}}, {"nterms_fmm_total": 5000000, "direct_workload": 47923958, "direct_nsource_boxes": 321217, "m2l_workload": 6098447000, "m2p_workload": 1049959000, "m2p_nboxes": 1058414, "p2l_workload": 250873800, "p2l_nboxes": 307399, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.3379204752855003, "process_elapsed": 0.3395408740000221}, "coarsen_multipoles": {"wall_elapsed": 0.7178796431981027, "process_elapsed": 0.7149586010000348}, "eval_direct": {"wall_elapsed": 15.559152766596526, "process_elapsed": 15.673222753999994}, "multipole_to_local": {"wall_elapsed": 140.75012073572725, "process_elapsed": 140.313214335}, "eval_multipoles": {"wall_elapsed": 36.27695396123454, "process_elapsed": 36.167522549999944}, "form_locals": {"wall_elapsed": 9.149377660825849, "process_elapsed": 9.114768321999918}, "refine_locals": {"wall_elapsed": 0.48122364515438676, "process_elapsed": 0.481217474999994}, "eval_locals": {"wall_elapsed": 0.3034700150601566, "process_elapsed": 0.3034582699999646}}, {"nterms_fmm_total": 6000000, 
"direct_workload": 52175740, "direct_nsource_boxes": 365978, "m2l_workload": 6963407000, "m2p_workload": 1228104100, "m2p_nboxes": 1238954, "p2l_workload": 320901400, "p2l_nboxes": 340870, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.2821324490942061, "process_elapsed": 0.2821300870000414}, "coarsen_multipoles": {"wall_elapsed": 0.7399735772050917, "process_elapsed": 0.7435758610000676}, "eval_direct": {"wall_elapsed": 17.014956682454795, "process_elapsed": 17.00256681099995}, "multipole_to_local": {"wall_elapsed": 159.9870855966583, "process_elapsed": 159.52702093099992}, "eval_multipoles": {"wall_elapsed": 44.760278538800776, "process_elapsed": 44.609669542999995}, "form_locals": {"wall_elapsed": 10.070859387051314, "process_elapsed": 10.042829486999949}, "refine_locals": {"wall_elapsed": 0.6633619191125035, "process_elapsed": 0.6633519270000079}, "eval_locals": {"wall_elapsed": 0.32876442186534405, "process_elapsed": 0.3287510919999477}}, {"nterms_fmm_total": 7000000, "direct_workload": 67754792, "direct_nsource_boxes": 399866, "m2l_workload": 7699296000, "m2p_workload": 1423135200, "m2p_nboxes": 1352554, "p2l_workload": 330677000, "p2l_nboxes": 356695, "eval_part_workload": 7000000, "form_multipoles": {"wall_elapsed": 0.34098224015906453, "process_elapsed": 0.3368965309999794}, "coarsen_multipoles": {"wall_elapsed": 0.8644498628564179, "process_elapsed": 0.8597816169999533}, "eval_direct": {"wall_elapsed": 20.44144478905946, "process_elapsed": 20.493337145999817}, "multipole_to_local": {"wall_elapsed": 176.60086810868233, "process_elapsed": 175.99911542799998}, "eval_multipoles": {"wall_elapsed": 47.3556498978287, "process_elapsed": 47.21309775099985}, "form_locals": {"wall_elapsed": 12.22528616571799, "process_elapsed": 12.173807162000003}, "refine_locals": {"wall_elapsed": 0.7411672458983958, "process_elapsed": 0.7411488990001089}, "eval_locals": {"wall_elapsed": 0.4227954070083797, "process_elapsed": 0.42279098400013027}}, {"nterms_fmm_total": 8000000, "direct_workload": 82707417, "direct_nsource_boxes": 441394, "m2l_workload": 8617704000, "m2p_workload": 1685913200, "m2p_nboxes": 1498566, "p2l_workload": 348565200, "p2l_nboxes": 382521, "eval_part_workload": 8000000, "form_multipoles": {"wall_elapsed": 0.43783256597816944, "process_elapsed": 0.4303805439999451}, "coarsen_multipoles": {"wall_elapsed": 0.9156720908358693, "process_elapsed": 0.9222488749999229}, "eval_direct": {"wall_elapsed": 21.642432290129364, "process_elapsed": 21.649963951000245}, "multipole_to_local": {"wall_elapsed": 200.9743533632718, "process_elapsed": 200.288029092}, "eval_multipoles": {"wall_elapsed": 54.97431806195527, "process_elapsed": 54.881239913999934}, "form_locals": {"wall_elapsed": 10.682431893888861, "process_elapsed": 10.645913471000085}, "refine_locals": {"wall_elapsed": 0.7400978719815612, "process_elapsed": 0.7279401089999737}, "eval_locals": {"wall_elapsed": 0.47856780607253313, "process_elapsed": 0.4785622940000849}}, {"nterms_fmm_total": 1000000, "direct_workload": 5064570, "direct_nsource_boxes": 56907, "m2l_workload": 879645000, "m2p_workload": 80054000, "m2p_nboxes": 148660, "p2l_workload": 58602800, "p2l_nboxes": 84384, "eval_part_workload": 1000000, "form_multipoles": {"wall_elapsed": 0.06467467220500112, "process_elapsed": 0.061806205999999975}, "coarsen_multipoles": {"wall_elapsed": 0.1378657571040094, "process_elapsed": 0.13786446399999974}, "eval_direct": {"wall_elapsed": 2.628749551717192, "process_elapsed": 2.6928994460000024}, "multipole_to_local": 
{"wall_elapsed": 20.407855125609785, "process_elapsed": 20.420269581}, "eval_multipoles": {"wall_elapsed": 3.4444818547926843, "process_elapsed": 3.4479039949999972}, "form_locals": {"wall_elapsed": 2.021404864266515, "process_elapsed": 2.013269193000003}, "refine_locals": {"wall_elapsed": 0.11385017307475209, "process_elapsed": 0.11385017100000283}, "eval_locals": {"wall_elapsed": 0.061653067357838154, "process_elapsed": 0.06165442400000032}}, {"nterms_fmm_total": 2000000, "direct_workload": 14111527, "direct_nsource_boxes": 105337, "m2l_workload": 1702853000, "m2p_workload": 202584200, "m2p_nboxes": 311728, "p2l_workload": 120805600, "p2l_nboxes": 146698, "eval_part_workload": 2000000, "form_multipoles": {"wall_elapsed": 0.10336571699008346, "process_elapsed": 0.10382350900000148}, "coarsen_multipoles": {"wall_elapsed": 0.18445956613868475, "process_elapsed": 0.18445694999999773}, "eval_direct": {"wall_elapsed": 5.217078930698335, "process_elapsed": 5.1945506220000155}, "multipole_to_local": {"wall_elapsed": 40.47428226983175, "process_elapsed": 40.43935276}, "eval_multipoles": {"wall_elapsed": 8.246962024830282, "process_elapsed": 8.253471535999992}, "form_locals": {"wall_elapsed": 4.244218919891864, "process_elapsed": 4.237957393000002}, "refine_locals": {"wall_elapsed": 0.20841451222077012, "process_elapsed": 0.20840436699999998}, "eval_locals": {"wall_elapsed": 0.12309996783733368, "process_elapsed": 0.12310028500000669}}, {"nterms_fmm_total": 3000000, "direct_workload": 24428758, "direct_nsource_boxes": 223172, "m2l_workload": 4011675000, "m2p_workload": 670713600, "m2p_nboxes": 735176, "p2l_workload": 213069300, "p2l_nboxes": 259667, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.24232071777805686, "process_elapsed": 0.24232055199999536}, "coarsen_multipoles": {"wall_elapsed": 0.5347794652916491, "process_elapsed": 0.534767346999999}, "eval_direct": {"wall_elapsed": 11.002750042360276, "process_elapsed": 11.097017833000024}, "multipole_to_local": {"wall_elapsed": 93.03417005809024, "process_elapsed": 92.738502075}, "eval_multipoles": {"wall_elapsed": 23.674448810052127, "process_elapsed": 23.61579378899998}, "form_locals": {"wall_elapsed": 7.417238333728164, "process_elapsed": 7.402527147000001}, "refine_locals": {"wall_elapsed": 0.3950768308714032, "process_elapsed": 0.39506440099998485}, "eval_locals": {"wall_elapsed": 0.25366233196109533, "process_elapsed": 0.25365634300001716}}, {"nterms_fmm_total": 4000000, "direct_workload": 29106936, "direct_nsource_boxes": 259688, "m2l_workload": 4679517000, "m2p_workload": 737745100, "m2p_nboxes": 887508, "p2l_workload": 262131700, "p2l_nboxes": 277715, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.17537363106384873, "process_elapsed": 0.17537425200001167}, "coarsen_multipoles": {"wall_elapsed": 0.4305199389345944, "process_elapsed": 0.4364680989999954}, "eval_direct": {"wall_elapsed": 11.999836526811123, "process_elapsed": 12.09899144499991}, "multipole_to_local": {"wall_elapsed": 108.8945693182759, "process_elapsed": 108.54051004800002}, "eval_multipoles": {"wall_elapsed": 28.0945353070274, "process_elapsed": 28.019863450000003}, "form_locals": {"wall_elapsed": 8.115772506222129, "process_elapsed": 8.086782484999958}, "refine_locals": {"wall_elapsed": 0.4050322100520134, "process_elapsed": 0.4050208720000228}, "eval_locals": {"wall_elapsed": 0.23690649028867483, "process_elapsed": 0.23690572800001064}}, {"nterms_fmm_total": 5000000, "direct_workload": 47923958, "direct_nsource_boxes": 321217, 
"m2l_workload": 6098447000, "m2p_workload": 1049959000, "m2p_nboxes": 1058414, "p2l_workload": 250873800, "p2l_nboxes": 307399, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.2068675379268825, "process_elapsed": 0.20685269899996683}, "coarsen_multipoles": {"wall_elapsed": 0.6696573188528419, "process_elapsed": 0.664978437000002}, "eval_direct": {"wall_elapsed": 15.180561240762472, "process_elapsed": 15.243839906999938}, "multipole_to_local": {"wall_elapsed": 143.82901267288253, "process_elapsed": 143.340336542}, "eval_multipoles": {"wall_elapsed": 34.135265816003084, "process_elapsed": 34.04566944399994}, "form_locals": {"wall_elapsed": 8.900728145148605, "process_elapsed": 8.893034466000017}, "refine_locals": {"wall_elapsed": 0.5461942246183753, "process_elapsed": 0.546178929000007}, "eval_locals": {"wall_elapsed": 0.3527874890714884, "process_elapsed": 0.35278468699993937}}, {"nterms_fmm_total": 6000000, "direct_workload": 52175740, "direct_nsource_boxes": 365978, "m2l_workload": 6963407000, "m2p_workload": 1228104100, "m2p_nboxes": 1238954, "p2l_workload": 320901400, "p2l_nboxes": 340870, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.310679045971483, "process_elapsed": 0.3128285689999757}, "coarsen_multipoles": {"wall_elapsed": 0.6655385498888791, "process_elapsed": 0.6780369559999144}, "eval_direct": {"wall_elapsed": 17.763539463747293, "process_elapsed": 17.785153179000076}, "multipole_to_local": {"wall_elapsed": 165.45908340485767, "process_elapsed": 164.94912508800007}, "eval_multipoles": {"wall_elapsed": 41.00005980208516, "process_elapsed": 40.86370828999998}, "form_locals": {"wall_elapsed": 10.63184105977416, "process_elapsed": 10.620832092}, "refine_locals": {"wall_elapsed": 0.6163057168014348, "process_elapsed": 0.6162964320000128}, "eval_locals": {"wall_elapsed": 0.3811777690425515, "process_elapsed": 0.3816459009999562}}, {"nterms_fmm_total": 7000000, "direct_workload": 67754792, "direct_nsource_boxes": 399866, "m2l_workload": 7699296000, "m2p_workload": 1423135200, "m2p_nboxes": 1352554, "p2l_workload": 330677000, "p2l_nboxes": 356695, "eval_part_workload": 7000000, "form_multipoles": {"wall_elapsed": 0.2698205062188208, "process_elapsed": 0.2698129970000309}, "coarsen_multipoles": {"wall_elapsed": 0.6649344856850803, "process_elapsed": 0.6794030730000031}, "eval_direct": {"wall_elapsed": 18.069350454490632, "process_elapsed": 18.11652952300028}, "multipole_to_local": {"wall_elapsed": 179.39501194981858, "process_elapsed": 178.77733126399994}, "eval_multipoles": {"wall_elapsed": 49.19099767273292, "process_elapsed": 49.06879406899998}, "form_locals": {"wall_elapsed": 11.126400337088853, "process_elapsed": 11.094821377000017}, "refine_locals": {"wall_elapsed": 0.8388090138323605, "process_elapsed": 0.8387958670000444}, "eval_locals": {"wall_elapsed": 0.43714700592681766, "process_elapsed": 0.4371343509999406}}, {"nterms_fmm_total": 8000000, "direct_workload": 82707417, "direct_nsource_boxes": 441394, "m2l_workload": 8617704000, "m2p_workload": 1685913200, "m2p_nboxes": 1498566, "p2l_workload": 348565200, "p2l_nboxes": 382521, "eval_part_workload": 8000000, "form_multipoles": {"wall_elapsed": 0.3005179218016565, "process_elapsed": 0.3107753879999109}, "coarsen_multipoles": {"wall_elapsed": 0.7997653689235449, "process_elapsed": 0.8049937840000894}, "eval_direct": {"wall_elapsed": 20.99420260032639, "process_elapsed": 21.002320735999774}, "multipole_to_local": {"wall_elapsed": 201.31278445525095, "process_elapsed": 
200.60458022299986}, "eval_multipoles": {"wall_elapsed": 56.057990666944534, "process_elapsed": 55.91957687800004}, "form_locals": {"wall_elapsed": 11.969875158276409, "process_elapsed": 11.920894822000037}, "refine_locals": {"wall_elapsed": 0.7045927383005619, "process_elapsed": 0.7050175569997919}, "eval_locals": {"wall_elapsed": 0.43921483773738146, "process_elapsed": 0.4392006999999012}}, {"nterms_fmm_total": 1000000, "direct_workload": 5064570, "direct_nsource_boxes": 56907, "m2l_workload": 879645000, "m2p_workload": 80054000, "m2p_nboxes": 148660, "p2l_workload": 58602800, "p2l_nboxes": 84384, "eval_part_workload": 1000000, "form_multipoles": {"wall_elapsed": 0.06566942017525434, "process_elapsed": 0.06264390200000003}, "coarsen_multipoles": {"wall_elapsed": 0.1418711910955608, "process_elapsed": 0.1418699069999998}, "eval_direct": {"wall_elapsed": 2.446558577939868, "process_elapsed": 2.480314300999999}, "multipole_to_local": {"wall_elapsed": 20.00079088192433, "process_elapsed": 19.990914497}, "eval_multipoles": {"wall_elapsed": 3.289981202688068, "process_elapsed": 3.298983691}, "form_locals": {"wall_elapsed": 1.949442199897021, "process_elapsed": 1.9376855149999983}, "refine_locals": {"wall_elapsed": 0.10793576203286648, "process_elapsed": 0.1079347100000021}, "eval_locals": {"wall_elapsed": 0.056184975896030664, "process_elapsed": 0.056184503999997304}}, {"nterms_fmm_total": 2000000, "direct_workload": 14111527, "direct_nsource_boxes": 105337, "m2l_workload": 1702853000, "m2p_workload": 202584200, "m2p_nboxes": 311728, "p2l_workload": 120805600, "p2l_nboxes": 146698, "eval_part_workload": 2000000, "form_multipoles": {"wall_elapsed": 0.0772943552583456, "process_elapsed": 0.07729414800000001}, "coarsen_multipoles": {"wall_elapsed": 0.1868803328834474, "process_elapsed": 0.18687748400000004}, "eval_direct": {"wall_elapsed": 4.70476703485474, "process_elapsed": 4.733044761000009}, "multipole_to_local": {"wall_elapsed": 39.82684602914378, "process_elapsed": 39.775730969}, "eval_multipoles": {"wall_elapsed": 8.290043313987553, "process_elapsed": 8.295265927000003}, "form_locals": {"wall_elapsed": 4.078609869815409, "process_elapsed": 4.073348116000005}, "refine_locals": {"wall_elapsed": 0.20782660599797964, "process_elapsed": 0.20782425500000556}, "eval_locals": {"wall_elapsed": 0.15716208703815937, "process_elapsed": 0.15309306700000036}}, {"nterms_fmm_total": 3000000, "direct_workload": 24428758, "direct_nsource_boxes": 223172, "m2l_workload": 4011675000, "m2p_workload": 670713600, "m2p_nboxes": 735176, "p2l_workload": 213069300, "p2l_nboxes": 259667, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.16780595015734434, "process_elapsed": 0.16779495699999813}, "coarsen_multipoles": {"wall_elapsed": 0.42514409590512514, "process_elapsed": 0.4251383390000001}, "eval_direct": {"wall_elapsed": 11.506238477304578, "process_elapsed": 11.525195629999985}, "multipole_to_local": {"wall_elapsed": 93.78711060807109, "process_elapsed": 93.47971988}, "eval_multipoles": {"wall_elapsed": 24.25627316115424, "process_elapsed": 24.202846137000023}, "form_locals": {"wall_elapsed": 7.886253128293902, "process_elapsed": 7.8745142149999765}, "refine_locals": {"wall_elapsed": 0.5174721670337021, "process_elapsed": 0.5133926010000209}, "eval_locals": {"wall_elapsed": 0.2859889171086252, "process_elapsed": 0.2859800949999851}}, {"nterms_fmm_total": 4000000, "direct_workload": 29106936, "direct_nsource_boxes": 259688, "m2l_workload": 4679517000, "m2p_workload": 737745100, "m2p_nboxes": 
887508, "p2l_workload": 262131700, "p2l_nboxes": 277715, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.183073571883142, "process_elapsed": 0.18305776899998705}, "coarsen_multipoles": {"wall_elapsed": 0.472671234048903, "process_elapsed": 0.47271501700001295}, "eval_direct": {"wall_elapsed": 12.604105783626437, "process_elapsed": 12.64961513900002}, "multipole_to_local": {"wall_elapsed": 107.64262523688376, "process_elapsed": 107.31761293199997}, "eval_multipoles": {"wall_elapsed": 27.184093620162457, "process_elapsed": 27.179954468999995}, "form_locals": {"wall_elapsed": 8.9608427840285, "process_elapsed": 8.936076344000014}, "refine_locals": {"wall_elapsed": 0.559703144710511, "process_elapsed": 0.559695051999995}, "eval_locals": {"wall_elapsed": 0.25458914041519165, "process_elapsed": 0.2545796289999771}}, {"nterms_fmm_total": 5000000, "direct_workload": 47923958, "direct_nsource_boxes": 321217, "m2l_workload": 6098447000, "m2p_workload": 1049959000, "m2p_nboxes": 1058414, "p2l_workload": 250873800, "p2l_nboxes": 307399, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.2608643942512572, "process_elapsed": 0.262650323999992}, "coarsen_multipoles": {"wall_elapsed": 0.5767403277568519, "process_elapsed": 0.5860576580000156}, "eval_direct": {"wall_elapsed": 15.735880189575255, "process_elapsed": 15.813270451999927}, "multipole_to_local": {"wall_elapsed": 140.27258323831484, "process_elapsed": 139.77256453799998}, "eval_multipoles": {"wall_elapsed": 36.051918339915574, "process_elapsed": 35.95092203699994}, "form_locals": {"wall_elapsed": 8.086318483110517, "process_elapsed": 8.063137184000084}, "refine_locals": {"wall_elapsed": 0.6116273296065629, "process_elapsed": 0.6116195170000083}, "eval_locals": {"wall_elapsed": 0.30272550601512194, "process_elapsed": 0.30271572899994226}}, {"nterms_fmm_total": 6000000, "direct_workload": 52175740, "direct_nsource_boxes": 365978, "m2l_workload": 6963407000, "m2p_workload": 1228104100, "m2p_nboxes": 1238954, "p2l_workload": 320901400, "p2l_nboxes": 340870, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.23625991912558675, "process_elapsed": 0.2364244679999956}, "coarsen_multipoles": {"wall_elapsed": 0.6893667639233172, "process_elapsed": 0.6830092370000784}, "eval_direct": {"wall_elapsed": 15.056561090052128, "process_elapsed": 15.098359518000052}, "multipole_to_local": {"wall_elapsed": 161.72943416889757, "process_elapsed": 161.19251369200003}, "eval_multipoles": {"wall_elapsed": 41.702806482091546, "process_elapsed": 41.572584398000004}, "form_locals": {"wall_elapsed": 10.078389388974756, "process_elapsed": 10.070601187999955}, "refine_locals": {"wall_elapsed": 0.5847061406821012, "process_elapsed": 0.5846636710000439}, "eval_locals": {"wall_elapsed": 0.32613639906048775, "process_elapsed": 0.3261274469999762}}, {"nterms_fmm_total": 7000000, "direct_workload": 67754792, "direct_nsource_boxes": 399866, "m2l_workload": 7699296000, "m2p_workload": 1423135200, "m2p_nboxes": 1352554, "p2l_workload": 330677000, "p2l_nboxes": 356695, "eval_part_workload": 7000000, "form_multipoles": {"wall_elapsed": 0.35347751434892416, "process_elapsed": 0.3534746269999687}, "coarsen_multipoles": {"wall_elapsed": 0.7678062850609422, "process_elapsed": 0.7621317609999778}, "eval_direct": {"wall_elapsed": 19.385647067334503, "process_elapsed": 19.373809754000035}, "multipole_to_local": {"wall_elapsed": 177.7568790889345, "process_elapsed": 177.13995553400014}, "eval_multipoles": {"wall_elapsed": 
55.13101848727092, "process_elapsed": 54.92711723799994}, "form_locals": {"wall_elapsed": 10.444787265732884, "process_elapsed": 10.436945378000019}, "refine_locals": {"wall_elapsed": 0.6166465007700026, "process_elapsed": 0.6166181649998634}, "eval_locals": {"wall_elapsed": 0.35686514899134636, "process_elapsed": 0.35686196599999676}}, {"nterms_fmm_total": 8000000, "direct_workload": 82707417, "direct_nsource_boxes": 441394, "m2l_workload": 8617704000, "m2p_workload": 1685913200, "m2p_nboxes": 1498566, "p2l_workload": 348565200, "p2l_nboxes": 382521, "eval_part_workload": 8000000, "form_multipoles": {"wall_elapsed": 0.3867368856444955, "process_elapsed": 0.3867698809999638}, "coarsen_multipoles": {"wall_elapsed": 0.7339309039525688, "process_elapsed": 0.7566592790001323}, "eval_direct": {"wall_elapsed": 20.32549550011754, "process_elapsed": 20.388905034000118}, "multipole_to_local": {"wall_elapsed": 198.88563758181408, "process_elapsed": 198.28836379799986}, "eval_multipoles": {"wall_elapsed": 51.132025649771094, "process_elapsed": 50.934892610999896}, "form_locals": {"wall_elapsed": 11.990623429883271, "process_elapsed": 11.949524165999946}, "refine_locals": {"wall_elapsed": 0.6882827966473997, "process_elapsed": 0.680187923999938}, "eval_locals": {"wall_elapsed": 0.40719516295939684, "process_elapsed": 0.40764913800012437}}, {"nterms_fmm_total": 1000000, "direct_workload": 5064570, "direct_nsource_boxes": 56907, "m2l_workload": 879645000, "m2p_workload": 80054000, "m2p_nboxes": 148660, "p2l_workload": 58602800, "p2l_nboxes": 84384, "eval_part_workload": 1000000, "form_multipoles": {"wall_elapsed": 0.06581627298146486, "process_elapsed": 0.06344213200000004}, "coarsen_multipoles": {"wall_elapsed": 0.1412572581321001, "process_elapsed": 0.1412559019999997}, "eval_direct": {"wall_elapsed": 2.7299221428111196, "process_elapsed": 2.781603758000003}, "multipole_to_local": {"wall_elapsed": 20.612939092796296, "process_elapsed": 20.616477141}, "eval_multipoles": {"wall_elapsed": 3.5307789859361947, "process_elapsed": 3.517346646}, "form_locals": {"wall_elapsed": 2.124925720039755, "process_elapsed": 2.1208257669999995}, "refine_locals": {"wall_elapsed": 0.1124834748916328, "process_elapsed": 0.11248237000000216}, "eval_locals": {"wall_elapsed": 0.06405873689800501, "process_elapsed": 0.0640584180000019}}, {"nterms_fmm_total": 2000000, "direct_workload": 14111527, "direct_nsource_boxes": 105337, "m2l_workload": 1702853000, "m2p_workload": 202584200, "m2p_nboxes": 311728, "p2l_workload": 120805600, "p2l_nboxes": 146698, "eval_part_workload": 2000000, "form_multipoles": {"wall_elapsed": 0.1036986019462347, "process_elapsed": 0.10369902800000119}, "coarsen_multipoles": {"wall_elapsed": 0.1949111670255661, "process_elapsed": 0.1999170879999994}, "eval_direct": {"wall_elapsed": 4.608586409594864, "process_elapsed": 4.5927578570000165}, "multipole_to_local": {"wall_elapsed": 38.54366623284295, "process_elapsed": 38.485827138000005}, "eval_multipoles": {"wall_elapsed": 8.568737780675292, "process_elapsed": 8.542159236999993}, "form_locals": {"wall_elapsed": 4.073263252153993, "process_elapsed": 4.064695062000013}, "refine_locals": {"wall_elapsed": 0.309948590118438, "process_elapsed": 0.3115177579999937}, "eval_locals": {"wall_elapsed": 0.1761215110309422, "process_elapsed": 0.17611938100000657}}, {"nterms_fmm_total": 3000000, "direct_workload": 24428758, "direct_nsource_boxes": 223172, "m2l_workload": 4011675000, "m2p_workload": 670713600, "m2p_nboxes": 735176, "p2l_workload": 213069300, "p2l_nboxes": 
259667, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.13820320600643754, "process_elapsed": 0.138196674999989}, "coarsen_multipoles": {"wall_elapsed": 0.38043196592479944, "process_elapsed": 0.3804273089999981}, "eval_direct": {"wall_elapsed": 11.744406177196652, "process_elapsed": 11.827659204999975}, "multipole_to_local": {"wall_elapsed": 91.26099151792005, "process_elapsed": 90.957502208}, "eval_multipoles": {"wall_elapsed": 22.827171033713967, "process_elapsed": 22.78696679800001}, "form_locals": {"wall_elapsed": 7.519823815207928, "process_elapsed": 7.498412040000005}, "refine_locals": {"wall_elapsed": 0.36121500795707107, "process_elapsed": 0.36120376799999576}, "eval_locals": {"wall_elapsed": 0.2246690890751779, "process_elapsed": 0.2246666040000207}}, {"nterms_fmm_total": 4000000, "direct_workload": 29106936, "direct_nsource_boxes": 259688, "m2l_workload": 4679517000, "m2p_workload": 737745100, "m2p_nboxes": 887508, "p2l_workload": 262131700, "p2l_nboxes": 277715, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.17177576525136828, "process_elapsed": 0.1717763049999803}, "coarsen_multipoles": {"wall_elapsed": 0.42073291214182973, "process_elapsed": 0.4207269609999855}, "eval_direct": {"wall_elapsed": 11.8608891190961, "process_elapsed": 11.953956640000058}, "multipole_to_local": {"wall_elapsed": 108.9518269430846, "process_elapsed": 108.638402584}, "eval_multipoles": {"wall_elapsed": 29.55046471906826, "process_elapsed": 29.447812095000018}, "form_locals": {"wall_elapsed": 8.121391328983009, "process_elapsed": 8.098203552999962}, "refine_locals": {"wall_elapsed": 0.40015376918017864, "process_elapsed": 0.40014009999998734}, "eval_locals": {"wall_elapsed": 0.24294947879388928, "process_elapsed": 0.24293507200002296}}, {"nterms_fmm_total": 5000000, "direct_workload": 47923958, "direct_nsource_boxes": 321217, "m2l_workload": 6098447000, "m2p_workload": 1049959000, "m2p_nboxes": 1058414, "p2l_workload": 250873800, "p2l_nboxes": 307399, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.3036691783927381, "process_elapsed": 0.3037372880000362}, "coarsen_multipoles": {"wall_elapsed": 0.7293630791828036, "process_elapsed": 0.7386667420000208}, "eval_direct": {"wall_elapsed": 15.853490174748003, "process_elapsed": 15.951854819999937}, "multipole_to_local": {"wall_elapsed": 150.07324582897127, "process_elapsed": 149.59256680499993}, "eval_multipoles": {"wall_elapsed": 36.21569347428158, "process_elapsed": 36.108943527000065}, "form_locals": {"wall_elapsed": 9.256117098033428, "process_elapsed": 9.234842302999937}, "refine_locals": {"wall_elapsed": 0.6879124888218939, "process_elapsed": 0.6842866189999768}, "eval_locals": {"wall_elapsed": 0.4285805160179734, "process_elapsed": 0.4285680850000517}}, {"nterms_fmm_total": 6000000, "direct_workload": 52175740, "direct_nsource_boxes": 365978, "m2l_workload": 6963407000, "m2p_workload": 1228104100, "m2p_nboxes": 1238954, "p2l_workload": 320901400, "p2l_nboxes": 340870, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.2390671377070248, "process_elapsed": 0.23906642099996134}, "coarsen_multipoles": {"wall_elapsed": 0.6618608850985765, "process_elapsed": 0.6946877950000498}, "eval_direct": {"wall_elapsed": 16.631962614599615, "process_elapsed": 16.672639716999925}, "multipole_to_local": {"wall_elapsed": 161.71785036334768, "process_elapsed": 161.19572901000004}, "eval_multipoles": {"wall_elapsed": 42.610538037959486, "process_elapsed": 42.503210351999996}, "form_locals": 
{"wall_elapsed": 11.49972936604172, "process_elapsed": 11.47165019199997}, "refine_locals": {"wall_elapsed": 0.7118247579783201, "process_elapsed": 0.7118074400000296}, "eval_locals": {"wall_elapsed": 0.380106495693326, "process_elapsed": 0.38009324099994046}}, {"nterms_fmm_total": 7000000, "direct_workload": 67754792, "direct_nsource_boxes": 399866, "m2l_workload": 7699296000, "m2p_workload": 1423135200, "m2p_nboxes": 1352554, "p2l_workload": 330677000, "p2l_nboxes": 356695, "eval_part_workload": 7000000, "form_multipoles": {"wall_elapsed": 0.3670531171374023, "process_elapsed": 0.36309775300003366}, "coarsen_multipoles": {"wall_elapsed": 0.7518898211419582, "process_elapsed": 0.7729549489999954}, "eval_direct": {"wall_elapsed": 18.767360630445182, "process_elapsed": 18.769919828999946}, "multipole_to_local": {"wall_elapsed": 180.2374455779791, "process_elapsed": 179.64930603300002}, "eval_multipoles": {"wall_elapsed": 49.90873555187136, "process_elapsed": 49.73379236400001}, "form_locals": {"wall_elapsed": 10.527730371803045, "process_elapsed": 10.505953223000006}, "refine_locals": {"wall_elapsed": 0.6305891978554428, "process_elapsed": 0.6305585160000646}, "eval_locals": {"wall_elapsed": 0.5451591932214797, "process_elapsed": 0.5386910560000615}}, {"nterms_fmm_total": 8000000, "direct_workload": 82707417, "direct_nsource_boxes": 441394, "m2l_workload": 8617704000, "m2p_workload": 1685913200, "m2p_nboxes": 1498566, "p2l_workload": 348565200, "p2l_nboxes": 382521, "eval_part_workload": 8000000, "form_multipoles": {"wall_elapsed": 0.3035329198464751, "process_elapsed": 0.3046291329999349}, "coarsen_multipoles": {"wall_elapsed": 0.908652248326689, "process_elapsed": 0.9229546799999753}, "eval_direct": {"wall_elapsed": 23.435453578364104, "process_elapsed": 23.427185823999707}, "multipole_to_local": {"wall_elapsed": 204.10029060393572, "process_elapsed": 203.498182929}, "eval_multipoles": {"wall_elapsed": 57.77209374681115, "process_elapsed": 57.580601317999935}, "form_locals": {"wall_elapsed": 11.694038683082908, "process_elapsed": 11.65377241200008}, "refine_locals": {"wall_elapsed": 0.6659307437948883, "process_elapsed": 0.6659000030001607}, "eval_locals": {"wall_elapsed": 0.42689173109829426, "process_elapsed": 0.4268864119999307}}, {"nterms_fmm_total": 1000000, "direct_workload": 5064570, "direct_nsource_boxes": 56907, "m2l_workload": 879645000, "m2p_workload": 80054000, "m2p_nboxes": 148660, "p2l_workload": 58602800, "p2l_nboxes": 84384, "eval_part_workload": 1000000, "form_multipoles": {"wall_elapsed": 0.06433802330866456, "process_elapsed": 0.06175603100000027}, "coarsen_multipoles": {"wall_elapsed": 0.13728277198970318, "process_elapsed": 0.1372830300000003}, "eval_direct": {"wall_elapsed": 2.8698996007442474, "process_elapsed": 2.8860750380000035}, "multipole_to_local": {"wall_elapsed": 21.608792692888528, "process_elapsed": 21.600729749}, "eval_multipoles": {"wall_elapsed": 4.170610999688506, "process_elapsed": 4.196137698000001}, "form_locals": {"wall_elapsed": 1.8854569140821695, "process_elapsed": 1.8855776410000011}, "refine_locals": {"wall_elapsed": 0.16010719677433372, "process_elapsed": 0.16010556499999495}, "eval_locals": {"wall_elapsed": 0.08334489725530148, "process_elapsed": 0.08334504000000464}}, {"nterms_fmm_total": 2000000, "direct_workload": 14111527, "direct_nsource_boxes": 105337, "m2l_workload": 1702853000, "m2p_workload": 202584200, "m2p_nboxes": 311728, "p2l_workload": 120805600, "p2l_nboxes": 146698, "eval_part_workload": 2000000, "form_multipoles": 
{"wall_elapsed": 0.07694227108731866, "process_elapsed": 0.07694427899999567}, "coarsen_multipoles": {"wall_elapsed": 0.18193004885688424, "process_elapsed": 0.18192722100000225}, "eval_direct": {"wall_elapsed": 5.362686685286462, "process_elapsed": 5.376335398999991}, "multipole_to_local": {"wall_elapsed": 38.82882813597098, "process_elapsed": 38.800395750999996}, "eval_multipoles": {"wall_elapsed": 7.6316725057549775, "process_elapsed": 7.61176749900001}, "form_locals": {"wall_elapsed": 3.7538683358579874, "process_elapsed": 3.740067400000001}, "refine_locals": {"wall_elapsed": 0.18044804502278566, "process_elapsed": 0.18044670099999394}, "eval_locals": {"wall_elapsed": 0.1640238557010889, "process_elapsed": 0.15998751500001163}}, {"nterms_fmm_total": 3000000, "direct_workload": 24428758, "direct_nsource_boxes": 223172, "m2l_workload": 4011675000, "m2p_workload": 670713600, "m2p_nboxes": 735176, "p2l_workload": 213069300, "p2l_nboxes": 259667, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.16299484204500914, "process_elapsed": 0.1589325700000046}, "coarsen_multipoles": {"wall_elapsed": 0.5394922369159758, "process_elapsed": 0.5394833269999992}, "eval_direct": {"wall_elapsed": 11.529483899474144, "process_elapsed": 11.633024222999964}, "multipole_to_local": {"wall_elapsed": 90.45034577790648, "process_elapsed": 90.140623747}, "eval_multipoles": {"wall_elapsed": 23.475032337009907, "process_elapsed": 23.402588277000007}, "form_locals": {"wall_elapsed": 7.382363630924374, "process_elapsed": 7.379083809000008}, "refine_locals": {"wall_elapsed": 0.3571728630922735, "process_elapsed": 0.3571698819999938}, "eval_locals": {"wall_elapsed": 0.29383321665227413, "process_elapsed": 0.28573974700000804}}, {"nterms_fmm_total": 4000000, "direct_workload": 29106936, "direct_nsource_boxes": 259688, "m2l_workload": 4679517000, "m2p_workload": 737745100, "m2p_nboxes": 887508, "p2l_workload": 262131700, "p2l_nboxes": 277715, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.17482035793364048, "process_elapsed": 0.17482164000000466}, "coarsen_multipoles": {"wall_elapsed": 0.6471779476851225, "process_elapsed": 0.6350906819999977}, "eval_direct": {"wall_elapsed": 11.94170841993764, "process_elapsed": 12.134813735999984}, "multipole_to_local": {"wall_elapsed": 109.43196559883654, "process_elapsed": 109.081074277}, "eval_multipoles": {"wall_elapsed": 29.13186590280384, "process_elapsed": 29.05222061400002}, "form_locals": {"wall_elapsed": 9.20513498224318, "process_elapsed": 9.17659013399998}, "refine_locals": {"wall_elapsed": 0.39085082802921534, "process_elapsed": 0.3908466930000145}, "eval_locals": {"wall_elapsed": 0.23075266415253282, "process_elapsed": 0.23075040099996613}}, {"nterms_fmm_total": 5000000, "direct_workload": 47923958, "direct_nsource_boxes": 321217, "m2l_workload": 6098447000, "m2p_workload": 1049959000, "m2p_nboxes": 1058414, "p2l_workload": 250873800, "p2l_nboxes": 307399, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.31254801992326975, "process_elapsed": 0.30992854599998054}, "coarsen_multipoles": {"wall_elapsed": 0.637142900377512, "process_elapsed": 0.6467619349999723}, "eval_direct": {"wall_elapsed": 15.69611760089174, "process_elapsed": 15.757981944999813}, "multipole_to_local": {"wall_elapsed": 140.356332520023, "process_elapsed": 139.88694511699998}, "eval_multipoles": {"wall_elapsed": 38.49987019971013, "process_elapsed": 38.43145631599998}, "form_locals": {"wall_elapsed": 8.471607562154531, "process_elapsed": 
8.449020899000061}, "refine_locals": {"wall_elapsed": 0.5592385237105191, "process_elapsed": 0.5596502360000386}, "eval_locals": {"wall_elapsed": 0.35891397623345256, "process_elapsed": 0.3589064400000552}}, {"nterms_fmm_total": 6000000, "direct_workload": 52175740, "direct_nsource_boxes": 365978, "m2l_workload": 6963407000, "m2p_workload": 1228104100, "m2p_nboxes": 1238954, "p2l_workload": 320901400, "p2l_nboxes": 340870, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.22916135005652905, "process_elapsed": 0.2294245719999708}, "coarsen_multipoles": {"wall_elapsed": 0.5582886077463627, "process_elapsed": 0.5605387639999435}, "eval_direct": {"wall_elapsed": 16.712323531042784, "process_elapsed": 16.77518112300004}, "multipole_to_local": {"wall_elapsed": 159.60533997509629, "process_elapsed": 159.07230811499994}, "eval_multipoles": {"wall_elapsed": 42.59306370886043, "process_elapsed": 42.45572857700006}, "form_locals": {"wall_elapsed": 10.805573990568519, "process_elapsed": 10.770887148999918}, "refine_locals": {"wall_elapsed": 0.5874539171345532, "process_elapsed": 0.579410558999939}, "eval_locals": {"wall_elapsed": 0.34672214509919286, "process_elapsed": 0.3467195629999651}}, {"nterms_fmm_total": 7000000, "direct_workload": 67754792, "direct_nsource_boxes": 399866, "m2l_workload": 7699296000, "m2p_workload": 1423135200, "m2p_nboxes": 1352554, "p2l_workload": 330677000, "p2l_nboxes": 356695, "eval_part_workload": 7000000, "form_multipoles": {"wall_elapsed": 0.34223695332184434, "process_elapsed": 0.3381626640000377}, "coarsen_multipoles": {"wall_elapsed": 0.8546734559349716, "process_elapsed": 0.8620984199999384}, "eval_direct": {"wall_elapsed": 20.248594620265067, "process_elapsed": 20.2735790050001}, "multipole_to_local": {"wall_elapsed": 177.53715593600646, "process_elapsed": 176.916876098}, "eval_multipoles": {"wall_elapsed": 46.6420071949251, "process_elapsed": 46.50159333199986}, "form_locals": {"wall_elapsed": 11.556011468637735, "process_elapsed": 11.530447846000015}, "refine_locals": {"wall_elapsed": 0.6371138021349907, "process_elapsed": 0.6370984749999025}, "eval_locals": {"wall_elapsed": 0.48767920210957527, "process_elapsed": 0.4836109719999513}}, {"nterms_fmm_total": 8000000, "direct_workload": 82707417, "direct_nsource_boxes": 441394, "m2l_workload": 8617704000, "m2p_workload": 1685913200, "m2p_nboxes": 1498566, "p2l_workload": 348565200, "p2l_nboxes": 382521, "eval_part_workload": 8000000, "form_multipoles": {"wall_elapsed": 0.4101475323550403, "process_elapsed": 0.4025623400000313}, "coarsen_multipoles": {"wall_elapsed": 0.8309031310491264, "process_elapsed": 0.8533334829999148}, "eval_direct": {"wall_elapsed": 21.83228043373674, "process_elapsed": 21.811839603999942}, "multipole_to_local": {"wall_elapsed": 200.4263506392017, "process_elapsed": 199.78048179799998}, "eval_multipoles": {"wall_elapsed": 55.83001929195598, "process_elapsed": 55.62621917499996}, "form_locals": {"wall_elapsed": 10.937592420261353, "process_elapsed": 10.904243739000094}, "refine_locals": {"wall_elapsed": 0.8196568163111806, "process_elapsed": 0.8155691360000219}, "eval_locals": {"wall_elapsed": 0.6638513482175767, "process_elapsed": 0.6638208470001246}}, {"nterms_fmm_total": 1000000, "direct_workload": 5064570, "direct_nsource_boxes": 56907, "m2l_workload": 879645000, "m2p_workload": 80054000, "m2p_nboxes": 148660, "p2l_workload": 58602800, "p2l_nboxes": 84384, "eval_part_workload": 1000000, "form_multipoles": {"wall_elapsed": 0.06488013127818704, "process_elapsed": 
0.06267741799999982}, "coarsen_multipoles": {"wall_elapsed": 0.14028360787779093, "process_elapsed": 0.1402826039999998}, "eval_direct": {"wall_elapsed": 3.573185256216675, "process_elapsed": 3.561920087999996}, "multipole_to_local": {"wall_elapsed": 25.873784495983273, "process_elapsed": 25.732638186}, "eval_multipoles": {"wall_elapsed": 4.642745970282704, "process_elapsed": 4.639341426000001}, "form_locals": {"wall_elapsed": 3.0461708339862525, "process_elapsed": 3.036363762999997}, "refine_locals": {"wall_elapsed": 0.11961846519261599, "process_elapsed": 0.1196096529999977}, "eval_locals": {"wall_elapsed": 0.0641988911665976, "process_elapsed": 0.06419945400000415}}, {"nterms_fmm_total": 2000000, "direct_workload": 14111527, "direct_nsource_boxes": 105337, "m2l_workload": 1702853000, "m2p_workload": 202584200, "m2p_nboxes": 311728, "p2l_workload": 120805600, "p2l_nboxes": 146698, "eval_part_workload": 2000000, "form_multipoles": {"wall_elapsed": 0.09974845498800278, "process_elapsed": 0.09974945999999818}, "coarsen_multipoles": {"wall_elapsed": 0.2711884528398514, "process_elapsed": 0.2711792780000053}, "eval_direct": {"wall_elapsed": 6.678008021786809, "process_elapsed": 6.685549811999991}, "multipole_to_local": {"wall_elapsed": 52.48578274901956, "process_elapsed": 50.94725895999999}, "eval_multipoles": {"wall_elapsed": 11.199469511862844, "process_elapsed": 10.973406965999999}, "form_locals": {"wall_elapsed": 5.428788446821272, "process_elapsed": 5.372336655000012}, "refine_locals": {"wall_elapsed": 0.2391572780907154, "process_elapsed": 0.2391561879999955}, "eval_locals": {"wall_elapsed": 0.13382326532155275, "process_elapsed": 0.13382537700000796}}, {"nterms_fmm_total": 3000000, "direct_workload": 24428758, "direct_nsource_boxes": 223172, "m2l_workload": 4011675000, "m2p_workload": 670713600, "m2p_nboxes": 735176, "p2l_workload": 213069300, "p2l_nboxes": 259667, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.17349608801305294, "process_elapsed": 0.17349610799999482}, "coarsen_multipoles": {"wall_elapsed": 0.4378826292231679, "process_elapsed": 0.43415366199999994}, "eval_direct": {"wall_elapsed": 14.007537546101958, "process_elapsed": 14.066462461}, "multipole_to_local": {"wall_elapsed": 131.48507186723873, "process_elapsed": 128.31762228600002}, "eval_multipoles": {"wall_elapsed": 29.94184113200754, "process_elapsed": 29.82207009000001}, "form_locals": {"wall_elapsed": 9.938785715959966, "process_elapsed": 9.913687585000048}, "refine_locals": {"wall_elapsed": 0.4504069094546139, "process_elapsed": 0.4487136799999689}, "eval_locals": {"wall_elapsed": 0.30359381902962923, "process_elapsed": 0.30358403499997166}}, {"nterms_fmm_total": 4000000, "direct_workload": 29106936, "direct_nsource_boxes": 259688, "m2l_workload": 4679517000, "m2p_workload": 737745100, "m2p_nboxes": 887508, "p2l_workload": 262131700, "p2l_nboxes": 277715, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.2583726462908089, "process_elapsed": 0.2584110270000224}, "coarsen_multipoles": {"wall_elapsed": 0.5752558228559792, "process_elapsed": 0.5769190489999687}, "eval_direct": {"wall_elapsed": 12.875543330796063, "process_elapsed": 12.883404596000048}, "multipole_to_local": {"wall_elapsed": 117.18043434806168, "process_elapsed": 116.57996186900004}, "eval_multipoles": {"wall_elapsed": 27.881489561870694, "process_elapsed": 27.78543085000001}, "form_locals": {"wall_elapsed": 8.799156446009874, "process_elapsed": 8.768620798000029}, "refine_locals": {"wall_elapsed": 
0.4524097847752273, "process_elapsed": 0.45239475799996853}, "eval_locals": {"wall_elapsed": 0.2726343311369419, "process_elapsed": 0.27261175699999285}}, {"nterms_fmm_total": 5000000, "direct_workload": 47923958, "direct_nsource_boxes": 321217, "m2l_workload": 6098447000, "m2p_workload": 1049959000, "m2p_nboxes": 1058414, "p2l_workload": 250873800, "p2l_nboxes": 307399, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.22424806095659733, "process_elapsed": 0.2242476690000217}, "coarsen_multipoles": {"wall_elapsed": 0.6595294354483485, "process_elapsed": 0.6785909230000016}, "eval_direct": {"wall_elapsed": 16.094195554498583, "process_elapsed": 16.12589607299998}, "multipole_to_local": {"wall_elapsed": 144.88413401972502, "process_elapsed": 144.42795690799994}, "eval_multipoles": {"wall_elapsed": 37.05021429667249, "process_elapsed": 36.94682276699996}, "form_locals": {"wall_elapsed": 8.93320427602157, "process_elapsed": 8.92260437199991}, "refine_locals": {"wall_elapsed": 0.5139365317299962, "process_elapsed": 0.5144198460000098}, "eval_locals": {"wall_elapsed": 0.4673224021680653, "process_elapsed": 0.4632590369999434}}, {"nterms_fmm_total": 6000000, "direct_workload": 52175740, "direct_nsource_boxes": 365978, "m2l_workload": 6963407000, "m2p_workload": 1228104100, "m2p_nboxes": 1238954, "p2l_workload": 320901400, "p2l_nboxes": 340870, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.39084181608632207, "process_elapsed": 0.39083779399993546}, "coarsen_multipoles": {"wall_elapsed": 0.7719269678927958, "process_elapsed": 0.8146186569999827}, "eval_direct": {"wall_elapsed": 17.227728134952486, "process_elapsed": 17.294979483999896}, "multipole_to_local": {"wall_elapsed": 166.3015058990568, "process_elapsed": 165.78228093299992}, "eval_multipoles": {"wall_elapsed": 44.71846476290375, "process_elapsed": 44.62762825000004}, "form_locals": {"wall_elapsed": 9.726129946764559, "process_elapsed": 9.721420520000038}, "refine_locals": {"wall_elapsed": 0.5776261049322784, "process_elapsed": 0.5694936979999738}, "eval_locals": {"wall_elapsed": 0.3503082669340074, "process_elapsed": 0.350304264999977}}, {"nterms_fmm_total": 7000000, "direct_workload": 67754792, "direct_nsource_boxes": 399866, "m2l_workload": 7699296000, "m2p_workload": 1423135200, "m2p_nboxes": 1352554, "p2l_workload": 330677000, "p2l_nboxes": 356695, "eval_part_workload": 7000000, "form_multipoles": {"wall_elapsed": 0.29380203830078244, "process_elapsed": 0.29537681300007534}, "coarsen_multipoles": {"wall_elapsed": 0.6812492711469531, "process_elapsed": 0.7019599970000172}, "eval_direct": {"wall_elapsed": 26.609884928911924, "process_elapsed": 26.486387821999756}, "multipole_to_local": {"wall_elapsed": 225.8225390901789, "process_elapsed": 218.796968299}, "eval_multipoles": {"wall_elapsed": 72.68378263898194, "process_elapsed": 64.27706159500008}, "form_locals": {"wall_elapsed": 15.491025436203927, "process_elapsed": 15.374123016999874}, "refine_locals": {"wall_elapsed": 0.797229612711817, "process_elapsed": 0.7972753879998891}, "eval_locals": {"wall_elapsed": 0.7185860709287226, "process_elapsed": 0.6837079179999819}}, {"nterms_fmm_total": 8000000, "direct_workload": 82707417, "direct_nsource_boxes": 441394, "m2l_workload": 8617704000, "m2p_workload": 1685913200, "m2p_nboxes": 1498566, "p2l_workload": 348565200, "p2l_nboxes": 382521, "eval_part_workload": 8000000, "form_multipoles": {"wall_elapsed": 0.8264692649245262, "process_elapsed": 0.5747898250001526}, "coarsen_multipoles": 
{"wall_elapsed": 1.5497954818420112, "process_elapsed": 1.3431830110000647}, "eval_direct": {"wall_elapsed": 29.598126132041216, "process_elapsed": 29.095733828999755}, "multipole_to_local": {"wall_elapsed": 273.25275476509705, "process_elapsed": 266.123896437}, "eval_multipoles": {"wall_elapsed": 71.07263175630942, "process_elapsed": 70.09043602299994}, "form_locals": {"wall_elapsed": 14.648473134730011, "process_elapsed": 14.53982591099998}, "refine_locals": {"wall_elapsed": 1.0756985042244196, "process_elapsed": 1.0682339970001067}, "eval_locals": {"wall_elapsed": 0.7669271482154727, "process_elapsed": 0.6153591639999831}}, {"nterms_fmm_total": 1000000, "direct_workload": 5064570, "direct_nsource_boxes": 56907, "m2l_workload": 879645000, "m2p_workload": 80054000, "m2p_nboxes": 148660, "p2l_workload": 58602800, "p2l_nboxes": 84384, "eval_part_workload": 1000000, "form_multipoles": {"wall_elapsed": 0.09652932407334447, "process_elapsed": 0.09385259800000023}, "coarsen_multipoles": {"wall_elapsed": 0.1438053478486836, "process_elapsed": 0.14380348099999996}, "eval_direct": {"wall_elapsed": 2.8724248111248016, "process_elapsed": 2.8874125049999955}, "multipole_to_local": {"wall_elapsed": 22.150050774216652, "process_elapsed": 22.163823834000002}, "eval_multipoles": {"wall_elapsed": 4.035081175155938, "process_elapsed": 4.018481735999998}, "form_locals": {"wall_elapsed": 2.4109095097519457, "process_elapsed": 2.410357944000001}, "refine_locals": {"wall_elapsed": 0.10400902340188622, "process_elapsed": 0.10400039700000008}, "eval_locals": {"wall_elapsed": 0.05630338191986084, "process_elapsed": 0.05630333900000295}}, {"nterms_fmm_total": 2000000, "direct_workload": 14111527, "direct_nsource_boxes": 105337, "m2l_workload": 1702853000, "m2p_workload": 202584200, "m2p_nboxes": 311728, "p2l_workload": 120805600, "p2l_nboxes": 146698, "eval_part_workload": 2000000, "form_multipoles": {"wall_elapsed": 0.10583456791937351, "process_elapsed": 0.1058346049999983}, "coarsen_multipoles": {"wall_elapsed": 0.24514786386862397, "process_elapsed": 0.24514414600000123}, "eval_direct": {"wall_elapsed": 5.353780907113105, "process_elapsed": 5.363511694999993}, "multipole_to_local": {"wall_elapsed": 40.435692673083395, "process_elapsed": 40.370689201000005}, "eval_multipoles": {"wall_elapsed": 8.241205959115177, "process_elapsed": 8.222697027999999}, "form_locals": {"wall_elapsed": 4.570903809741139, "process_elapsed": 4.547761195999996}, "refine_locals": {"wall_elapsed": 0.22210092516615987, "process_elapsed": 0.22209859000000165}, "eval_locals": {"wall_elapsed": 0.13560375943779945, "process_elapsed": 0.13560390499999642}}, {"nterms_fmm_total": 3000000, "direct_workload": 24428758, "direct_nsource_boxes": 223172, "m2l_workload": 4011675000, "m2p_workload": 670713600, "m2p_nboxes": 735176, "p2l_workload": 213069300, "p2l_nboxes": 259667, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.22054569516330957, "process_elapsed": 0.22053314199999363}, "coarsen_multipoles": {"wall_elapsed": 0.5125908548943698, "process_elapsed": 0.5085106519999982}, "eval_direct": {"wall_elapsed": 12.12822123710066, "process_elapsed": 12.215636038999989}, "multipole_to_local": {"wall_elapsed": 93.70578746590763, "process_elapsed": 93.417041737}, "eval_multipoles": {"wall_elapsed": 25.345406110864133, "process_elapsed": 25.348133571999995}, "form_locals": {"wall_elapsed": 8.22137835714966, "process_elapsed": 8.202794989000012}, "refine_locals": {"wall_elapsed": 0.41789698833599687, "process_elapsed": 
0.4185953409999854}, "eval_locals": {"wall_elapsed": 0.269101491663605, "process_elapsed": 0.26908321999999885}}, {"nterms_fmm_total": 4000000, "direct_workload": 29106936, "direct_nsource_boxes": 259688, "m2l_workload": 4679517000, "m2p_workload": 737745100, "m2p_nboxes": 887508, "p2l_workload": 262131700, "p2l_nboxes": 277715, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.17082596709951758, "process_elapsed": 0.1708260939999775}, "coarsen_multipoles": {"wall_elapsed": 0.4154431517235935, "process_elapsed": 0.42217062099999225}, "eval_direct": {"wall_elapsed": 11.69108117558062, "process_elapsed": 11.81256067199999}, "multipole_to_local": {"wall_elapsed": 105.88159589888528, "process_elapsed": 105.51405778699998}, "eval_multipoles": {"wall_elapsed": 28.176122231874615, "process_elapsed": 28.119217985000034}, "form_locals": {"wall_elapsed": 8.196291028987616, "process_elapsed": 8.181508215999997}, "refine_locals": {"wall_elapsed": 0.46452207770198584, "process_elapsed": 0.4564281270000379}, "eval_locals": {"wall_elapsed": 0.25099264504387975, "process_elapsed": 0.25098168199997417}}, {"nterms_fmm_total": 5000000, "direct_workload": 47923958, "direct_nsource_boxes": 321217, "m2l_workload": 6098447000, "m2p_workload": 1049959000, "m2p_nboxes": 1058414, "p2l_workload": 250873800, "p2l_nboxes": 307399, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.27000816026702523, "process_elapsed": 0.2720453750000047}, "coarsen_multipoles": {"wall_elapsed": 0.518343870062381, "process_elapsed": 0.5315893110000047}, "eval_direct": {"wall_elapsed": 15.501696541905403, "process_elapsed": 15.713185632000034}, "multipole_to_local": {"wall_elapsed": 145.782555392012, "process_elapsed": 145.32088750699995}, "eval_multipoles": {"wall_elapsed": 38.39985727425665, "process_elapsed": 38.26757356000007}, "form_locals": {"wall_elapsed": 7.733311468735337, "process_elapsed": 7.705839346999937}, "refine_locals": {"wall_elapsed": 0.5397246889770031, "process_elapsed": 0.540322248999928}, "eval_locals": {"wall_elapsed": 0.32426714105531573, "process_elapsed": 0.3242635839999366}}, {"nterms_fmm_total": 6000000, "direct_workload": 52175740, "direct_nsource_boxes": 365978, "m2l_workload": 6963407000, "m2p_workload": 1228104100, "m2p_nboxes": 1238954, "p2l_workload": 320901400, "p2l_nboxes": 340870, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.24512064224109054, "process_elapsed": 0.25092624599994906}, "coarsen_multipoles": {"wall_elapsed": 0.7491539157927036, "process_elapsed": 0.7398730170000363}, "eval_direct": {"wall_elapsed": 17.279178180731833, "process_elapsed": 17.591523772999835}, "multipole_to_local": {"wall_elapsed": 161.031417501159, "process_elapsed": 160.49498069599997}, "eval_multipoles": {"wall_elapsed": 45.24870359105989, "process_elapsed": 45.14435771700005}, "form_locals": {"wall_elapsed": 10.377049359958619, "process_elapsed": 10.342459472999963}, "refine_locals": {"wall_elapsed": 0.7589819608256221, "process_elapsed": 0.754924054000071}, "eval_locals": {"wall_elapsed": 0.4476486751809716, "process_elapsed": 0.4476429339999868}}, {"nterms_fmm_total": 7000000, "direct_workload": 67754792, "direct_nsource_boxes": 399866, "m2l_workload": 7699296000, "m2p_workload": 1423135200, "m2p_nboxes": 1352554, "p2l_workload": 330677000, "p2l_nboxes": 356695, "eval_part_workload": 7000000, "form_multipoles": {"wall_elapsed": 0.27594426879659295, "process_elapsed": 0.28047744100001637}, "coarsen_multipoles": {"wall_elapsed": 0.7236054469831288, 
"process_elapsed": 0.7297952440000017}, "eval_direct": {"wall_elapsed": 20.532363687176257, "process_elapsed": 20.546230066000135}, "multipole_to_local": {"wall_elapsed": 180.1073812278919, "process_elapsed": 179.505576249}, "eval_multipoles": {"wall_elapsed": 51.82399559998885, "process_elapsed": 51.64447850400006}, "form_locals": {"wall_elapsed": 13.09091425826773, "process_elapsed": 13.063926417999937}, "refine_locals": {"wall_elapsed": 0.6001745373941958, "process_elapsed": 0.600155986000118}, "eval_locals": {"wall_elapsed": 0.36805562302470207, "process_elapsed": 0.3680515439998544}}, {"nterms_fmm_total": 8000000, "direct_workload": 82707417, "direct_nsource_boxes": 441394, "m2l_workload": 8617704000, "m2p_workload": 1685913200, "m2p_nboxes": 1498566, "p2l_workload": 348565200, "p2l_nboxes": 382521, "eval_part_workload": 8000000, "form_multipoles": {"wall_elapsed": 0.39941675309091806, "process_elapsed": 0.3981763469998896}, "coarsen_multipoles": {"wall_elapsed": 0.9822220490314066, "process_elapsed": 1.0149941570000465}, "eval_direct": {"wall_elapsed": 21.01450498914346, "process_elapsed": 21.073397865999823}, "multipole_to_local": {"wall_elapsed": 197.67101062694564, "process_elapsed": 197.05391954499987}, "eval_multipoles": {"wall_elapsed": 58.286998888943344, "process_elapsed": 58.08302871900014}, "form_locals": {"wall_elapsed": 11.379268935415894, "process_elapsed": 11.344901745000016}, "refine_locals": {"wall_elapsed": 0.659439907874912, "process_elapsed": 0.6594181859998116}, "eval_locals": {"wall_elapsed": 0.48237756825983524, "process_elapsed": 0.47433469900011005}}] \ No newline at end of file -- GitLab From 7e81e208dfaaef97497f45f891ec1bb055a7b116 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 1 Aug 2018 08:38:37 -0500 Subject: [PATCH 146/260] Refactor default perf model --- boxtree/distributed/__init__.py | 6 +----- boxtree/distributed/perf_model.py | 6 ++++++ 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 9a0861f..008e6cf 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -100,11 +100,7 @@ class DistributedFMMInfo(object): model.loadjson(model_filename) if len(model.time_result) == 0: - import os - current_dir = os.path.dirname(os.path.abspath(__file__)) - default_perf_file_path = os.path.join( - current_dir, 'default_perf_model.json') - model.loadjson(default_perf_file_path) + model.load_default_model() counter = PerformanceCounter(global_trav, self.global_wrangler, True) diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index c0619f1..3ed55b8 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -630,3 +630,9 @@ class PerformanceModel: with open(filename, 'w') as f: json.dump(output, f) print("Save {} records to disk.".format(len(self.time_result))) + + def load_default_model(self): + import os + current_dir = os.path.dirname(os.path.abspath(__file__)) + default_perf_file_path = os.path.join(current_dir, 'default_perf_model.json') + self.loadjson(default_perf_file_path) -- GitLab From c9a6a89533e31eebe7bf273af4d2a22fae27e884 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 3 Aug 2018 11:22:31 -0500 Subject: [PATCH 147/260] Refactor direct eval source box counting --- boxtree/distributed/__init__.py | 1 + boxtree/distributed/partition.py | 6 ++--- boxtree/distributed/perf_model.py | 39 ++++++++++++++++++++++++++++--- examples/demo_perf_model.py | 9 ++++--- 4 files changed, 43 
insertions(+), 12 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 008e6cf..84f36d2 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -96,6 +96,7 @@ class DistributedFMMInfo(object): distributed_expansion_wrangler_factory, True, drive_fmm ) + if model_filename is not None: model.loadjson(model_filename) diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index 0b24ecd..fd88955 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -46,10 +46,8 @@ def partition_work(perf_model, perf_counter, traversal, total_rank): param = perf_model.eval_direct_model() direct_workload = perf_counter.count_direct(use_global_idx=True) - ndirect_source_boxes = np.zeros((tree.nboxes,), dtype=np.intp) - ndirect_source_boxes[traversal.target_boxes] = ( - traversal.neighbor_source_boxes_starts[1:] - - traversal.neighbor_source_boxes_starts[:-1] + ndirect_source_boxes = perf_counter.count_direct_source_boxes( + use_global_idx=True ) time_increment += (direct_workload * param[0] + ndirect_source_boxes * param[1]) diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index 3ed55b8..b648e8f 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -203,6 +203,36 @@ class PerformanceCounter: return direct_workload + def count_direct_source_boxes(self): + """ + Note: This method does not have a 'use_global_idx' argument because list 1 + and list 3 near box list is indexed like 'target_boxes' while list 4 near box + list is indexed like 'target_or_target_parent_boxes'. + """ + traversal = self.traversal + tree = traversal.tree + + ndirect_src_boxes = np.zeros((tree.nboxes,), dtype=np.intp) + + ndirect_src_boxes[traversal.target_boxes] += ( + traversal.neighbor_source_boxes_starts[1:] + - traversal.neighbor_source_boxes_starts[:-1] + ) + + if traversal.from_sep_close_smaller_starts is not None: + ndirect_src_boxes[traversal.target_boxes] += ( + traversal.from_sep_close_smaller_starts[1:] + - traversal.from_sep_close_smaller_starts[:-1] + ) + + if traversal.from_sep_close_bigger_starts is not None: + ndirect_src_boxes[traversal.target_or_target_parent_boxes] += ( + traversal.from_sep_close_bigger_starts[1:] + - traversal.from_sep_close_bigger_starts[:-1] + ) + + return ndirect_src_boxes + def count_m2l(self, use_global_idx=False): """ :return: If *use_global_idx* is True, return a numpy array of shape @@ -368,7 +398,7 @@ class PerformanceModel: timing_data = { "nterms_fmm_total": counter.count_nters_fmm_total(), "direct_workload": np.sum(counter.count_direct()), - "direct_nsource_boxes": traversal.neighbor_source_boxes_starts[-1], + "direct_nsource_boxes": np.sum(counter.count_direct_source_boxes()), "m2l_workload": np.sum(counter.count_m2l()), "m2p_workload": np.sum(nm2p), "m2p_nboxes": np.sum(nm2p_boxes), @@ -493,7 +523,7 @@ class PerformanceModel: for trav in traversals: self.time_performance(trav) - def predict_time(self, eval_traversal, eval_counter, wall_time=True): + def predict_step_time(self, eval_counter, wall_time=True): predict_timing = {} # {{{ Predict eval_direct @@ -501,7 +531,7 @@ class PerformanceModel: param = self.eval_direct_model(wall_time=wall_time) direct_workload = np.sum(eval_counter.count_direct()) - direct_nsource_boxes = eval_traversal.neighbor_source_boxes_starts[-1] + direct_nsource_boxes = np.sum(eval_counter.count_direct_source_boxes()) predict_timing["eval_direct"] = ( direct_workload 
* param[0] + direct_nsource_boxes * param[1] + param[2]) @@ -578,6 +608,7 @@ class PerformanceModel: try: with open(filename, 'r') as f: loaded_results = json.load(f) + for current_result in loaded_results: converted_result = {} @@ -598,6 +629,8 @@ class PerformanceModel: self.time_result.append(converted_result) + print("Load {} records from disk.".format(len(loaded_results))) + except IOError: print("Cannot open file '" + filename + "'") except EOFError: diff --git a/examples/demo_perf_model.py b/examples/demo_perf_model.py index 318938e..662bd2c 100644 --- a/examples/demo_perf_model.py +++ b/examples/demo_perf_model.py @@ -25,7 +25,7 @@ wrangler_factory = functools.partial( def train_model(): model = PerformanceModel(context, wrangler_factory, True, drive_fmm) - model.load('model') + model.loadjson('model.json') test_cases = [ (9000, 9000), @@ -42,7 +42,7 @@ def train_model(): trav = generate_random_traversal(context, nsources, ntargets, dims, dtype) model.time_performance(trav) - model.save('model') + model.savejson('model.json') def eval_model(): @@ -60,10 +60,9 @@ def eval_model(): eval_counter = PerformanceCounter(eval_traversal, eval_wrangler, True) model = PerformanceModel(context, wrangler_factory, True, drive_fmm) - model.load('model') + model.loadjson('model.json') - predict_timing = model.predict_time(eval_traversal, eval_counter, - wall_time=wall_time) + predict_timing = model.predict_step_time(eval_counter, wall_time=wall_time) # }}} -- GitLab From d177aa70fcb582d3166497678e5edff87f2b16a8 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 3 Aug 2018 15:27:02 -0500 Subject: [PATCH 148/260] Refactor boxes time prediction --- boxtree/distributed/__init__.py | 6 +-- boxtree/distributed/partition.py | 33 +++------------- boxtree/distributed/perf_model.py | 63 +++++++++++++++++++++++++++++++ 3 files changed, 70 insertions(+), 32 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 84f36d2..d5e9979 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -25,7 +25,7 @@ THE SOFTWARE. from mpi4py import MPI import numpy as np -from boxtree.distributed.perf_model import PerformanceModel, PerformanceCounter +from boxtree.distributed.perf_model import PerformanceModel MPITags = dict( DIST_TREE=0, @@ -103,8 +103,6 @@ class DistributedFMMInfo(object): if len(model.time_result) == 0: model.load_default_model() - counter = PerformanceCounter(global_trav, self.global_wrangler, True) - # }}} # {{{ Partiton work @@ -112,7 +110,7 @@ class DistributedFMMInfo(object): if current_rank == 0: from boxtree.distributed.partition import partition_work responsible_boxes_list = partition_work( - model, counter, global_trav, comm.Get_size() + model, global_trav, comm.Get_size() ) else: responsible_boxes_list = None diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index fd88955..4ca071f 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -29,12 +29,13 @@ from pyopencl.tools import dtype_to_ctype from mako.template import Template -def partition_work(perf_model, perf_counter, traversal, total_rank): +def partition_work(perf_model, traversal, total_rank): """ This function assigns responsible boxes of each process. Each process is responsible for calculating the multiple expansions as well as evaluating target potentials in *responsible_boxes*. + :arg perf_model: A boxtree.distributed.perf_model.PerformanceModel object. 
:arg traversal: The traversal object built on root containing all particles. :arg total_rank: The total number of processes. :return: A numpy array of shape (total_rank,), where the ith element is an numpy @@ -42,35 +43,11 @@ def partition_work(perf_model, perf_counter, traversal, total_rank): """ tree = traversal.tree - time_increment = np.zeros((tree.nboxes,), dtype=np.float64) - - param = perf_model.eval_direct_model() - direct_workload = perf_counter.count_direct(use_global_idx=True) - ndirect_source_boxes = perf_counter.count_direct_source_boxes( - use_global_idx=True - ) - time_increment += (direct_workload * param[0] + ndirect_source_boxes * param[1]) - - param = perf_model.multipole_to_local_model() - m2l_workload = perf_counter.count_m2l(use_global_idx=True) - time_increment += (m2l_workload * param[0]) - - param = perf_model.eval_multipoles_model() - m2p_workload, m2p_nboxes = perf_counter.count_m2p(use_global_idx=True) - time_increment += (m2p_workload * param[0] + m2p_nboxes * param[1]) - - param = perf_model.form_locals_model() - p2l_workload = perf_counter.count_p2l(use_global_idx=True) - p2l_nboxes = perf_counter.count_p2l_source_boxes(use_global_idx=True) - time_increment += (p2l_workload * param[0] + p2l_nboxes * param[1]) - - param = perf_model.eval_locals_model() - eval_part_workload = perf_counter.count_eval_part(use_global_idx=True) - time_increment += (eval_part_workload * param[0]) + boxes_time = perf_model.predict_boxes_time(traversal) total_workload = 0 for i in range(tree.nboxes): - total_workload += time_increment[i] + total_workload += boxes_time[i] # transform tree from level order to dfs order dfs_order = np.empty((tree.nboxes,), dtype=tree.box_id_dtype) @@ -96,7 +73,7 @@ def partition_work(perf_model, perf_counter, traversal, total_rank): break box_idx = dfs_order[i] - workload_count += time_increment[box_idx] + workload_count += boxes_time[box_idx] if (workload_count > (rank + 1)*total_workload/total_rank or i == tree.nboxes - 1): responsible_boxes_list[rank] = dfs_order[start:i+1] diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index b648e8f..62cfcf2 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -588,6 +588,69 @@ class PerformanceModel: return predict_timing + def predict_boxes_time(self, traversal): + tree = traversal.tree + wrangler = self.wrangler_factory(tree) + counter = PerformanceCounter(traversal, wrangler, self.uses_pde_expansions) + + boxes_time = np.zeros((tree.nboxes,), dtype=np.float64) + + # {{{ eval_direct time + + param = self.eval_direct_model() + + direct_workload = counter.count_direct(use_global_idx=True) + ndirect_source_boxes = counter.count_direct_source_boxes() + + boxes_time += (direct_workload * param[0] + + ndirect_source_boxes * param[1] + + param[2]) + + # }}} + + # {{{ multipole_to_local time + + param = self.multipole_to_local_model() + + m2l_workload = counter.count_m2l(use_global_idx=True) + + boxes_time += (m2l_workload * param[0] + param[1]) + + # }}} + + # {{{ eval_multipoles time + + param = self.eval_multipoles_model() + + m2p_workload, m2p_nboxes = counter.count_m2p(use_global_idx=True) + + boxes_time += (m2p_workload * param[0] + m2p_nboxes * param[1] + param[2]) + + # }}} + + # {{{ form_locals time + + param = self.form_locals_model() + + p2l_workload = counter.count_p2l(use_global_idx=True) + p2l_nboxes = counter.count_p2l_source_boxes(use_global_idx=True) + + boxes_time += (p2l_workload * param[0] + p2l_nboxes * param[1] + param[2]) + + 
# }}} + + # {{{ eval_part time + + param = self.eval_locals_model() + + eval_part_workload = counter.count_eval_part(use_global_idx=True) + + boxes_time += (eval_part_workload * param[0] + param[1]) + + # }}} + + return boxes_time + def save(self, filename): with open(filename, 'wb') as f: pickle.dump(self.time_result, f) -- GitLab From 1b6ea9d8af1dd3aa0103e10cca030c1d7edd9b09 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sat, 4 Aug 2018 17:21:44 -0500 Subject: [PATCH 149/260] Tweak interface --- boxtree/distributed/__init__.py | 4 +++- boxtree/distributed/partition.py | 6 ++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index d5e9979..572f585 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -108,9 +108,11 @@ class DistributedFMMInfo(object): # {{{ Partiton work if current_rank == 0: + boxes_time = model.predict_boxes_time(global_trav) + from boxtree.distributed.partition import partition_work responsible_boxes_list = partition_work( - model, global_trav, comm.Get_size() + boxes_time, global_trav, comm.Get_size() ) else: responsible_boxes_list = None diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index 4ca071f..ecaa77c 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -29,13 +29,13 @@ from pyopencl.tools import dtype_to_ctype from mako.template import Template -def partition_work(perf_model, traversal, total_rank): +def partition_work(boxes_time, traversal, total_rank): """ This function assigns responsible boxes of each process. Each process is responsible for calculating the multiple expansions as well as evaluating target potentials in *responsible_boxes*. - :arg perf_model: A boxtree.distributed.perf_model.PerformanceModel object. + :arg boxes_time: The expected running time of each box. :arg traversal: The traversal object built on root containing all particles. :arg total_rank: The total number of processes. 
:return: A numpy array of shape (total_rank,), where the ith element is an numpy @@ -43,8 +43,6 @@ def partition_work(perf_model, traversal, total_rank): """ tree = traversal.tree - boxes_time = perf_model.predict_boxes_time(traversal) - total_workload = 0 for i in range(tree.nboxes): total_workload += boxes_time[i] -- GitLab From eabc6229e0493eedc82d994b24528f059e7b9ff3 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 7 Aug 2018 10:53:27 -0500 Subject: [PATCH 150/260] Remove FMM driver argument from perf model --- boxtree/distributed/perf_model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index 62cfcf2..1ddae43 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -376,11 +376,10 @@ class PerformanceCounter: class PerformanceModel: - def __init__(self, cl_context, wrangler_factory, uses_pde_expansions, drive_fmm): + def __init__(self, cl_context, wrangler_factory, uses_pde_expansions): self.cl_context = cl_context self.wrangler_factory = wrangler_factory self.uses_pde_expansions = uses_pde_expansions - self.drive_fmm = drive_fmm self.time_result = [] @@ -416,7 +415,8 @@ class PerformanceModel: ).get() # Time a FMM run - self.drive_fmm(traversal, wrangler, source_weights, timing_data=timing_data) + from boxtree.fmm import drive_fmm + drive_fmm(traversal, wrangler, source_weights, timing_data=timing_data) self.time_result.append(timing_data) -- GitLab From 468f2cae20cd0aac8aa1af7832bb1413f1c9139c Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 7 Aug 2018 21:06:05 -0500 Subject: [PATCH 151/260] Remove wrangler factory for performance model --- boxtree/distributed/__init__.py | 9 ++------- boxtree/distributed/perf_model.py | 10 +++------- 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 572f585..044ff63 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -90,12 +90,7 @@ class DistributedFMMInfo(object): # {{{ Get performance model and counter if current_rank == 0: - from boxtree.fmm import drive_fmm - model = PerformanceModel( - queue.context, - distributed_expansion_wrangler_factory, - True, drive_fmm - ) + model = PerformanceModel(queue.context, True) if model_filename is not None: model.loadjson(model_filename) @@ -108,7 +103,7 @@ class DistributedFMMInfo(object): # {{{ Partiton work if current_rank == 0: - boxes_time = model.predict_boxes_time(global_trav) + boxes_time = model.predict_boxes_time(global_trav, self.global_wrangler) from boxtree.distributed.partition import partition_work responsible_boxes_list = partition_work( diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index 1ddae43..dbe300d 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -376,9 +376,8 @@ class PerformanceCounter: class PerformanceModel: - def __init__(self, cl_context, wrangler_factory, uses_pde_expansions): + def __init__(self, cl_context, uses_pde_expansions): self.cl_context = cl_context - self.wrangler_factory = wrangler_factory self.uses_pde_expansions = uses_pde_expansions self.time_result = [] @@ -386,9 +385,7 @@ class PerformanceModel: from pyopencl.clrandom import PhiloxGenerator self.rng = PhiloxGenerator(cl_context) - def time_performance(self, traversal): - wrangler = self.wrangler_factory(tree=traversal.tree) - + def time_performance(self, traversal, wrangler): counter = 
PerformanceCounter(traversal, wrangler, self.uses_pde_expansions) # Record useful metadata for assembling performance data @@ -588,9 +585,8 @@ class PerformanceModel: return predict_timing - def predict_boxes_time(self, traversal): + def predict_boxes_time(self, traversal, wrangler): tree = traversal.tree - wrangler = self.wrangler_factory(tree) counter = PerformanceCounter(traversal, wrangler, self.uses_pde_expansions) boxes_time = np.zeros((tree.nboxes,), dtype=np.float64) -- GitLab From e71b8a5e36d303bc1913c5abbd04959a5ef4590b Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 8 Aug 2018 12:22:40 -0500 Subject: [PATCH 152/260] __add__ for TimingResult --- boxtree/distributed/perf_model.py | 4 ---- boxtree/fmm.py | 6 ++++++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index dbe300d..5c832ff 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -713,10 +713,6 @@ class PerformanceModel: 'process_elapsed': entry.process_elapsed } - else: - print(type(entry)) - raise RuntimeError("Unknown type in result") - output.append(current_output) with open(filename, 'w') as f: diff --git a/boxtree/fmm.py b/boxtree/fmm.py index e4f494a..6e9a6a3 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -368,6 +368,12 @@ class TimingResult(Record): wall_elapsed=wall_elapsed, process_elapsed=process_elapsed) + def __add__(self, other): + return TimingResult( + self.wall_elapsed + other.wall_elapsed, + self.process_elapsed + other.process_elapsed + ) + # }}} -- GitLab From bc306aa3d02a89eef77c4d63c27c81e81f59c49a Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 13 Aug 2018 10:30:45 -0500 Subject: [PATCH 153/260] Add box_target_counts_nonchild kwarg --- boxtree/distributed/perf_model.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index 5c832ff..3210303 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -154,7 +154,7 @@ class PerformanceCounter: return nterms_fmm_total - def count_direct(self, use_global_idx=False): + def count_direct(self, use_global_idx=False, box_target_counts_nonchild=None): """ :return: If *use_global_idx* is True, return a numpy array of shape (tree.nboxes,) such that the ith entry represents the workload from @@ -165,6 +165,9 @@ class PerformanceCounter: traversal = self.traversal tree = traversal.tree + if box_target_counts_nonchild is None: + box_target_counts_nonchild = tree.box_target_counts_nonchild + if use_global_idx: direct_workload = np.zeros((tree.nboxes,), dtype=np.intp) else: @@ -172,7 +175,7 @@ class PerformanceCounter: direct_workload = np.zeros((ntarget_boxes,), dtype=np.intp) for itgt_box, tgt_ibox in enumerate(traversal.target_boxes): - ntargets = tree.box_target_counts_nonchild[tgt_ibox] + ntargets = box_target_counts_nonchild[tgt_ibox] nsources = 0 start, end = traversal.neighbor_source_boxes_starts[itgt_box:itgt_box+2] @@ -277,7 +280,7 @@ class PerformanceCounter: return nm2l - def count_m2p(self, use_global_idx=False): + def count_m2p(self, use_global_idx=False, box_target_counts_nonchild=None): trav = self.traversal tree = trav.tree @@ -288,12 +291,15 @@ class PerformanceCounter: nm2p = np.zeros((len(trav.target_boxes),), dtype=np.intp) nm2p_boxes = np.zeros((len(trav.target_boxes),), dtype=np.intp) + if box_target_counts_nonchild is None: + box_target_counts_nonchild = 
tree.box_target_counts_nonchild + for ilevel, sep_smaller_list in enumerate(trav.from_sep_smaller_by_level): ncoeffs_fmm_cur_level = self.parameters.ncoeffs_fmm_by_level[ilevel] tgt_box_list = trav.target_boxes_sep_smaller_by_source_level[ilevel] for itgt_box, tgt_ibox in enumerate(tgt_box_list): - ntargets = tree.box_target_counts_nonchild[tgt_ibox] + ntargets = box_target_counts_nonchild[tgt_ibox] start, end = sep_smaller_list.starts[itgt_box:itgt_box + 2] @@ -351,7 +357,7 @@ class PerformanceCounter: else: return p2l_nsource_boxes - def count_eval_part(self, use_global_idx=False): + def count_eval_part(self, use_global_idx=False, box_target_counts_nonchild=None): trav = self.traversal tree = trav.tree parameters = self.parameters @@ -361,8 +367,11 @@ class PerformanceCounter: else: neval_part = np.zeros(len(trav.target_boxes), dtype=np.intp) + if box_target_counts_nonchild is None: + box_target_counts_nonchild = tree.box_target_counts_nonchild + for itgt_box, tgt_ibox in enumerate(trav.target_boxes): - ntargets = tree.box_target_counts_nonchild[tgt_ibox] + ntargets = box_target_counts_nonchild[tgt_ibox] tgt_box_level = trav.tree.box_levels[tgt_ibox] ncoeffs_fmm = parameters.ncoeffs_fmm_by_level[tgt_box_level] -- GitLab From 22bf0c7dbea7415c195d55f8c59d8f93d4b1f847 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Thu, 16 Aug 2018 19:57:51 -0500 Subject: [PATCH 154/260] Fix syntax error --- boxtree/tools.py | 1 - 1 file changed, 1 deletion(-) diff --git a/boxtree/tools.py b/boxtree/tools.py index 9b1d318..54a60d3 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -982,7 +982,6 @@ class ConstantOneExpansionWrangler(object): def finalize_potentials(self, potentials): return potentials ->>>>>>> move-constant-one-wrangler-to-tools # }}} -- GitLab From 08941f5df1772b9a98af8fe8ab7ac0c84e7bc46c Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Thu, 16 Aug 2018 19:58:16 -0500 Subject: [PATCH 155/260] Update __add__ --- boxtree/fmm.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index e717d66..3002888 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -403,11 +403,7 @@ class TimingResult(Mapping): return type(self)(result) - def __add__(self, other): - return TimingResult( - self.wall_elapsed + other.wall_elapsed, - self.process_elapsed + other.process_elapsed - ) + __add__ = merge # }}} -- GitLab From 3b9893e4b9660b4847f8bc63636ea1bdb0c413e4 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Thu, 16 Aug 2018 20:01:54 -0500 Subject: [PATCH 156/260] Update uses of TimingResult in the performance model to new interface --- boxtree/distributed/perf_model.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index 3210303..b036b4e 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -476,9 +476,9 @@ class PerformanceModel: result = self.time_result[0] if wall_time: - dependent_value = result[y_name].wall_elapsed + dependent_value = result[y_name]["wall_elapsed"] else: - dependent_value = result[y_name].process_elapsed + dependent_value = result[y_name]["process_elapsed"] independent_value = result[x_name[0]] coeff = dependent_value / independent_value @@ -490,9 +490,9 @@ class PerformanceModel: for iresult, result in enumerate(self.time_result): if wall_time: - dependent_value[iresult] = result[y_name].wall_elapsed + dependent_value[iresult] = result[y_name]["wall_elapsed"] else: - dependent_value[iresult] = 
result[y_name].process_elapsed + dependent_value[iresult] = result[y_name]["process_elapsed"] for icol, variable_name in enumerate(x_name): coeff_matrix[iresult, icol] = result[variable_name] @@ -718,8 +718,8 @@ class PerformanceModel: elif isinstance(entry, TimingResult): current_output[field_name] = { - 'wall_elapsed': entry.wall_elapsed, - 'process_elapsed': entry.process_elapsed + 'wall_elapsed': entry.get("wall_elapsed"), + 'process_elapsed': entry.get("process_elapsed") } output.append(current_output) -- GitLab From f0e56de252b21da7d9c46ec1e27084ffe95d874f Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Thu, 16 Aug 2018 20:38:59 -0500 Subject: [PATCH 157/260] Another fix --- boxtree/distributed/perf_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index b036b4e..34eb45c 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -688,8 +688,8 @@ class PerformanceModel: elif isinstance(entry, dict): converted_result[field_name] = TimingResult( - entry['wall_elapsed'], - entry['process_elapsed'] + wall_elapsed=entry['wall_elapsed'], + process_elapsed=entry['process_elapsed'] ) else: -- GitLab From a015815c800fdcaea234c2399249ea4042fd547d Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Thu, 16 Aug 2018 21:18:10 -0500 Subject: [PATCH 158/260] Fixes for reindexing List 4 close --- boxtree/distributed/calculation.py | 2 +- boxtree/distributed/partition.py | 2 +- boxtree/distributed/perf_model.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 107cb76..e565fe5 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -477,7 +477,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, "('list 4 close')") potentials = potentials + local_wrangler.eval_direct( - local_trav.target_or_target_parent_boxes, + local_trav.target_boxes, local_trav.from_sep_close_bigger_starts, local_trav.from_sep_close_bigger_lists, local_src_weights)[0] diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index ecaa77c..453b73f 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -264,7 +264,7 @@ class ResponsibleBoxesQuery(object): # Add list 4 close of responsible boxes if self.traversal.from_sep_close_bigger_starts is not None: self.add_interaction_list_boxes( - self.target_or_target_parent_boxes_dev, + self.target_boxes_dev, responsible_boxes_mask | ancestor_boxes_mask, self.from_sep_close_bigger_starts_dev, self.from_sep_close_bigger_lists_dev, diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index 34eb45c..049d541 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -229,7 +229,7 @@ class PerformanceCounter: ) if traversal.from_sep_close_bigger_starts is not None: - ndirect_src_boxes[traversal.target_or_target_parent_boxes] += ( + ndirect_src_boxes[traversal.target_boxes] += ( traversal.from_sep_close_bigger_starts[1:] - traversal.from_sep_close_bigger_starts[:-1] ) @@ -326,7 +326,7 @@ class PerformanceCounter: else: np2l = np.zeros(len(trav.target_or_target_parent_boxes), dtype=np.intp) - for itgt_box, tgt_ibox in enumerate(trav.target_or_target_parent_boxes): + for itgt_box, tgt_ibox in enumerate(trav.target_boxes): tgt_box_level = trav.tree.box_levels[tgt_ibox] ncoeffs = 
parameters.ncoeffs_fmm_by_level[tgt_box_level] -- GitLab From 2e03c0c4fbb583f71999dab1c7dd735c6ccefd9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Thu, 16 Aug 2018 23:13:11 -0400 Subject: [PATCH 159/260] [WIP] Try to fix CIs on distributed-fmm-global branch --- .gitlab-ci.yml | 18 ++++++++++++++---- test/test_distributed.py | 4 ++-- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8b99942..40adf72 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -24,7 +24,6 @@ Python 3.5 K40: - nvidia-k40 except: - tags - - distributed-fmm-global Python 2.7 POCL: script: @@ -38,7 +37,6 @@ Python 2.7 POCL: - pocl except: - tags - - distributed-fmm-global Python 3.5 POCL: script: @@ -52,7 +50,6 @@ Python 3.5 POCL: - pocl except: - tags - - distributed-fmm-global Python 3.6 POCL: script: @@ -66,7 +63,20 @@ Python 3.6 POCL: - pocl except: - tags - - distributed-fmm-global + +Python 3.6 POCL MPI: + script: + - export PY_EXE=python3.6 + - export PYOPENCL_TEST=portable + - export EXTRA_INSTALL="numpy mako mpi4py" + - export PYTEST_ADDOPTS="-k mpi" + - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project.sh + - ". ./build-and-test-py-project.sh" + tags: + - python3.6 + - pocl + except: + - tags Documentation: script: diff --git a/test/test_distributed.py b/test/test_distributed.py index b320c0c..333e77e 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -1,6 +1,5 @@ import numpy as np import pyopencl as cl -from mpi4py import MPI from boxtree.distributed.calculation import DistributedFMMLibExpansionWrangler from boxtree.distributed import DistributedFMMInfo import numpy.linalg as la @@ -15,7 +14,8 @@ logging.getLogger("boxtree.distributed").setLevel(logging.INFO) def _test_against_shared(dims, nsources, ntargets, dtype): - + from mpi4py import MPI + # Get the current rank comm = MPI.COMM_WORLD rank = comm.Get_rank() -- GitLab From b357b38384f446f88f48f636b04d73e69fb97337 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Thu, 16 Aug 2018 23:20:26 -0400 Subject: [PATCH 160/260] Use setuptools.find_packages() in setup.py --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 32cca6a..eb8bbbb 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ def main(): - from setuptools import setup + from setuptools import setup, find_packages version_dict = {} init_filename = "boxtree/version.py" @@ -39,7 +39,7 @@ def main(): 'Topic :: Utilities', ], - packages=["boxtree"], + packages=find_packages(), install_requires=[ "pytools>=2018.4", "pyopencl>=2013.1", -- GitLab From 8886b41fbf3aac4ec03690e1135b4e17e1120109 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Thu, 16 Aug 2018 23:23:36 -0400 Subject: [PATCH 161/260] Flake8 fixes --- test/test_distributed.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/test_distributed.py b/test/test_distributed.py index 333e77e..6a1f5ff 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -15,7 +15,7 @@ logging.getLogger("boxtree.distributed").setLevel(logging.INFO) def _test_against_shared(dims, nsources, ntargets, dtype): from mpi4py import MPI - + # Get the current rank comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -274,6 +274,7 @@ class ConstantOneExpansionWrangler(object): def _test_constantone(dims, nsources, ntargets, dtype): + from mpi4py import MPI # Get the current rank comm = MPI.COMM_WORLD 
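[Editor's note] The CI and test changes in the last few patches, together with the invocation tweaks later in this series (mpiexec with "-m mpi4py.run", OMP_NUM_THREADS=1, and importing mpi4py.MPI only inside the test functions), all serve a single pattern: the pytest process re-launches the same file under mpiexec, and each rank then runs only the rank-local test body. Below is a minimal, self-contained sketch of that pattern; it is not code from this series, and the helper names (run_under_mpiexec, _rank_local_test) and the exact parameter plumbing are illustrative assumptions only.

import os
import subprocess
import sys


def run_under_mpiexec(num_processes, dims, nsources, ntargets):
    # Hand the test parameters to every rank through the environment.
    env = os.environ.copy()
    env.update({
        "PYTEST": "1",
        "dims": str(dims),
        "nsources": str(nsources),
        "ntargets": str(ntargets),
        # Keep OpenMP from oversubscribing cores that the MPI ranks share.
        "OMP_NUM_THREADS": "1",
    })

    # Re-invoke this file under mpiexec. Going through "-m mpi4py.run" makes
    # an uncaught exception on any rank abort the whole job instead of
    # leaving the remaining ranks (and hence the CI runner) deadlocked.
    subprocess.run(
        ["mpiexec", "-np", str(num_processes),
         sys.executable, "-m", "mpi4py.run", __file__],
        env=env, check=True)


def _rank_local_test(dims, nsources, ntargets):
    # Import MPI only here: OpenMPI does not support recursive invocation,
    # so a module-level import is deliberately avoided.
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    print("rank", comm.Get_rank(), "of", comm.Get_size(),
          "running with", dims, nsources, ntargets)


if __name__ == "__main__":
    if "PYTEST" in os.environ:
        _rank_local_test(
            int(os.environ["dims"]),
            int(os.environ["nsources"]),
            int(os.environ["ntargets"]))

With this split, the pytest-visible test function only performs the subprocess call, so pytest fixtures and MPI never meet in the same process.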
-- GitLab From 8d75e57515e9521d5f75e4c8ce8824bced336e37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Thu, 16 Aug 2018 23:34:03 -0400 Subject: [PATCH 162/260] Localize a few more imports in test_distributed.py --- test/test_distributed.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/test_distributed.py b/test/test_distributed.py index 6a1f5ff..b79082d 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -1,7 +1,5 @@ import numpy as np import pyopencl as cl -from boxtree.distributed.calculation import DistributedFMMLibExpansionWrangler -from boxtree.distributed import DistributedFMMInfo import numpy.linalg as la from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler import logging @@ -71,9 +69,12 @@ def _test_against_shared(dims, nsources, ntargets, dtype): # Compute FMM using distributed memory parallelism def distributed_expansion_wrangler_factory(tree): + from boxtree.distributed.calculation import DistributedFMMLibExpansionWrangler + return DistributedFMMLibExpansionWrangler( queue, tree, helmholtz_k, fmm_level_to_nterms=fmm_level_to_nterms) + from boxtree.distributed import DistributedFMMInfo distribued_fmm_info = DistributedFMMInfo( queue, trav, distributed_expansion_wrangler_factory, comm=comm) pot_dfmm = distribued_fmm_info.drive_dfmm(sources_weights) -- GitLab From a33ba810bc03d2da1f4d9618999f5d5ee59c28e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Thu, 16 Aug 2018 23:39:18 -0400 Subject: [PATCH 163/260] Flake 8 fix --- test/test_distributed.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/test_distributed.py b/test/test_distributed.py index b79082d..300cef9 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -69,7 +69,8 @@ def _test_against_shared(dims, nsources, ntargets, dtype): # Compute FMM using distributed memory parallelism def distributed_expansion_wrangler_factory(tree): - from boxtree.distributed.calculation import DistributedFMMLibExpansionWrangler + from boxtree.distributed.calculation import \ + DistributedFMMLibExpansionWrangler return DistributedFMMLibExpansionWrangler( queue, tree, helmholtz_k, fmm_level_to_nterms=fmm_level_to_nterms) -- GitLab From 9730b0bbb446f5044b55479a4b007e675f71c760 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 17 Aug 2018 00:16:11 -0500 Subject: [PATCH 164/260] Make statsmodels optional --- boxtree/distributed/perf_model.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index 049d541..17aa2ae 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -499,15 +499,19 @@ class PerformanceModel: coeff_matrix[:, -1] = 1 - """ - from numpy.linalg import lstsq - coeff = lstsq(coeff_matrix, dependent_value, rcond=-1)[0] - """ - import statsmodels.api as sm - rlm_model = sm.RLM(dependent_value, coeff_matrix) - rlm_result = rlm_model.fit() - - return rlm_result.params + try: + import statsmodels.api as sm + rlm_model = sm.RLM(dependent_value, coeff_matrix) + rlm_result = rlm_model.fit() + coeff = rlm_result.params + except ImportError: + import warnings + warnings.warn("statsmodels package not installed") + + from numpy.linalg import lstsq + coeff = lstsq(coeff_matrix, dependent_value, rcond=-1)[0] + + return coeff def time_random_traversals(self): context = self.cl_context -- GitLab From 7f91df3f626422d49295463f71c74bd138f33c05 Mon Sep 17 
00:00:00 2001 From: Hao Gao Date: Fri, 17 Aug 2018 00:29:34 -0500 Subject: [PATCH 165/260] More detailed warning on statsmodels --- boxtree/distributed/perf_model.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index 17aa2ae..7075244 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -506,7 +506,8 @@ class PerformanceModel: coeff = rlm_result.params except ImportError: import warnings - warnings.warn("statsmodels package not installed") + warnings.warn("Statsmodels package not found. Install to obtain more" + "robust regression.") from numpy.linalg import lstsq coeff = lstsq(coeff_matrix, dependent_value, rcond=-1)[0] -- GitLab From 1de75ce3b9c865927f63c9533d5b1c7db8549e1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Fri, 17 Aug 2018 01:31:29 -0400 Subject: [PATCH 166/260] Add PY_EXTRA_FLAGS="-m mpi4py.run" to help avoid stuck MPI-based CIs --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 40adf72..3494946 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -70,6 +70,7 @@ Python 3.6 POCL MPI: - export PYOPENCL_TEST=portable - export EXTRA_INSTALL="numpy mako mpi4py" - export PYTEST_ADDOPTS="-k mpi" + - export PY_EXTRA_FLAGS="-m mpi4py.run" - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project.sh - ". ./build-and-test-py-project.sh" tags: -- GitLab From 2446df2291c52e953457a299206f38286aaf1838 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Fri, 17 Aug 2018 01:50:38 -0400 Subject: [PATCH 167/260] Use -m mpi4py.run in spawning subprocesses --- .gitlab-ci.yml | 1 - test/test_distributed.py | 6 ++++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3494946..40adf72 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -70,7 +70,6 @@ Python 3.6 POCL MPI: - export PYOPENCL_TEST=portable - export EXTRA_INSTALL="numpy mako mpi4py" - export PYTEST_ADDOPTS="-k mpi" - - export PY_EXTRA_FLAGS="-m mpi4py.run" - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project.sh - ". 
./build-and-test-py-project.sh" tags: diff --git a/test/test_distributed.py b/test/test_distributed.py index 300cef9..aa50100 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -105,7 +105,8 @@ def test_against_shared(num_processes, dims, nsources, ntargets): subprocess.run([ "mpiexec", "-np", str(num_processes), "-x", "PYTEST", "-x", "dims", "-x", "nsources", "-x", "ntargets", - sys.executable, __file__], + # https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html + sys.executable, "-m", "mpi4py.run", __file__], env=newenv, check=True ) @@ -349,7 +350,8 @@ def test_constantone(num_processes, dims, nsources, ntargets): subprocess.run([ "mpiexec", "-np", str(num_processes), "-x", "PYTEST", "-x", "dims", "-x", "nsources", "-x", "ntargets", - sys.executable, __file__], + # https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html + sys.executable, "-m", "mpi4py.run", __file__], env=newenv, check=True ) -- GitLab From c92c81003994b25a337bafc12b041966e61f13c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Fri, 17 Aug 2018 01:58:19 -0400 Subject: [PATCH 168/260] Turn off output capture (for now) on MPI test --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 40adf72..358c7fe 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -69,7 +69,7 @@ Python 3.6 POCL MPI: - export PY_EXE=python3.6 - export PYOPENCL_TEST=portable - export EXTRA_INSTALL="numpy mako mpi4py" - - export PYTEST_ADDOPTS="-k mpi" + - export PYTEST_ADDOPTS="-k mpi --capture=no" - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project.sh - ". ./build-and-test-py-project.sh" tags: -- GitLab From 1fd2da895123385277e7e93ba951833082d63b9f Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 17 Aug 2018 13:28:18 -0500 Subject: [PATCH 169/260] Temporarily add level_nterms to constantone wrangler --- requirements.txt | 1 - test/test_distributed.py | 161 ++------------------------------------- 2 files changed, 5 insertions(+), 157 deletions(-) diff --git a/requirements.txt b/requirements.txt index dd5f008..cd0a243 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,4 @@ numpy -mako git+https://github.com/inducer/pyopencl git+https://github.com/inducer/islpy git+https://github.com/inducer/loopy diff --git a/test/test_distributed.py b/test/test_distributed.py index b320c0c..42ca946 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -5,6 +5,8 @@ from boxtree.distributed.calculation import DistributedFMMLibExpansionWrangler from boxtree.distributed import DistributedFMMInfo import numpy.linalg as la from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler +from boxtree.tools import ConstantOneExpansionWrangler as \ + ConstantOneExpansionWranglerBase import logging import os import pytest @@ -111,164 +113,11 @@ def test_against_shared(num_processes, dims, nsources, ntargets): # {{{ Constantone expansion wrangler -class ConstantOneExpansionWrangler(object): - """This implements the 'analytical routines' for a Green's function that is - constant 1 everywhere. For 'charges' of 'ones', this should get every particle - a copy of the particle count. 
- """ +class ConstantOneExpansionWrangler(ConstantOneExpansionWranglerBase): def __init__(self, tree): - self.tree = tree - - def multipole_expansion_zeros(self): - return np.zeros(self.tree.nboxes, dtype=np.float64) - - local_expansion_zeros = multipole_expansion_zeros - - def potential_zeros(self): - return np.zeros(self.tree.ntargets, dtype=np.float64) - - def _get_source_slice(self, ibox): - pstart = self.tree.box_source_starts[ibox] - return slice( - pstart, pstart + self.tree.box_source_counts_nonchild[ibox]) - - def _get_target_slice(self, ibox): - pstart = self.tree.box_target_starts[ibox] - return slice( - pstart, pstart + self.tree.box_target_counts_nonchild[ibox]) - - def reorder_sources(self, source_array): - return source_array[self.tree.user_source_ids] - - def reorder_potentials(self, potentials): - return potentials[self.tree.sorted_target_ids] - - def form_multipoles(self, level_start_source_box_nrs, source_boxes, src_weights): - mpoles = self.multipole_expansion_zeros() - for ibox in source_boxes: - pslice = self._get_source_slice(ibox) - mpoles[ibox] += np.sum(src_weights[pslice]) - - return mpoles - - def coarsen_multipoles(self, level_start_source_parent_box_nrs, - source_parent_boxes, mpoles): - tree = self.tree - - # nlevels-1 is the last valid level index - # nlevels-2 is the last valid level that could have children - # - # 3 is the last relevant source_level. - # 2 is the last relevant target_level. - # (because no level 1 box will be well-separated from another) - for source_level in range(tree.nlevels-1, 2, -1): - target_level = source_level - 1 - start, stop = level_start_source_parent_box_nrs[ - target_level:target_level+2] - for ibox in source_parent_boxes[start:stop]: - for child in tree.box_child_ids[:, ibox]: - if child: - mpoles[ibox] += mpoles[child] - - def eval_direct(self, target_boxes, neighbor_sources_starts, - neighbor_sources_lists, src_weights): - pot = self.potential_zeros() - - for itgt_box, tgt_ibox in enumerate(target_boxes): - tgt_pslice = self._get_target_slice(tgt_ibox) - - src_sum = 0 - start, end = neighbor_sources_starts[itgt_box:itgt_box+2] - #print "DIR: %s <- %s" % (tgt_ibox, neighbor_sources_lists[start:end]) - for src_ibox in neighbor_sources_lists[start:end]: - src_pslice = self._get_source_slice(src_ibox) - - src_sum += np.sum(src_weights[src_pslice]) - - pot[tgt_pslice] = src_sum - - return pot - - def multipole_to_local(self, - level_start_target_or_target_parent_box_nrs, - target_or_target_parent_boxes, - starts, lists, mpole_exps): - local_exps = self.local_expansion_zeros() - - for itgt_box, tgt_ibox in enumerate(target_or_target_parent_boxes): - start, end = starts[itgt_box:itgt_box+2] - - contrib = 0 - #print tgt_ibox, "<-", lists[start:end] - for src_ibox in lists[start:end]: - contrib += mpole_exps[src_ibox] - - local_exps[tgt_ibox] += contrib - - return local_exps - - def eval_multipoles(self, - target_boxes_by_source_level, from_sep_smaller_nonsiblings_by_level, - mpole_exps): - pot = self.potential_zeros() - - for level, ssn in enumerate(from_sep_smaller_nonsiblings_by_level): - for itgt_box, tgt_ibox in \ - enumerate(target_boxes_by_source_level[level]): - tgt_pslice = self._get_target_slice(tgt_ibox) - - contrib = 0 - - start, end = ssn.starts[itgt_box:itgt_box+2] - for src_ibox in ssn.lists[start:end]: - contrib += mpole_exps[src_ibox] - - pot[tgt_pslice] += contrib - - return pot - - def form_locals(self, - level_start_target_or_target_parent_box_nrs, - target_or_target_parent_boxes, starts, lists, src_weights): - 
local_exps = self.local_expansion_zeros() - - for itgt_box, tgt_ibox in enumerate(target_or_target_parent_boxes): - start, end = starts[itgt_box:itgt_box+2] - - #print "LIST 4", tgt_ibox, "<-", lists[start:end] - contrib = 0 - for src_ibox in lists[start:end]: - src_pslice = self._get_source_slice(src_ibox) - - contrib += np.sum(src_weights[src_pslice]) - - local_exps[tgt_ibox] += contrib - - return local_exps - - def refine_locals(self, level_start_target_or_target_parent_box_nrs, - target_or_target_parent_boxes, local_exps): - - for target_lev in range(1, self.tree.nlevels): - start, stop = level_start_target_or_target_parent_box_nrs[ - target_lev:target_lev+2] - for ibox in target_or_target_parent_boxes[start:stop]: - local_exps[ibox] += local_exps[self.tree.box_parent_ids[ibox]] - - return local_exps - - def eval_locals(self, level_start_target_box_nrs, target_boxes, local_exps): - pot = self.potential_zeros() - - for ibox in target_boxes: - tgt_pslice = self._get_target_slice(ibox) - pot[tgt_pslice] += local_exps[ibox] - - return pot - - def finalize_potentials(self, potentials): - return potentials + super(ConstantOneExpansionWrangler, self).__init__(tree) + self.level_nterms = np.ones(tree.nlevels, dtype=np.int32) # }}} -- GitLab From 1dfa344adffd6134f498387f256a2c282b39d31b Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 17 Aug 2018 13:37:51 -0500 Subject: [PATCH 170/260] Fix typo --- boxtree/distributed/perf_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index 7075244..ea3fad0 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -470,7 +470,7 @@ class PerformanceModel: nvariables = len(x_name) if nresult < 1: - raise RuntimeError("Please run FMM at lease once using time_performance" + raise RuntimeError("Please run FMM at least once using time_performance " "before forming models.") elif nresult == 1: result = self.time_result[0] -- GitLab From d0ddf01ec13d9094dda9d443b5f5494f0f8373d5 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 17 Aug 2018 23:24:16 -0500 Subject: [PATCH 171/260] Add default_perf_model.json to package_data --- setup.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index eb8bbbb..9b424e1 100644 --- a/setup.py +++ b/setup.py @@ -47,7 +47,12 @@ def main(): "pytest>=2.3", "cgen>=2013.1.2", "six", - ]) + ], + package_data={ + "boxtree": [ + "distributed/default_perf_model.json" + ] + }) if __name__ == '__main__': -- GitLab From 5d511f6072315af1f42e59b3a76fc3c8208d7f4e Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 17 Aug 2018 23:31:20 -0500 Subject: [PATCH 172/260] Force OpenMP to use 1 thread in test cases --- test/test_distributed.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/test_distributed.py b/test/test_distributed.py index 8ec032a..c4a2210 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -101,6 +101,7 @@ def test_against_shared(num_processes, dims, nsources, ntargets): newenv["dims"] = str(dims) newenv["nsources"] = str(nsources) newenv["ntargets"] = str(ntargets) + newenv["OMP_NUM_THREADS"] = "1" import subprocess import sys @@ -140,7 +141,6 @@ def _test_constantone(dims, nsources, ntargets, dtype): import pyopencl as cl ctx = cl.create_some_context() queue = cl.CommandQueue(ctx) - print(queue.context.devices) if rank == 0: @@ -193,6 +193,7 @@ def test_constantone(num_processes, dims, nsources, ntargets): newenv["dims"] = 
str(dims) newenv["nsources"] = str(nsources) newenv["ntargets"] = str(ntargets) + newenv["OMP_NUM_THREADS"] = "1" import subprocess import sys -- GitLab From 3b2b452f400284aeeede0cd90b2ee87e029d6a96 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sat, 18 Aug 2018 11:04:57 -0500 Subject: [PATCH 173/260] Add a note for not importing mpi4py.MPI at module level --- test/test_distributed.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/test_distributed.py b/test/test_distributed.py index c4a2210..ae19b95 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -8,6 +8,9 @@ import logging import os import pytest +# Note: Do not import mpi4py.MPI object at the module level, because OpenMPI does not +# support recursive invocations. + # Configure logging logging.basicConfig(level=os.environ.get("LOGLEVEL", "WARNING")) logging.getLogger("boxtree.distributed").setLevel(logging.INFO) -- GitLab From e2c4cce3d51851630b8781a691e93ff0431c245c Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 14 Oct 2018 12:04:46 -0500 Subject: [PATCH 174/260] Raise runtime error when partition cannot be done --- boxtree/distributed/partition.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index 453b73f..6413551 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -43,6 +43,10 @@ def partition_work(boxes_time, traversal, total_rank): """ tree = traversal.tree + if total_rank > tree.nboxes: + raise RuntimeError("Failed to partition work because the number of boxes is " + "less than the number of processes.") + total_workload = 0 for i in range(tree.nboxes): total_workload += boxes_time[i] -- GitLab From 1d030f37b819f699d32e5ca1ff7806bd4b6a1824 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 29 Oct 2018 09:48:33 -0500 Subject: [PATCH 175/260] Add time recording utility --- boxtree/distributed/calculation.py | 27 +++++++++++---------------- boxtree/distributed/util.py | 22 ++++++++++++++++++++++ 2 files changed, 33 insertions(+), 16 deletions(-) create mode 100644 boxtree/distributed/util.py diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index e565fe5..38b6e77 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -30,6 +30,7 @@ from mpi4py import MPI import time from boxtree.distributed import dtype_to_mpi from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler +from boxtree.distributed.util import TimeRecorder from pytools import memoize_method import loopy as lp from loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_1 # noqa: F401 @@ -166,10 +167,8 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False, stats = {} if record_timing: - comm.Barrier() - from time import time - t_start = time() - logger.debug("communicate multipoles: start") + time_recorder = TimeRecorder("Communicate multipoles", comm, logger) + t_start = time.time() # contributing_boxes: # @@ -259,10 +258,8 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False, comm_pattern.advance() if record_timing: - stats["total_time"] = time() - t_start - logger.info("Communicate multipoles: done in {0:.4f} sec.".format( - stats["total_time"] - )) + stats["total_time"] = time.time() - t_start + time_recorder.record() else: stats["total_time"] = None @@ -289,10 +286,10 @@ def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD, current_rank = comm.Get_rank() total_rank =
comm.Get_size() - if current_rank == 0: - if record_timing: - start_time = time.time() + if record_timing: + time_recorder = TimeRecorder("Distribute source weights", comm, logger) + if current_rank == 0: weight_req = [] local_src_weights = np.empty((total_rank,), dtype=object) @@ -307,15 +304,13 @@ def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD, MPI.Request.Waitall(weight_req) - if record_timing: - logger.info("Distribute source weights in {0:.4f} sec.".format( - time.time() - start_time - )) - local_src_weights = local_src_weights[0] else: local_src_weights = comm.recv(source=0, tag=MPITags["DIST_WEIGHT"]) + if record_timing: + time_recorder.record() + return local_src_weights # }}} diff --git a/boxtree/distributed/util.py b/boxtree/distributed/util.py new file mode 100644 index 0000000..26b6568 --- /dev/null +++ b/boxtree/distributed/util.py @@ -0,0 +1,22 @@ +import time + + +class TimeRecorder: + # functions in this class need to be called collectively + def __init__(self, name, comm, logger): + self.name = name + self.comm = comm + self.logger = logger + self.start_time = None + + self.comm.Barrier() + if self.comm.Get_rank() == 0: + self.start_time = time.time() + + def record(self): + self.comm.Barrier() + if self.comm.Get_rank() == 0: + self.logger.info("{0} time: {1} sec.".format( + self.name, + time.time() - self.start_time + )) -- GitLab From 69ab29eac3c7d61577b1e03399748ab8ba2ff984 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 20 Feb 2019 13:43:16 -0600 Subject: [PATCH 176/260] Bug fix --- boxtree/distributed/perf_model.py | 20 -------------------- boxtree/tools.py | 3 ++- 2 files changed, 2 insertions(+), 21 deletions(-) diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py index ea3fad0..87e8010 100644 --- a/boxtree/distributed/perf_model.py +++ b/boxtree/distributed/perf_model.py @@ -514,26 +514,6 @@ class PerformanceModel: return coeff - def time_random_traversals(self): - context = self.cl_context - dtype = np.float64 - - traversals = [] - - for nsources, ntargets, dims in [(9000, 9000, 3), - (12000, 12000, 3), - (15000, 15000, 3), - (18000, 18000, 3), - (21000, 21000, 3)]: - generated_traversal = generate_random_traversal( - context, nsources, ntargets, dims, dtype - ) - - traversals.append(generated_traversal) - - for trav in traversals: - self.time_performance(trav) - def predict_step_time(self, eval_counter, wall_time=True): predict_timing = {} diff --git a/boxtree/tools.py b/boxtree/tools.py index 5f13cd5..d1e85f5 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -27,7 +27,7 @@ import numpy as np from pytools import Record, memoize_method import pyopencl as cl import pyopencl.array # noqa -from pyopencl.tools import dtype_to_c_struct, ScalarArg, VectorArg as _VectorArg +from pyopencl.tools import dtype_to_c_struct, VectorArg as _VectorArg from mako.template import Template from pytools.obj_array import make_obj_array from boxtree.fmm import TimingFuture, TimingResult @@ -666,6 +666,7 @@ class MaskCompressorKernel(object): @memoize_method def get_matrix_compressor_kernel(self, mask_dtype, list_dtype): from pyopencl.algorithm import ListOfListsBuilder + from pyopencl.tools import VectorArg, ScalarArg return ListOfListsBuilder( self.context, -- GitLab From 78f065bd6455d2c5ba90ce8d2286c3030b624583 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 20 Feb 2019 13:56:22 -0600 Subject: [PATCH 177/260] Bug fix --- boxtree/tools.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git 
a/boxtree/tools.py b/boxtree/tools.py index d1e85f5..e7db984 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -27,7 +27,7 @@ import numpy as np from pytools import Record, memoize_method import pyopencl as cl import pyopencl.array # noqa -from pyopencl.tools import dtype_to_c_struct, VectorArg as _VectorArg +from pyopencl.tools import dtype_to_c_struct, ScalarArg, VectorArg as _VectorArg from mako.template import Template from pytools.obj_array import make_obj_array from boxtree.fmm import TimingFuture, TimingResult @@ -40,7 +40,7 @@ from functools import partial # Use offsets in VectorArg by default. VectorArg = partial(_VectorArg, with_offset=True) - +ScalarArg = ScalarArg AXIS_NAMES = ("x", "y", "z", "w") @@ -652,6 +652,7 @@ class MaskCompressorKernel(object): @memoize_method def get_list_compressor_kernel(self, mask_dtype, list_dtype): from pyopencl.algorithm import ListOfListsBuilder + # Reimport VectorArg to use default with_offset from pyopencl.tools import VectorArg return ListOfListsBuilder( @@ -666,7 +667,8 @@ class MaskCompressorKernel(object): @memoize_method def get_matrix_compressor_kernel(self, mask_dtype, list_dtype): from pyopencl.algorithm import ListOfListsBuilder - from pyopencl.tools import VectorArg, ScalarArg + # Reimport VectorArg to use default with_offset + from pyopencl.tools import VectorArg return ListOfListsBuilder( self.context, -- GitLab From ceec402d2e99abde46f56c5c595bfd1330a4fc4d Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 20 Feb 2019 15:33:39 -0600 Subject: [PATCH 178/260] Integrate OpenCL cost model with distributed implementation --- boxtree/distributed/__init__.py | 58 ++++++----- boxtree/distributed/default_perf_model.json | 1 - examples/demo_perf_model.py | 106 -------------------- test/test_distributed.py | 9 +- 4 files changed, 39 insertions(+), 135 deletions(-) delete mode 100644 boxtree/distributed/default_perf_model.json delete mode 100644 examples/demo_perf_model.py diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 044ff63..8a2f585 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -25,7 +25,7 @@ THE SOFTWARE. from mpi4py import MPI import numpy as np -from boxtree.distributed.perf_model import PerformanceModel +from boxtree.cost import CLFMMCostModel MPITags = dict( DIST_TREE=0, @@ -55,16 +55,23 @@ def dtype_to_mpi(dtype): class DistributedFMMInfo(object): - def __init__(self, queue, global_trav, distributed_expansion_wrangler_factory, - model_filename=None, comm=MPI.COMM_WORLD): + def __init__(self, queue, global_trav_dev, + distributed_expansion_wrangler_factory, + cost_model=None, comm=MPI.COMM_WORLD): - self.global_trav = global_trav - self.distributed_expansion_wrangler_factory = \ - distributed_expansion_wrangler_factory + # TODO: Support box_target_counts_nonchild? 
self.comm = comm current_rank = comm.Get_rank() + if current_rank == 0: + self.global_trav = global_trav_dev.get(queue=queue) + else: + self.global_trav = None + + self.distributed_expansion_wrangler_factory = \ + distributed_expansion_wrangler_factory + # {{{ Get global wrangler if current_rank == 0: @@ -79,7 +86,7 @@ class DistributedFMMInfo(object): # {{{ Broadcast well_sep_is_n_away if current_rank == 0: - well_sep_is_n_away = global_trav.well_sep_is_n_away + well_sep_is_n_away = self.global_trav.well_sep_is_n_away else: well_sep_is_n_away = None @@ -87,27 +94,32 @@ # }}} - # {{{ Get performance model and counter - - if current_rank == 0: - model = PerformanceModel(queue.context, True) - - if model_filename is not None: - model.loadjson(model_filename) - - if len(model.time_result) == 0: - model.load_default_model() - - # }}} - # {{{ Partition work if current_rank == 0: + # Construct default cost model if not supplied + if cost_model is None: + # TODO: should replace the calibration params with a reasonable + # default one + cost_model = CLFMMCostModel( + queue, CLFMMCostModel.get_constantone_calibration_params() + ) + + ndirect_sources_per_target_box = ( + cost_model.get_ndirect_sources_per_target_box(global_trav_dev) + ) + + boxes_time = cost_model.aggregate_stage_costs_per_box( + global_trav_dev, + cost_model.get_fmm_modeled_cost( + global_trav_dev, self.global_wrangler.level_nterms, + ndirect_sources_per_target_box + ) + ).get() from boxtree.distributed.partition import partition_work responsible_boxes_list = partition_work( - boxes_time, global_trav, comm.Get_size() + boxes_time, self.global_trav, comm.Get_size() ) else: responsible_boxes_list = None @@ -118,7 +130,7 @@ if current_rank == 0: from boxtree.distributed.partition import ResponsibleBoxesQuery - responsible_box_query = ResponsibleBoxesQuery(queue, global_trav) + responsible_box_query = ResponsibleBoxesQuery(queue, self.global_trav) else: responsible_box_query = None diff --git a/boxtree/distributed/default_perf_model.json b/boxtree/distributed/default_perf_model.json deleted file mode 100644 index 5c6a449..0000000 --- a/boxtree/distributed/default_perf_model.json +++ /dev/null @@ -1 +0,0 @@ -[{"nterms_fmm_total": 1000000, "direct_workload": 5064570, "direct_nsource_boxes": 56907, "m2l_workload": 879645000, "m2p_workload": 80054000, "m2p_nboxes": 148660, "p2l_workload": 58602800, "p2l_nboxes": 84384, "eval_part_workload": 1000000, "form_multipoles": {"wall_elapsed": 0.14289305033162236, "process_elapsed": 0.09281096999999994}, "coarsen_multipoles": {"wall_elapsed": 0.14258357882499695, "process_elapsed": 0.14258194200000007}, "eval_direct": {"wall_elapsed": 2.344248099718243, "process_elapsed": 2.378837522999998}, "multipole_to_local": {"wall_elapsed": 20.023932092823088, "process_elapsed": 19.927228304}, "eval_multipoles": {"wall_elapsed": 3.287798510864377, "process_elapsed": 3.2817736969999984}, "form_locals": {"wall_elapsed": 2.141686537768692, "process_elapsed": 2.1379926530000013}, "refine_locals": {"wall_elapsed": 0.10454159695655107, "process_elapsed": 0.10454057600000155}, "eval_locals": {"wall_elapsed": 0.05774546507745981, "process_elapsed": 0.05774528099999898}}, {"nterms_fmm_total": 2000000, "direct_workload": 14111527, "direct_nsource_boxes": 105337, "m2l_workload": 1702853000, "m2p_workload": 202584200, "m2p_nboxes": 311728, "p2l_workload": 120805600, "p2l_nboxes":
146698, "eval_part_workload": 2000000, "form_multipoles": {"wall_elapsed": 0.10452838707715273, "process_elapsed": 0.10452839599999919}, "coarsen_multipoles": {"wall_elapsed": 0.21106432611122727, "process_elapsed": 0.21548113200000074}, "eval_direct": {"wall_elapsed": 4.7187186549417675, "process_elapsed": 4.702423059000012}, "multipole_to_local": {"wall_elapsed": 40.50634287390858, "process_elapsed": 40.512730489999996}, "eval_multipoles": {"wall_elapsed": 9.251218600198627, "process_elapsed": 9.216514626000006}, "form_locals": {"wall_elapsed": 4.5346991759724915, "process_elapsed": 4.524306796000005}, "refine_locals": {"wall_elapsed": 0.1888848263770342, "process_elapsed": 0.19806307300000014}, "eval_locals": {"wall_elapsed": 0.10716611426323652, "process_elapsed": 0.10716471899999647}}, {"nterms_fmm_total": 3000000, "direct_workload": 24428758, "direct_nsource_boxes": 223172, "m2l_workload": 4011675000, "m2p_workload": 670713600, "m2p_nboxes": 735176, "p2l_workload": 213069300, "p2l_nboxes": 259667, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.1347822081297636, "process_elapsed": 0.13477609299999926}, "coarsen_multipoles": {"wall_elapsed": 0.3603002396412194, "process_elapsed": 0.3602957989999993}, "eval_direct": {"wall_elapsed": 11.283871918916702, "process_elapsed": 11.367921355000007}, "multipole_to_local": {"wall_elapsed": 92.28659388935193, "process_elapsed": 92.02833609099999}, "eval_multipoles": {"wall_elapsed": 24.550387303810567, "process_elapsed": 24.45371944300001}, "form_locals": {"wall_elapsed": 7.136191665194929, "process_elapsed": 7.125686079000019}, "refine_locals": {"wall_elapsed": 0.42898024804890156, "process_elapsed": 0.42143030599999065}, "eval_locals": {"wall_elapsed": 0.27421190217137337, "process_elapsed": 0.2742086969999775}}, {"nterms_fmm_total": 4000000, "direct_workload": 29106936, "direct_nsource_boxes": 259688, "m2l_workload": 4679517000, "m2p_workload": 737745100, "m2p_nboxes": 887508, "p2l_workload": 262131700, "p2l_nboxes": 277715, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.2584445010870695, "process_elapsed": 0.25844339400001104}, "coarsen_multipoles": {"wall_elapsed": 0.578245738055557, "process_elapsed": 0.5796970419999923}, "eval_direct": {"wall_elapsed": 13.274465977214277, "process_elapsed": 13.333639903999995}, "multipole_to_local": {"wall_elapsed": 111.34071257105097, "process_elapsed": 110.98862347400001}, "eval_multipoles": {"wall_elapsed": 27.020350001286715, "process_elapsed": 27.001812247000032}, "form_locals": {"wall_elapsed": 9.117257341276854, "process_elapsed": 9.09303535500004}, "refine_locals": {"wall_elapsed": 0.4617841048166156, "process_elapsed": 0.45366404100002455}, "eval_locals": {"wall_elapsed": 0.29467571387067437, "process_elapsed": 0.29467265900001394}}, {"nterms_fmm_total": 5000000, "direct_workload": 47923958, "direct_nsource_boxes": 321217, "m2l_workload": 6098447000, "m2p_workload": 1049959000, "m2p_nboxes": 1058414, "p2l_workload": 250873800, "p2l_nboxes": 307399, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.3379204752855003, "process_elapsed": 0.3395408740000221}, "coarsen_multipoles": {"wall_elapsed": 0.7178796431981027, "process_elapsed": 0.7149586010000348}, "eval_direct": {"wall_elapsed": 15.559152766596526, "process_elapsed": 15.673222753999994}, "multipole_to_local": {"wall_elapsed": 140.75012073572725, "process_elapsed": 140.313214335}, "eval_multipoles": {"wall_elapsed": 36.27695396123454, "process_elapsed": 36.167522549999944}, 
"form_locals": {"wall_elapsed": 9.149377660825849, "process_elapsed": 9.114768321999918}, "refine_locals": {"wall_elapsed": 0.48122364515438676, "process_elapsed": 0.481217474999994}, "eval_locals": {"wall_elapsed": 0.3034700150601566, "process_elapsed": 0.3034582699999646}}, {"nterms_fmm_total": 6000000, "direct_workload": 52175740, "direct_nsource_boxes": 365978, "m2l_workload": 6963407000, "m2p_workload": 1228104100, "m2p_nboxes": 1238954, "p2l_workload": 320901400, "p2l_nboxes": 340870, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.2821324490942061, "process_elapsed": 0.2821300870000414}, "coarsen_multipoles": {"wall_elapsed": 0.7399735772050917, "process_elapsed": 0.7435758610000676}, "eval_direct": {"wall_elapsed": 17.014956682454795, "process_elapsed": 17.00256681099995}, "multipole_to_local": {"wall_elapsed": 159.9870855966583, "process_elapsed": 159.52702093099992}, "eval_multipoles": {"wall_elapsed": 44.760278538800776, "process_elapsed": 44.609669542999995}, "form_locals": {"wall_elapsed": 10.070859387051314, "process_elapsed": 10.042829486999949}, "refine_locals": {"wall_elapsed": 0.6633619191125035, "process_elapsed": 0.6633519270000079}, "eval_locals": {"wall_elapsed": 0.32876442186534405, "process_elapsed": 0.3287510919999477}}, {"nterms_fmm_total": 7000000, "direct_workload": 67754792, "direct_nsource_boxes": 399866, "m2l_workload": 7699296000, "m2p_workload": 1423135200, "m2p_nboxes": 1352554, "p2l_workload": 330677000, "p2l_nboxes": 356695, "eval_part_workload": 7000000, "form_multipoles": {"wall_elapsed": 0.34098224015906453, "process_elapsed": 0.3368965309999794}, "coarsen_multipoles": {"wall_elapsed": 0.8644498628564179, "process_elapsed": 0.8597816169999533}, "eval_direct": {"wall_elapsed": 20.44144478905946, "process_elapsed": 20.493337145999817}, "multipole_to_local": {"wall_elapsed": 176.60086810868233, "process_elapsed": 175.99911542799998}, "eval_multipoles": {"wall_elapsed": 47.3556498978287, "process_elapsed": 47.21309775099985}, "form_locals": {"wall_elapsed": 12.22528616571799, "process_elapsed": 12.173807162000003}, "refine_locals": {"wall_elapsed": 0.7411672458983958, "process_elapsed": 0.7411488990001089}, "eval_locals": {"wall_elapsed": 0.4227954070083797, "process_elapsed": 0.42279098400013027}}, {"nterms_fmm_total": 8000000, "direct_workload": 82707417, "direct_nsource_boxes": 441394, "m2l_workload": 8617704000, "m2p_workload": 1685913200, "m2p_nboxes": 1498566, "p2l_workload": 348565200, "p2l_nboxes": 382521, "eval_part_workload": 8000000, "form_multipoles": {"wall_elapsed": 0.43783256597816944, "process_elapsed": 0.4303805439999451}, "coarsen_multipoles": {"wall_elapsed": 0.9156720908358693, "process_elapsed": 0.9222488749999229}, "eval_direct": {"wall_elapsed": 21.642432290129364, "process_elapsed": 21.649963951000245}, "multipole_to_local": {"wall_elapsed": 200.9743533632718, "process_elapsed": 200.288029092}, "eval_multipoles": {"wall_elapsed": 54.97431806195527, "process_elapsed": 54.881239913999934}, "form_locals": {"wall_elapsed": 10.682431893888861, "process_elapsed": 10.645913471000085}, "refine_locals": {"wall_elapsed": 0.7400978719815612, "process_elapsed": 0.7279401089999737}, "eval_locals": {"wall_elapsed": 0.47856780607253313, "process_elapsed": 0.4785622940000849}}, {"nterms_fmm_total": 1000000, "direct_workload": 5064570, "direct_nsource_boxes": 56907, "m2l_workload": 879645000, "m2p_workload": 80054000, "m2p_nboxes": 148660, "p2l_workload": 58602800, "p2l_nboxes": 84384, "eval_part_workload": 1000000, 
"form_multipoles": {"wall_elapsed": 0.06467467220500112, "process_elapsed": 0.061806205999999975}, "coarsen_multipoles": {"wall_elapsed": 0.1378657571040094, "process_elapsed": 0.13786446399999974}, "eval_direct": {"wall_elapsed": 2.628749551717192, "process_elapsed": 2.6928994460000024}, "multipole_to_local": {"wall_elapsed": 20.407855125609785, "process_elapsed": 20.420269581}, "eval_multipoles": {"wall_elapsed": 3.4444818547926843, "process_elapsed": 3.4479039949999972}, "form_locals": {"wall_elapsed": 2.021404864266515, "process_elapsed": 2.013269193000003}, "refine_locals": {"wall_elapsed": 0.11385017307475209, "process_elapsed": 0.11385017100000283}, "eval_locals": {"wall_elapsed": 0.061653067357838154, "process_elapsed": 0.06165442400000032}}, {"nterms_fmm_total": 2000000, "direct_workload": 14111527, "direct_nsource_boxes": 105337, "m2l_workload": 1702853000, "m2p_workload": 202584200, "m2p_nboxes": 311728, "p2l_workload": 120805600, "p2l_nboxes": 146698, "eval_part_workload": 2000000, "form_multipoles": {"wall_elapsed": 0.10336571699008346, "process_elapsed": 0.10382350900000148}, "coarsen_multipoles": {"wall_elapsed": 0.18445956613868475, "process_elapsed": 0.18445694999999773}, "eval_direct": {"wall_elapsed": 5.217078930698335, "process_elapsed": 5.1945506220000155}, "multipole_to_local": {"wall_elapsed": 40.47428226983175, "process_elapsed": 40.43935276}, "eval_multipoles": {"wall_elapsed": 8.246962024830282, "process_elapsed": 8.253471535999992}, "form_locals": {"wall_elapsed": 4.244218919891864, "process_elapsed": 4.237957393000002}, "refine_locals": {"wall_elapsed": 0.20841451222077012, "process_elapsed": 0.20840436699999998}, "eval_locals": {"wall_elapsed": 0.12309996783733368, "process_elapsed": 0.12310028500000669}}, {"nterms_fmm_total": 3000000, "direct_workload": 24428758, "direct_nsource_boxes": 223172, "m2l_workload": 4011675000, "m2p_workload": 670713600, "m2p_nboxes": 735176, "p2l_workload": 213069300, "p2l_nboxes": 259667, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.24232071777805686, "process_elapsed": 0.24232055199999536}, "coarsen_multipoles": {"wall_elapsed": 0.5347794652916491, "process_elapsed": 0.534767346999999}, "eval_direct": {"wall_elapsed": 11.002750042360276, "process_elapsed": 11.097017833000024}, "multipole_to_local": {"wall_elapsed": 93.03417005809024, "process_elapsed": 92.738502075}, "eval_multipoles": {"wall_elapsed": 23.674448810052127, "process_elapsed": 23.61579378899998}, "form_locals": {"wall_elapsed": 7.417238333728164, "process_elapsed": 7.402527147000001}, "refine_locals": {"wall_elapsed": 0.3950768308714032, "process_elapsed": 0.39506440099998485}, "eval_locals": {"wall_elapsed": 0.25366233196109533, "process_elapsed": 0.25365634300001716}}, {"nterms_fmm_total": 4000000, "direct_workload": 29106936, "direct_nsource_boxes": 259688, "m2l_workload": 4679517000, "m2p_workload": 737745100, "m2p_nboxes": 887508, "p2l_workload": 262131700, "p2l_nboxes": 277715, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.17537363106384873, "process_elapsed": 0.17537425200001167}, "coarsen_multipoles": {"wall_elapsed": 0.4305199389345944, "process_elapsed": 0.4364680989999954}, "eval_direct": {"wall_elapsed": 11.999836526811123, "process_elapsed": 12.09899144499991}, "multipole_to_local": {"wall_elapsed": 108.8945693182759, "process_elapsed": 108.54051004800002}, "eval_multipoles": {"wall_elapsed": 28.0945353070274, "process_elapsed": 28.019863450000003}, "form_locals": {"wall_elapsed": 8.115772506222129, 
"process_elapsed": 8.086782484999958}, "refine_locals": {"wall_elapsed": 0.4050322100520134, "process_elapsed": 0.4050208720000228}, "eval_locals": {"wall_elapsed": 0.23690649028867483, "process_elapsed": 0.23690572800001064}}, {"nterms_fmm_total": 5000000, "direct_workload": 47923958, "direct_nsource_boxes": 321217, "m2l_workload": 6098447000, "m2p_workload": 1049959000, "m2p_nboxes": 1058414, "p2l_workload": 250873800, "p2l_nboxes": 307399, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.2068675379268825, "process_elapsed": 0.20685269899996683}, "coarsen_multipoles": {"wall_elapsed": 0.6696573188528419, "process_elapsed": 0.664978437000002}, "eval_direct": {"wall_elapsed": 15.180561240762472, "process_elapsed": 15.243839906999938}, "multipole_to_local": {"wall_elapsed": 143.82901267288253, "process_elapsed": 143.340336542}, "eval_multipoles": {"wall_elapsed": 34.135265816003084, "process_elapsed": 34.04566944399994}, "form_locals": {"wall_elapsed": 8.900728145148605, "process_elapsed": 8.893034466000017}, "refine_locals": {"wall_elapsed": 0.5461942246183753, "process_elapsed": 0.546178929000007}, "eval_locals": {"wall_elapsed": 0.3527874890714884, "process_elapsed": 0.35278468699993937}}, {"nterms_fmm_total": 6000000, "direct_workload": 52175740, "direct_nsource_boxes": 365978, "m2l_workload": 6963407000, "m2p_workload": 1228104100, "m2p_nboxes": 1238954, "p2l_workload": 320901400, "p2l_nboxes": 340870, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.310679045971483, "process_elapsed": 0.3128285689999757}, "coarsen_multipoles": {"wall_elapsed": 0.6655385498888791, "process_elapsed": 0.6780369559999144}, "eval_direct": {"wall_elapsed": 17.763539463747293, "process_elapsed": 17.785153179000076}, "multipole_to_local": {"wall_elapsed": 165.45908340485767, "process_elapsed": 164.94912508800007}, "eval_multipoles": {"wall_elapsed": 41.00005980208516, "process_elapsed": 40.86370828999998}, "form_locals": {"wall_elapsed": 10.63184105977416, "process_elapsed": 10.620832092}, "refine_locals": {"wall_elapsed": 0.6163057168014348, "process_elapsed": 0.6162964320000128}, "eval_locals": {"wall_elapsed": 0.3811777690425515, "process_elapsed": 0.3816459009999562}}, {"nterms_fmm_total": 7000000, "direct_workload": 67754792, "direct_nsource_boxes": 399866, "m2l_workload": 7699296000, "m2p_workload": 1423135200, "m2p_nboxes": 1352554, "p2l_workload": 330677000, "p2l_nboxes": 356695, "eval_part_workload": 7000000, "form_multipoles": {"wall_elapsed": 0.2698205062188208, "process_elapsed": 0.2698129970000309}, "coarsen_multipoles": {"wall_elapsed": 0.6649344856850803, "process_elapsed": 0.6794030730000031}, "eval_direct": {"wall_elapsed": 18.069350454490632, "process_elapsed": 18.11652952300028}, "multipole_to_local": {"wall_elapsed": 179.39501194981858, "process_elapsed": 178.77733126399994}, "eval_multipoles": {"wall_elapsed": 49.19099767273292, "process_elapsed": 49.06879406899998}, "form_locals": {"wall_elapsed": 11.126400337088853, "process_elapsed": 11.094821377000017}, "refine_locals": {"wall_elapsed": 0.8388090138323605, "process_elapsed": 0.8387958670000444}, "eval_locals": {"wall_elapsed": 0.43714700592681766, "process_elapsed": 0.4371343509999406}}, {"nterms_fmm_total": 8000000, "direct_workload": 82707417, "direct_nsource_boxes": 441394, "m2l_workload": 8617704000, "m2p_workload": 1685913200, "m2p_nboxes": 1498566, "p2l_workload": 348565200, "p2l_nboxes": 382521, "eval_part_workload": 8000000, "form_multipoles": {"wall_elapsed": 0.3005179218016565, 
"process_elapsed": 0.3107753879999109}, "coarsen_multipoles": {"wall_elapsed": 0.7997653689235449, "process_elapsed": 0.8049937840000894}, "eval_direct": {"wall_elapsed": 20.99420260032639, "process_elapsed": 21.002320735999774}, "multipole_to_local": {"wall_elapsed": 201.31278445525095, "process_elapsed": 200.60458022299986}, "eval_multipoles": {"wall_elapsed": 56.057990666944534, "process_elapsed": 55.91957687800004}, "form_locals": {"wall_elapsed": 11.969875158276409, "process_elapsed": 11.920894822000037}, "refine_locals": {"wall_elapsed": 0.7045927383005619, "process_elapsed": 0.7050175569997919}, "eval_locals": {"wall_elapsed": 0.43921483773738146, "process_elapsed": 0.4392006999999012}}, {"nterms_fmm_total": 1000000, "direct_workload": 5064570, "direct_nsource_boxes": 56907, "m2l_workload": 879645000, "m2p_workload": 80054000, "m2p_nboxes": 148660, "p2l_workload": 58602800, "p2l_nboxes": 84384, "eval_part_workload": 1000000, "form_multipoles": {"wall_elapsed": 0.06566942017525434, "process_elapsed": 0.06264390200000003}, "coarsen_multipoles": {"wall_elapsed": 0.1418711910955608, "process_elapsed": 0.1418699069999998}, "eval_direct": {"wall_elapsed": 2.446558577939868, "process_elapsed": 2.480314300999999}, "multipole_to_local": {"wall_elapsed": 20.00079088192433, "process_elapsed": 19.990914497}, "eval_multipoles": {"wall_elapsed": 3.289981202688068, "process_elapsed": 3.298983691}, "form_locals": {"wall_elapsed": 1.949442199897021, "process_elapsed": 1.9376855149999983}, "refine_locals": {"wall_elapsed": 0.10793576203286648, "process_elapsed": 0.1079347100000021}, "eval_locals": {"wall_elapsed": 0.056184975896030664, "process_elapsed": 0.056184503999997304}}, {"nterms_fmm_total": 2000000, "direct_workload": 14111527, "direct_nsource_boxes": 105337, "m2l_workload": 1702853000, "m2p_workload": 202584200, "m2p_nboxes": 311728, "p2l_workload": 120805600, "p2l_nboxes": 146698, "eval_part_workload": 2000000, "form_multipoles": {"wall_elapsed": 0.0772943552583456, "process_elapsed": 0.07729414800000001}, "coarsen_multipoles": {"wall_elapsed": 0.1868803328834474, "process_elapsed": 0.18687748400000004}, "eval_direct": {"wall_elapsed": 4.70476703485474, "process_elapsed": 4.733044761000009}, "multipole_to_local": {"wall_elapsed": 39.82684602914378, "process_elapsed": 39.775730969}, "eval_multipoles": {"wall_elapsed": 8.290043313987553, "process_elapsed": 8.295265927000003}, "form_locals": {"wall_elapsed": 4.078609869815409, "process_elapsed": 4.073348116000005}, "refine_locals": {"wall_elapsed": 0.20782660599797964, "process_elapsed": 0.20782425500000556}, "eval_locals": {"wall_elapsed": 0.15716208703815937, "process_elapsed": 0.15309306700000036}}, {"nterms_fmm_total": 3000000, "direct_workload": 24428758, "direct_nsource_boxes": 223172, "m2l_workload": 4011675000, "m2p_workload": 670713600, "m2p_nboxes": 735176, "p2l_workload": 213069300, "p2l_nboxes": 259667, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.16780595015734434, "process_elapsed": 0.16779495699999813}, "coarsen_multipoles": {"wall_elapsed": 0.42514409590512514, "process_elapsed": 0.4251383390000001}, "eval_direct": {"wall_elapsed": 11.506238477304578, "process_elapsed": 11.525195629999985}, "multipole_to_local": {"wall_elapsed": 93.78711060807109, "process_elapsed": 93.47971988}, "eval_multipoles": {"wall_elapsed": 24.25627316115424, "process_elapsed": 24.202846137000023}, "form_locals": {"wall_elapsed": 7.886253128293902, "process_elapsed": 7.8745142149999765}, "refine_locals": {"wall_elapsed": 
0.5174721670337021, "process_elapsed": 0.5133926010000209}, "eval_locals": {"wall_elapsed": 0.2859889171086252, "process_elapsed": 0.2859800949999851}}, {"nterms_fmm_total": 4000000, "direct_workload": 29106936, "direct_nsource_boxes": 259688, "m2l_workload": 4679517000, "m2p_workload": 737745100, "m2p_nboxes": 887508, "p2l_workload": 262131700, "p2l_nboxes": 277715, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.183073571883142, "process_elapsed": 0.18305776899998705}, "coarsen_multipoles": {"wall_elapsed": 0.472671234048903, "process_elapsed": 0.47271501700001295}, "eval_direct": {"wall_elapsed": 12.604105783626437, "process_elapsed": 12.64961513900002}, "multipole_to_local": {"wall_elapsed": 107.64262523688376, "process_elapsed": 107.31761293199997}, "eval_multipoles": {"wall_elapsed": 27.184093620162457, "process_elapsed": 27.179954468999995}, "form_locals": {"wall_elapsed": 8.9608427840285, "process_elapsed": 8.936076344000014}, "refine_locals": {"wall_elapsed": 0.559703144710511, "process_elapsed": 0.559695051999995}, "eval_locals": {"wall_elapsed": 0.25458914041519165, "process_elapsed": 0.2545796289999771}}, {"nterms_fmm_total": 5000000, "direct_workload": 47923958, "direct_nsource_boxes": 321217, "m2l_workload": 6098447000, "m2p_workload": 1049959000, "m2p_nboxes": 1058414, "p2l_workload": 250873800, "p2l_nboxes": 307399, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.2608643942512572, "process_elapsed": 0.262650323999992}, "coarsen_multipoles": {"wall_elapsed": 0.5767403277568519, "process_elapsed": 0.5860576580000156}, "eval_direct": {"wall_elapsed": 15.735880189575255, "process_elapsed": 15.813270451999927}, "multipole_to_local": {"wall_elapsed": 140.27258323831484, "process_elapsed": 139.77256453799998}, "eval_multipoles": {"wall_elapsed": 36.051918339915574, "process_elapsed": 35.95092203699994}, "form_locals": {"wall_elapsed": 8.086318483110517, "process_elapsed": 8.063137184000084}, "refine_locals": {"wall_elapsed": 0.6116273296065629, "process_elapsed": 0.6116195170000083}, "eval_locals": {"wall_elapsed": 0.30272550601512194, "process_elapsed": 0.30271572899994226}}, {"nterms_fmm_total": 6000000, "direct_workload": 52175740, "direct_nsource_boxes": 365978, "m2l_workload": 6963407000, "m2p_workload": 1228104100, "m2p_nboxes": 1238954, "p2l_workload": 320901400, "p2l_nboxes": 340870, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.23625991912558675, "process_elapsed": 0.2364244679999956}, "coarsen_multipoles": {"wall_elapsed": 0.6893667639233172, "process_elapsed": 0.6830092370000784}, "eval_direct": {"wall_elapsed": 15.056561090052128, "process_elapsed": 15.098359518000052}, "multipole_to_local": {"wall_elapsed": 161.72943416889757, "process_elapsed": 161.19251369200003}, "eval_multipoles": {"wall_elapsed": 41.702806482091546, "process_elapsed": 41.572584398000004}, "form_locals": {"wall_elapsed": 10.078389388974756, "process_elapsed": 10.070601187999955}, "refine_locals": {"wall_elapsed": 0.5847061406821012, "process_elapsed": 0.5846636710000439}, "eval_locals": {"wall_elapsed": 0.32613639906048775, "process_elapsed": 0.3261274469999762}}, {"nterms_fmm_total": 7000000, "direct_workload": 67754792, "direct_nsource_boxes": 399866, "m2l_workload": 7699296000, "m2p_workload": 1423135200, "m2p_nboxes": 1352554, "p2l_workload": 330677000, "p2l_nboxes": 356695, "eval_part_workload": 7000000, "form_multipoles": {"wall_elapsed": 0.35347751434892416, "process_elapsed": 0.3534746269999687}, "coarsen_multipoles": 
{"wall_elapsed": 0.7678062850609422, "process_elapsed": 0.7621317609999778}, "eval_direct": {"wall_elapsed": 19.385647067334503, "process_elapsed": 19.373809754000035}, "multipole_to_local": {"wall_elapsed": 177.7568790889345, "process_elapsed": 177.13995553400014}, "eval_multipoles": {"wall_elapsed": 55.13101848727092, "process_elapsed": 54.92711723799994}, "form_locals": {"wall_elapsed": 10.444787265732884, "process_elapsed": 10.436945378000019}, "refine_locals": {"wall_elapsed": 0.6166465007700026, "process_elapsed": 0.6166181649998634}, "eval_locals": {"wall_elapsed": 0.35686514899134636, "process_elapsed": 0.35686196599999676}}, {"nterms_fmm_total": 8000000, "direct_workload": 82707417, "direct_nsource_boxes": 441394, "m2l_workload": 8617704000, "m2p_workload": 1685913200, "m2p_nboxes": 1498566, "p2l_workload": 348565200, "p2l_nboxes": 382521, "eval_part_workload": 8000000, "form_multipoles": {"wall_elapsed": 0.3867368856444955, "process_elapsed": 0.3867698809999638}, "coarsen_multipoles": {"wall_elapsed": 0.7339309039525688, "process_elapsed": 0.7566592790001323}, "eval_direct": {"wall_elapsed": 20.32549550011754, "process_elapsed": 20.388905034000118}, "multipole_to_local": {"wall_elapsed": 198.88563758181408, "process_elapsed": 198.28836379799986}, "eval_multipoles": {"wall_elapsed": 51.132025649771094, "process_elapsed": 50.934892610999896}, "form_locals": {"wall_elapsed": 11.990623429883271, "process_elapsed": 11.949524165999946}, "refine_locals": {"wall_elapsed": 0.6882827966473997, "process_elapsed": 0.680187923999938}, "eval_locals": {"wall_elapsed": 0.40719516295939684, "process_elapsed": 0.40764913800012437}}, {"nterms_fmm_total": 1000000, "direct_workload": 5064570, "direct_nsource_boxes": 56907, "m2l_workload": 879645000, "m2p_workload": 80054000, "m2p_nboxes": 148660, "p2l_workload": 58602800, "p2l_nboxes": 84384, "eval_part_workload": 1000000, "form_multipoles": {"wall_elapsed": 0.06581627298146486, "process_elapsed": 0.06344213200000004}, "coarsen_multipoles": {"wall_elapsed": 0.1412572581321001, "process_elapsed": 0.1412559019999997}, "eval_direct": {"wall_elapsed": 2.7299221428111196, "process_elapsed": 2.781603758000003}, "multipole_to_local": {"wall_elapsed": 20.612939092796296, "process_elapsed": 20.616477141}, "eval_multipoles": {"wall_elapsed": 3.5307789859361947, "process_elapsed": 3.517346646}, "form_locals": {"wall_elapsed": 2.124925720039755, "process_elapsed": 2.1208257669999995}, "refine_locals": {"wall_elapsed": 0.1124834748916328, "process_elapsed": 0.11248237000000216}, "eval_locals": {"wall_elapsed": 0.06405873689800501, "process_elapsed": 0.0640584180000019}}, {"nterms_fmm_total": 2000000, "direct_workload": 14111527, "direct_nsource_boxes": 105337, "m2l_workload": 1702853000, "m2p_workload": 202584200, "m2p_nboxes": 311728, "p2l_workload": 120805600, "p2l_nboxes": 146698, "eval_part_workload": 2000000, "form_multipoles": {"wall_elapsed": 0.1036986019462347, "process_elapsed": 0.10369902800000119}, "coarsen_multipoles": {"wall_elapsed": 0.1949111670255661, "process_elapsed": 0.1999170879999994}, "eval_direct": {"wall_elapsed": 4.608586409594864, "process_elapsed": 4.5927578570000165}, "multipole_to_local": {"wall_elapsed": 38.54366623284295, "process_elapsed": 38.485827138000005}, "eval_multipoles": {"wall_elapsed": 8.568737780675292, "process_elapsed": 8.542159236999993}, "form_locals": {"wall_elapsed": 4.073263252153993, "process_elapsed": 4.064695062000013}, "refine_locals": {"wall_elapsed": 0.309948590118438, "process_elapsed": 0.3115177579999937}, 
"eval_locals": {"wall_elapsed": 0.1761215110309422, "process_elapsed": 0.17611938100000657}}, {"nterms_fmm_total": 3000000, "direct_workload": 24428758, "direct_nsource_boxes": 223172, "m2l_workload": 4011675000, "m2p_workload": 670713600, "m2p_nboxes": 735176, "p2l_workload": 213069300, "p2l_nboxes": 259667, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.13820320600643754, "process_elapsed": 0.138196674999989}, "coarsen_multipoles": {"wall_elapsed": 0.38043196592479944, "process_elapsed": 0.3804273089999981}, "eval_direct": {"wall_elapsed": 11.744406177196652, "process_elapsed": 11.827659204999975}, "multipole_to_local": {"wall_elapsed": 91.26099151792005, "process_elapsed": 90.957502208}, "eval_multipoles": {"wall_elapsed": 22.827171033713967, "process_elapsed": 22.78696679800001}, "form_locals": {"wall_elapsed": 7.519823815207928, "process_elapsed": 7.498412040000005}, "refine_locals": {"wall_elapsed": 0.36121500795707107, "process_elapsed": 0.36120376799999576}, "eval_locals": {"wall_elapsed": 0.2246690890751779, "process_elapsed": 0.2246666040000207}}, {"nterms_fmm_total": 4000000, "direct_workload": 29106936, "direct_nsource_boxes": 259688, "m2l_workload": 4679517000, "m2p_workload": 737745100, "m2p_nboxes": 887508, "p2l_workload": 262131700, "p2l_nboxes": 277715, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.17177576525136828, "process_elapsed": 0.1717763049999803}, "coarsen_multipoles": {"wall_elapsed": 0.42073291214182973, "process_elapsed": 0.4207269609999855}, "eval_direct": {"wall_elapsed": 11.8608891190961, "process_elapsed": 11.953956640000058}, "multipole_to_local": {"wall_elapsed": 108.9518269430846, "process_elapsed": 108.638402584}, "eval_multipoles": {"wall_elapsed": 29.55046471906826, "process_elapsed": 29.447812095000018}, "form_locals": {"wall_elapsed": 8.121391328983009, "process_elapsed": 8.098203552999962}, "refine_locals": {"wall_elapsed": 0.40015376918017864, "process_elapsed": 0.40014009999998734}, "eval_locals": {"wall_elapsed": 0.24294947879388928, "process_elapsed": 0.24293507200002296}}, {"nterms_fmm_total": 5000000, "direct_workload": 47923958, "direct_nsource_boxes": 321217, "m2l_workload": 6098447000, "m2p_workload": 1049959000, "m2p_nboxes": 1058414, "p2l_workload": 250873800, "p2l_nboxes": 307399, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.3036691783927381, "process_elapsed": 0.3037372880000362}, "coarsen_multipoles": {"wall_elapsed": 0.7293630791828036, "process_elapsed": 0.7386667420000208}, "eval_direct": {"wall_elapsed": 15.853490174748003, "process_elapsed": 15.951854819999937}, "multipole_to_local": {"wall_elapsed": 150.07324582897127, "process_elapsed": 149.59256680499993}, "eval_multipoles": {"wall_elapsed": 36.21569347428158, "process_elapsed": 36.108943527000065}, "form_locals": {"wall_elapsed": 9.256117098033428, "process_elapsed": 9.234842302999937}, "refine_locals": {"wall_elapsed": 0.6879124888218939, "process_elapsed": 0.6842866189999768}, "eval_locals": {"wall_elapsed": 0.4285805160179734, "process_elapsed": 0.4285680850000517}}, {"nterms_fmm_total": 6000000, "direct_workload": 52175740, "direct_nsource_boxes": 365978, "m2l_workload": 6963407000, "m2p_workload": 1228104100, "m2p_nboxes": 1238954, "p2l_workload": 320901400, "p2l_nboxes": 340870, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.2390671377070248, "process_elapsed": 0.23906642099996134}, "coarsen_multipoles": {"wall_elapsed": 0.6618608850985765, "process_elapsed": 0.6946877950000498}, 
"eval_direct": {"wall_elapsed": 16.631962614599615, "process_elapsed": 16.672639716999925}, "multipole_to_local": {"wall_elapsed": 161.71785036334768, "process_elapsed": 161.19572901000004}, "eval_multipoles": {"wall_elapsed": 42.610538037959486, "process_elapsed": 42.503210351999996}, "form_locals": {"wall_elapsed": 11.49972936604172, "process_elapsed": 11.47165019199997}, "refine_locals": {"wall_elapsed": 0.7118247579783201, "process_elapsed": 0.7118074400000296}, "eval_locals": {"wall_elapsed": 0.380106495693326, "process_elapsed": 0.38009324099994046}}, {"nterms_fmm_total": 7000000, "direct_workload": 67754792, "direct_nsource_boxes": 399866, "m2l_workload": 7699296000, "m2p_workload": 1423135200, "m2p_nboxes": 1352554, "p2l_workload": 330677000, "p2l_nboxes": 356695, "eval_part_workload": 7000000, "form_multipoles": {"wall_elapsed": 0.3670531171374023, "process_elapsed": 0.36309775300003366}, "coarsen_multipoles": {"wall_elapsed": 0.7518898211419582, "process_elapsed": 0.7729549489999954}, "eval_direct": {"wall_elapsed": 18.767360630445182, "process_elapsed": 18.769919828999946}, "multipole_to_local": {"wall_elapsed": 180.2374455779791, "process_elapsed": 179.64930603300002}, "eval_multipoles": {"wall_elapsed": 49.90873555187136, "process_elapsed": 49.73379236400001}, "form_locals": {"wall_elapsed": 10.527730371803045, "process_elapsed": 10.505953223000006}, "refine_locals": {"wall_elapsed": 0.6305891978554428, "process_elapsed": 0.6305585160000646}, "eval_locals": {"wall_elapsed": 0.5451591932214797, "process_elapsed": 0.5386910560000615}}, {"nterms_fmm_total": 8000000, "direct_workload": 82707417, "direct_nsource_boxes": 441394, "m2l_workload": 8617704000, "m2p_workload": 1685913200, "m2p_nboxes": 1498566, "p2l_workload": 348565200, "p2l_nboxes": 382521, "eval_part_workload": 8000000, "form_multipoles": {"wall_elapsed": 0.3035329198464751, "process_elapsed": 0.3046291329999349}, "coarsen_multipoles": {"wall_elapsed": 0.908652248326689, "process_elapsed": 0.9229546799999753}, "eval_direct": {"wall_elapsed": 23.435453578364104, "process_elapsed": 23.427185823999707}, "multipole_to_local": {"wall_elapsed": 204.10029060393572, "process_elapsed": 203.498182929}, "eval_multipoles": {"wall_elapsed": 57.77209374681115, "process_elapsed": 57.580601317999935}, "form_locals": {"wall_elapsed": 11.694038683082908, "process_elapsed": 11.65377241200008}, "refine_locals": {"wall_elapsed": 0.6659307437948883, "process_elapsed": 0.6659000030001607}, "eval_locals": {"wall_elapsed": 0.42689173109829426, "process_elapsed": 0.4268864119999307}}, {"nterms_fmm_total": 1000000, "direct_workload": 5064570, "direct_nsource_boxes": 56907, "m2l_workload": 879645000, "m2p_workload": 80054000, "m2p_nboxes": 148660, "p2l_workload": 58602800, "p2l_nboxes": 84384, "eval_part_workload": 1000000, "form_multipoles": {"wall_elapsed": 0.06433802330866456, "process_elapsed": 0.06175603100000027}, "coarsen_multipoles": {"wall_elapsed": 0.13728277198970318, "process_elapsed": 0.1372830300000003}, "eval_direct": {"wall_elapsed": 2.8698996007442474, "process_elapsed": 2.8860750380000035}, "multipole_to_local": {"wall_elapsed": 21.608792692888528, "process_elapsed": 21.600729749}, "eval_multipoles": {"wall_elapsed": 4.170610999688506, "process_elapsed": 4.196137698000001}, "form_locals": {"wall_elapsed": 1.8854569140821695, "process_elapsed": 1.8855776410000011}, "refine_locals": {"wall_elapsed": 0.16010719677433372, "process_elapsed": 0.16010556499999495}, "eval_locals": {"wall_elapsed": 0.08334489725530148, 
"process_elapsed": 0.08334504000000464}}, {"nterms_fmm_total": 2000000, "direct_workload": 14111527, "direct_nsource_boxes": 105337, "m2l_workload": 1702853000, "m2p_workload": 202584200, "m2p_nboxes": 311728, "p2l_workload": 120805600, "p2l_nboxes": 146698, "eval_part_workload": 2000000, "form_multipoles": {"wall_elapsed": 0.07694227108731866, "process_elapsed": 0.07694427899999567}, "coarsen_multipoles": {"wall_elapsed": 0.18193004885688424, "process_elapsed": 0.18192722100000225}, "eval_direct": {"wall_elapsed": 5.362686685286462, "process_elapsed": 5.376335398999991}, "multipole_to_local": {"wall_elapsed": 38.82882813597098, "process_elapsed": 38.800395750999996}, "eval_multipoles": {"wall_elapsed": 7.6316725057549775, "process_elapsed": 7.61176749900001}, "form_locals": {"wall_elapsed": 3.7538683358579874, "process_elapsed": 3.740067400000001}, "refine_locals": {"wall_elapsed": 0.18044804502278566, "process_elapsed": 0.18044670099999394}, "eval_locals": {"wall_elapsed": 0.1640238557010889, "process_elapsed": 0.15998751500001163}}, {"nterms_fmm_total": 3000000, "direct_workload": 24428758, "direct_nsource_boxes": 223172, "m2l_workload": 4011675000, "m2p_workload": 670713600, "m2p_nboxes": 735176, "p2l_workload": 213069300, "p2l_nboxes": 259667, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.16299484204500914, "process_elapsed": 0.1589325700000046}, "coarsen_multipoles": {"wall_elapsed": 0.5394922369159758, "process_elapsed": 0.5394833269999992}, "eval_direct": {"wall_elapsed": 11.529483899474144, "process_elapsed": 11.633024222999964}, "multipole_to_local": {"wall_elapsed": 90.45034577790648, "process_elapsed": 90.140623747}, "eval_multipoles": {"wall_elapsed": 23.475032337009907, "process_elapsed": 23.402588277000007}, "form_locals": {"wall_elapsed": 7.382363630924374, "process_elapsed": 7.379083809000008}, "refine_locals": {"wall_elapsed": 0.3571728630922735, "process_elapsed": 0.3571698819999938}, "eval_locals": {"wall_elapsed": 0.29383321665227413, "process_elapsed": 0.28573974700000804}}, {"nterms_fmm_total": 4000000, "direct_workload": 29106936, "direct_nsource_boxes": 259688, "m2l_workload": 4679517000, "m2p_workload": 737745100, "m2p_nboxes": 887508, "p2l_workload": 262131700, "p2l_nboxes": 277715, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.17482035793364048, "process_elapsed": 0.17482164000000466}, "coarsen_multipoles": {"wall_elapsed": 0.6471779476851225, "process_elapsed": 0.6350906819999977}, "eval_direct": {"wall_elapsed": 11.94170841993764, "process_elapsed": 12.134813735999984}, "multipole_to_local": {"wall_elapsed": 109.43196559883654, "process_elapsed": 109.081074277}, "eval_multipoles": {"wall_elapsed": 29.13186590280384, "process_elapsed": 29.05222061400002}, "form_locals": {"wall_elapsed": 9.20513498224318, "process_elapsed": 9.17659013399998}, "refine_locals": {"wall_elapsed": 0.39085082802921534, "process_elapsed": 0.3908466930000145}, "eval_locals": {"wall_elapsed": 0.23075266415253282, "process_elapsed": 0.23075040099996613}}, {"nterms_fmm_total": 5000000, "direct_workload": 47923958, "direct_nsource_boxes": 321217, "m2l_workload": 6098447000, "m2p_workload": 1049959000, "m2p_nboxes": 1058414, "p2l_workload": 250873800, "p2l_nboxes": 307399, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.31254801992326975, "process_elapsed": 0.30992854599998054}, "coarsen_multipoles": {"wall_elapsed": 0.637142900377512, "process_elapsed": 0.6467619349999723}, "eval_direct": {"wall_elapsed": 15.69611760089174, 
"process_elapsed": 15.757981944999813}, "multipole_to_local": {"wall_elapsed": 140.356332520023, "process_elapsed": 139.88694511699998}, "eval_multipoles": {"wall_elapsed": 38.49987019971013, "process_elapsed": 38.43145631599998}, "form_locals": {"wall_elapsed": 8.471607562154531, "process_elapsed": 8.449020899000061}, "refine_locals": {"wall_elapsed": 0.5592385237105191, "process_elapsed": 0.5596502360000386}, "eval_locals": {"wall_elapsed": 0.35891397623345256, "process_elapsed": 0.3589064400000552}}, {"nterms_fmm_total": 6000000, "direct_workload": 52175740, "direct_nsource_boxes": 365978, "m2l_workload": 6963407000, "m2p_workload": 1228104100, "m2p_nboxes": 1238954, "p2l_workload": 320901400, "p2l_nboxes": 340870, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.22916135005652905, "process_elapsed": 0.2294245719999708}, "coarsen_multipoles": {"wall_elapsed": 0.5582886077463627, "process_elapsed": 0.5605387639999435}, "eval_direct": {"wall_elapsed": 16.712323531042784, "process_elapsed": 16.77518112300004}, "multipole_to_local": {"wall_elapsed": 159.60533997509629, "process_elapsed": 159.07230811499994}, "eval_multipoles": {"wall_elapsed": 42.59306370886043, "process_elapsed": 42.45572857700006}, "form_locals": {"wall_elapsed": 10.805573990568519, "process_elapsed": 10.770887148999918}, "refine_locals": {"wall_elapsed": 0.5874539171345532, "process_elapsed": 0.579410558999939}, "eval_locals": {"wall_elapsed": 0.34672214509919286, "process_elapsed": 0.3467195629999651}}, {"nterms_fmm_total": 7000000, "direct_workload": 67754792, "direct_nsource_boxes": 399866, "m2l_workload": 7699296000, "m2p_workload": 1423135200, "m2p_nboxes": 1352554, "p2l_workload": 330677000, "p2l_nboxes": 356695, "eval_part_workload": 7000000, "form_multipoles": {"wall_elapsed": 0.34223695332184434, "process_elapsed": 0.3381626640000377}, "coarsen_multipoles": {"wall_elapsed": 0.8546734559349716, "process_elapsed": 0.8620984199999384}, "eval_direct": {"wall_elapsed": 20.248594620265067, "process_elapsed": 20.2735790050001}, "multipole_to_local": {"wall_elapsed": 177.53715593600646, "process_elapsed": 176.916876098}, "eval_multipoles": {"wall_elapsed": 46.6420071949251, "process_elapsed": 46.50159333199986}, "form_locals": {"wall_elapsed": 11.556011468637735, "process_elapsed": 11.530447846000015}, "refine_locals": {"wall_elapsed": 0.6371138021349907, "process_elapsed": 0.6370984749999025}, "eval_locals": {"wall_elapsed": 0.48767920210957527, "process_elapsed": 0.4836109719999513}}, {"nterms_fmm_total": 8000000, "direct_workload": 82707417, "direct_nsource_boxes": 441394, "m2l_workload": 8617704000, "m2p_workload": 1685913200, "m2p_nboxes": 1498566, "p2l_workload": 348565200, "p2l_nboxes": 382521, "eval_part_workload": 8000000, "form_multipoles": {"wall_elapsed": 0.4101475323550403, "process_elapsed": 0.4025623400000313}, "coarsen_multipoles": {"wall_elapsed": 0.8309031310491264, "process_elapsed": 0.8533334829999148}, "eval_direct": {"wall_elapsed": 21.83228043373674, "process_elapsed": 21.811839603999942}, "multipole_to_local": {"wall_elapsed": 200.4263506392017, "process_elapsed": 199.78048179799998}, "eval_multipoles": {"wall_elapsed": 55.83001929195598, "process_elapsed": 55.62621917499996}, "form_locals": {"wall_elapsed": 10.937592420261353, "process_elapsed": 10.904243739000094}, "refine_locals": {"wall_elapsed": 0.8196568163111806, "process_elapsed": 0.8155691360000219}, "eval_locals": {"wall_elapsed": 0.6638513482175767, "process_elapsed": 0.6638208470001246}}, {"nterms_fmm_total": 
1000000, "direct_workload": 5064570, "direct_nsource_boxes": 56907, "m2l_workload": 879645000, "m2p_workload": 80054000, "m2p_nboxes": 148660, "p2l_workload": 58602800, "p2l_nboxes": 84384, "eval_part_workload": 1000000, "form_multipoles": {"wall_elapsed": 0.06488013127818704, "process_elapsed": 0.06267741799999982}, "coarsen_multipoles": {"wall_elapsed": 0.14028360787779093, "process_elapsed": 0.1402826039999998}, "eval_direct": {"wall_elapsed": 3.573185256216675, "process_elapsed": 3.561920087999996}, "multipole_to_local": {"wall_elapsed": 25.873784495983273, "process_elapsed": 25.732638186}, "eval_multipoles": {"wall_elapsed": 4.642745970282704, "process_elapsed": 4.639341426000001}, "form_locals": {"wall_elapsed": 3.0461708339862525, "process_elapsed": 3.036363762999997}, "refine_locals": {"wall_elapsed": 0.11961846519261599, "process_elapsed": 0.1196096529999977}, "eval_locals": {"wall_elapsed": 0.0641988911665976, "process_elapsed": 0.06419945400000415}}, {"nterms_fmm_total": 2000000, "direct_workload": 14111527, "direct_nsource_boxes": 105337, "m2l_workload": 1702853000, "m2p_workload": 202584200, "m2p_nboxes": 311728, "p2l_workload": 120805600, "p2l_nboxes": 146698, "eval_part_workload": 2000000, "form_multipoles": {"wall_elapsed": 0.09974845498800278, "process_elapsed": 0.09974945999999818}, "coarsen_multipoles": {"wall_elapsed": 0.2711884528398514, "process_elapsed": 0.2711792780000053}, "eval_direct": {"wall_elapsed": 6.678008021786809, "process_elapsed": 6.685549811999991}, "multipole_to_local": {"wall_elapsed": 52.48578274901956, "process_elapsed": 50.94725895999999}, "eval_multipoles": {"wall_elapsed": 11.199469511862844, "process_elapsed": 10.973406965999999}, "form_locals": {"wall_elapsed": 5.428788446821272, "process_elapsed": 5.372336655000012}, "refine_locals": {"wall_elapsed": 0.2391572780907154, "process_elapsed": 0.2391561879999955}, "eval_locals": {"wall_elapsed": 0.13382326532155275, "process_elapsed": 0.13382537700000796}}, {"nterms_fmm_total": 3000000, "direct_workload": 24428758, "direct_nsource_boxes": 223172, "m2l_workload": 4011675000, "m2p_workload": 670713600, "m2p_nboxes": 735176, "p2l_workload": 213069300, "p2l_nboxes": 259667, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.17349608801305294, "process_elapsed": 0.17349610799999482}, "coarsen_multipoles": {"wall_elapsed": 0.4378826292231679, "process_elapsed": 0.43415366199999994}, "eval_direct": {"wall_elapsed": 14.007537546101958, "process_elapsed": 14.066462461}, "multipole_to_local": {"wall_elapsed": 131.48507186723873, "process_elapsed": 128.31762228600002}, "eval_multipoles": {"wall_elapsed": 29.94184113200754, "process_elapsed": 29.82207009000001}, "form_locals": {"wall_elapsed": 9.938785715959966, "process_elapsed": 9.913687585000048}, "refine_locals": {"wall_elapsed": 0.4504069094546139, "process_elapsed": 0.4487136799999689}, "eval_locals": {"wall_elapsed": 0.30359381902962923, "process_elapsed": 0.30358403499997166}}, {"nterms_fmm_total": 4000000, "direct_workload": 29106936, "direct_nsource_boxes": 259688, "m2l_workload": 4679517000, "m2p_workload": 737745100, "m2p_nboxes": 887508, "p2l_workload": 262131700, "p2l_nboxes": 277715, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.2583726462908089, "process_elapsed": 0.2584110270000224}, "coarsen_multipoles": {"wall_elapsed": 0.5752558228559792, "process_elapsed": 0.5769190489999687}, "eval_direct": {"wall_elapsed": 12.875543330796063, "process_elapsed": 12.883404596000048}, "multipole_to_local": 
{"wall_elapsed": 117.18043434806168, "process_elapsed": 116.57996186900004}, "eval_multipoles": {"wall_elapsed": 27.881489561870694, "process_elapsed": 27.78543085000001}, "form_locals": {"wall_elapsed": 8.799156446009874, "process_elapsed": 8.768620798000029}, "refine_locals": {"wall_elapsed": 0.4524097847752273, "process_elapsed": 0.45239475799996853}, "eval_locals": {"wall_elapsed": 0.2726343311369419, "process_elapsed": 0.27261175699999285}}, {"nterms_fmm_total": 5000000, "direct_workload": 47923958, "direct_nsource_boxes": 321217, "m2l_workload": 6098447000, "m2p_workload": 1049959000, "m2p_nboxes": 1058414, "p2l_workload": 250873800, "p2l_nboxes": 307399, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.22424806095659733, "process_elapsed": 0.2242476690000217}, "coarsen_multipoles": {"wall_elapsed": 0.6595294354483485, "process_elapsed": 0.6785909230000016}, "eval_direct": {"wall_elapsed": 16.094195554498583, "process_elapsed": 16.12589607299998}, "multipole_to_local": {"wall_elapsed": 144.88413401972502, "process_elapsed": 144.42795690799994}, "eval_multipoles": {"wall_elapsed": 37.05021429667249, "process_elapsed": 36.94682276699996}, "form_locals": {"wall_elapsed": 8.93320427602157, "process_elapsed": 8.92260437199991}, "refine_locals": {"wall_elapsed": 0.5139365317299962, "process_elapsed": 0.5144198460000098}, "eval_locals": {"wall_elapsed": 0.4673224021680653, "process_elapsed": 0.4632590369999434}}, {"nterms_fmm_total": 6000000, "direct_workload": 52175740, "direct_nsource_boxes": 365978, "m2l_workload": 6963407000, "m2p_workload": 1228104100, "m2p_nboxes": 1238954, "p2l_workload": 320901400, "p2l_nboxes": 340870, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.39084181608632207, "process_elapsed": 0.39083779399993546}, "coarsen_multipoles": {"wall_elapsed": 0.7719269678927958, "process_elapsed": 0.8146186569999827}, "eval_direct": {"wall_elapsed": 17.227728134952486, "process_elapsed": 17.294979483999896}, "multipole_to_local": {"wall_elapsed": 166.3015058990568, "process_elapsed": 165.78228093299992}, "eval_multipoles": {"wall_elapsed": 44.71846476290375, "process_elapsed": 44.62762825000004}, "form_locals": {"wall_elapsed": 9.726129946764559, "process_elapsed": 9.721420520000038}, "refine_locals": {"wall_elapsed": 0.5776261049322784, "process_elapsed": 0.5694936979999738}, "eval_locals": {"wall_elapsed": 0.3503082669340074, "process_elapsed": 0.350304264999977}}, {"nterms_fmm_total": 7000000, "direct_workload": 67754792, "direct_nsource_boxes": 399866, "m2l_workload": 7699296000, "m2p_workload": 1423135200, "m2p_nboxes": 1352554, "p2l_workload": 330677000, "p2l_nboxes": 356695, "eval_part_workload": 7000000, "form_multipoles": {"wall_elapsed": 0.29380203830078244, "process_elapsed": 0.29537681300007534}, "coarsen_multipoles": {"wall_elapsed": 0.6812492711469531, "process_elapsed": 0.7019599970000172}, "eval_direct": {"wall_elapsed": 26.609884928911924, "process_elapsed": 26.486387821999756}, "multipole_to_local": {"wall_elapsed": 225.8225390901789, "process_elapsed": 218.796968299}, "eval_multipoles": {"wall_elapsed": 72.68378263898194, "process_elapsed": 64.27706159500008}, "form_locals": {"wall_elapsed": 15.491025436203927, "process_elapsed": 15.374123016999874}, "refine_locals": {"wall_elapsed": 0.797229612711817, "process_elapsed": 0.7972753879998891}, "eval_locals": {"wall_elapsed": 0.7185860709287226, "process_elapsed": 0.6837079179999819}}, {"nterms_fmm_total": 8000000, "direct_workload": 82707417, "direct_nsource_boxes": 
441394, "m2l_workload": 8617704000, "m2p_workload": 1685913200, "m2p_nboxes": 1498566, "p2l_workload": 348565200, "p2l_nboxes": 382521, "eval_part_workload": 8000000, "form_multipoles": {"wall_elapsed": 0.8264692649245262, "process_elapsed": 0.5747898250001526}, "coarsen_multipoles": {"wall_elapsed": 1.5497954818420112, "process_elapsed": 1.3431830110000647}, "eval_direct": {"wall_elapsed": 29.598126132041216, "process_elapsed": 29.095733828999755}, "multipole_to_local": {"wall_elapsed": 273.25275476509705, "process_elapsed": 266.123896437}, "eval_multipoles": {"wall_elapsed": 71.07263175630942, "process_elapsed": 70.09043602299994}, "form_locals": {"wall_elapsed": 14.648473134730011, "process_elapsed": 14.53982591099998}, "refine_locals": {"wall_elapsed": 1.0756985042244196, "process_elapsed": 1.0682339970001067}, "eval_locals": {"wall_elapsed": 0.7669271482154727, "process_elapsed": 0.6153591639999831}}, {"nterms_fmm_total": 1000000, "direct_workload": 5064570, "direct_nsource_boxes": 56907, "m2l_workload": 879645000, "m2p_workload": 80054000, "m2p_nboxes": 148660, "p2l_workload": 58602800, "p2l_nboxes": 84384, "eval_part_workload": 1000000, "form_multipoles": {"wall_elapsed": 0.09652932407334447, "process_elapsed": 0.09385259800000023}, "coarsen_multipoles": {"wall_elapsed": 0.1438053478486836, "process_elapsed": 0.14380348099999996}, "eval_direct": {"wall_elapsed": 2.8724248111248016, "process_elapsed": 2.8874125049999955}, "multipole_to_local": {"wall_elapsed": 22.150050774216652, "process_elapsed": 22.163823834000002}, "eval_multipoles": {"wall_elapsed": 4.035081175155938, "process_elapsed": 4.018481735999998}, "form_locals": {"wall_elapsed": 2.4109095097519457, "process_elapsed": 2.410357944000001}, "refine_locals": {"wall_elapsed": 0.10400902340188622, "process_elapsed": 0.10400039700000008}, "eval_locals": {"wall_elapsed": 0.05630338191986084, "process_elapsed": 0.05630333900000295}}, {"nterms_fmm_total": 2000000, "direct_workload": 14111527, "direct_nsource_boxes": 105337, "m2l_workload": 1702853000, "m2p_workload": 202584200, "m2p_nboxes": 311728, "p2l_workload": 120805600, "p2l_nboxes": 146698, "eval_part_workload": 2000000, "form_multipoles": {"wall_elapsed": 0.10583456791937351, "process_elapsed": 0.1058346049999983}, "coarsen_multipoles": {"wall_elapsed": 0.24514786386862397, "process_elapsed": 0.24514414600000123}, "eval_direct": {"wall_elapsed": 5.353780907113105, "process_elapsed": 5.363511694999993}, "multipole_to_local": {"wall_elapsed": 40.435692673083395, "process_elapsed": 40.370689201000005}, "eval_multipoles": {"wall_elapsed": 8.241205959115177, "process_elapsed": 8.222697027999999}, "form_locals": {"wall_elapsed": 4.570903809741139, "process_elapsed": 4.547761195999996}, "refine_locals": {"wall_elapsed": 0.22210092516615987, "process_elapsed": 0.22209859000000165}, "eval_locals": {"wall_elapsed": 0.13560375943779945, "process_elapsed": 0.13560390499999642}}, {"nterms_fmm_total": 3000000, "direct_workload": 24428758, "direct_nsource_boxes": 223172, "m2l_workload": 4011675000, "m2p_workload": 670713600, "m2p_nboxes": 735176, "p2l_workload": 213069300, "p2l_nboxes": 259667, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.22054569516330957, "process_elapsed": 0.22053314199999363}, "coarsen_multipoles": {"wall_elapsed": 0.5125908548943698, "process_elapsed": 0.5085106519999982}, "eval_direct": {"wall_elapsed": 12.12822123710066, "process_elapsed": 12.215636038999989}, "multipole_to_local": {"wall_elapsed": 93.70578746590763, "process_elapsed": 
93.417041737}, "eval_multipoles": {"wall_elapsed": 25.345406110864133, "process_elapsed": 25.348133571999995}, "form_locals": {"wall_elapsed": 8.22137835714966, "process_elapsed": 8.202794989000012}, "refine_locals": {"wall_elapsed": 0.41789698833599687, "process_elapsed": 0.4185953409999854}, "eval_locals": {"wall_elapsed": 0.269101491663605, "process_elapsed": 0.26908321999999885}}, {"nterms_fmm_total": 4000000, "direct_workload": 29106936, "direct_nsource_boxes": 259688, "m2l_workload": 4679517000, "m2p_workload": 737745100, "m2p_nboxes": 887508, "p2l_workload": 262131700, "p2l_nboxes": 277715, "eval_part_workload": 4000000, "form_multipoles": {"wall_elapsed": 0.17082596709951758, "process_elapsed": 0.1708260939999775}, "coarsen_multipoles": {"wall_elapsed": 0.4154431517235935, "process_elapsed": 0.42217062099999225}, "eval_direct": {"wall_elapsed": 11.69108117558062, "process_elapsed": 11.81256067199999}, "multipole_to_local": {"wall_elapsed": 105.88159589888528, "process_elapsed": 105.51405778699998}, "eval_multipoles": {"wall_elapsed": 28.176122231874615, "process_elapsed": 28.119217985000034}, "form_locals": {"wall_elapsed": 8.196291028987616, "process_elapsed": 8.181508215999997}, "refine_locals": {"wall_elapsed": 0.46452207770198584, "process_elapsed": 0.4564281270000379}, "eval_locals": {"wall_elapsed": 0.25099264504387975, "process_elapsed": 0.25098168199997417}}, {"nterms_fmm_total": 5000000, "direct_workload": 47923958, "direct_nsource_boxes": 321217, "m2l_workload": 6098447000, "m2p_workload": 1049959000, "m2p_nboxes": 1058414, "p2l_workload": 250873800, "p2l_nboxes": 307399, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.27000816026702523, "process_elapsed": 0.2720453750000047}, "coarsen_multipoles": {"wall_elapsed": 0.518343870062381, "process_elapsed": 0.5315893110000047}, "eval_direct": {"wall_elapsed": 15.501696541905403, "process_elapsed": 15.713185632000034}, "multipole_to_local": {"wall_elapsed": 145.782555392012, "process_elapsed": 145.32088750699995}, "eval_multipoles": {"wall_elapsed": 38.39985727425665, "process_elapsed": 38.26757356000007}, "form_locals": {"wall_elapsed": 7.733311468735337, "process_elapsed": 7.705839346999937}, "refine_locals": {"wall_elapsed": 0.5397246889770031, "process_elapsed": 0.540322248999928}, "eval_locals": {"wall_elapsed": 0.32426714105531573, "process_elapsed": 0.3242635839999366}}, {"nterms_fmm_total": 6000000, "direct_workload": 52175740, "direct_nsource_boxes": 365978, "m2l_workload": 6963407000, "m2p_workload": 1228104100, "m2p_nboxes": 1238954, "p2l_workload": 320901400, "p2l_nboxes": 340870, "eval_part_workload": 6000000, "form_multipoles": {"wall_elapsed": 0.24512064224109054, "process_elapsed": 0.25092624599994906}, "coarsen_multipoles": {"wall_elapsed": 0.7491539157927036, "process_elapsed": 0.7398730170000363}, "eval_direct": {"wall_elapsed": 17.279178180731833, "process_elapsed": 17.591523772999835}, "multipole_to_local": {"wall_elapsed": 161.031417501159, "process_elapsed": 160.49498069599997}, "eval_multipoles": {"wall_elapsed": 45.24870359105989, "process_elapsed": 45.14435771700005}, "form_locals": {"wall_elapsed": 10.377049359958619, "process_elapsed": 10.342459472999963}, "refine_locals": {"wall_elapsed": 0.7589819608256221, "process_elapsed": 0.754924054000071}, "eval_locals": {"wall_elapsed": 0.4476486751809716, "process_elapsed": 0.4476429339999868}}, {"nterms_fmm_total": 7000000, "direct_workload": 67754792, "direct_nsource_boxes": 399866, "m2l_workload": 7699296000, "m2p_workload": 
1423135200, "m2p_nboxes": 1352554, "p2l_workload": 330677000, "p2l_nboxes": 356695, "eval_part_workload": 7000000, "form_multipoles": {"wall_elapsed": 0.27594426879659295, "process_elapsed": 0.28047744100001637}, "coarsen_multipoles": {"wall_elapsed": 0.7236054469831288, "process_elapsed": 0.7297952440000017}, "eval_direct": {"wall_elapsed": 20.532363687176257, "process_elapsed": 20.546230066000135}, "multipole_to_local": {"wall_elapsed": 180.1073812278919, "process_elapsed": 179.505576249}, "eval_multipoles": {"wall_elapsed": 51.82399559998885, "process_elapsed": 51.64447850400006}, "form_locals": {"wall_elapsed": 13.09091425826773, "process_elapsed": 13.063926417999937}, "refine_locals": {"wall_elapsed": 0.6001745373941958, "process_elapsed": 0.600155986000118}, "eval_locals": {"wall_elapsed": 0.36805562302470207, "process_elapsed": 0.3680515439998544}}, {"nterms_fmm_total": 8000000, "direct_workload": 82707417, "direct_nsource_boxes": 441394, "m2l_workload": 8617704000, "m2p_workload": 1685913200, "m2p_nboxes": 1498566, "p2l_workload": 348565200, "p2l_nboxes": 382521, "eval_part_workload": 8000000, "form_multipoles": {"wall_elapsed": 0.39941675309091806, "process_elapsed": 0.3981763469998896}, "coarsen_multipoles": {"wall_elapsed": 0.9822220490314066, "process_elapsed": 1.0149941570000465}, "eval_direct": {"wall_elapsed": 21.01450498914346, "process_elapsed": 21.073397865999823}, "multipole_to_local": {"wall_elapsed": 197.67101062694564, "process_elapsed": 197.05391954499987}, "eval_multipoles": {"wall_elapsed": 58.286998888943344, "process_elapsed": 58.08302871900014}, "form_locals": {"wall_elapsed": 11.379268935415894, "process_elapsed": 11.344901745000016}, "refine_locals": {"wall_elapsed": 0.659439907874912, "process_elapsed": 0.6594181859998116}, "eval_locals": {"wall_elapsed": 0.48237756825983524, "process_elapsed": 0.47433469900011005}}] \ No newline at end of file diff --git a/examples/demo_perf_model.py b/examples/demo_perf_model.py deleted file mode 100644 index 662bd2c..0000000 --- a/examples/demo_perf_model.py +++ /dev/null @@ -1,106 +0,0 @@ -from __future__ import division -import pyopencl as cl -import numpy as np -from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler -import functools -from boxtree.distributed.perf_model import PerformanceModel, PerformanceCounter -from boxtree.distributed.perf_model import generate_random_traversal -from boxtree.fmm import drive_fmm -from pyopencl.clrandom import PhiloxGenerator - -context = cl.create_some_context() -queue = cl.CommandQueue(context) -dtype = np.float64 -helmholtz_k = 0 -dims = 3 - - -def fmm_level_to_nterms(tree, level): - return max(level, 3) - - -wrangler_factory = functools.partial( - FMMLibExpansionWrangler, helmholtz_k=0, fmm_level_to_nterms=fmm_level_to_nterms) - - -def train_model(): - model = PerformanceModel(context, wrangler_factory, True, drive_fmm) - model.loadjson('model.json') - - test_cases = [ - (9000, 9000), - (9000, 9000), - (12000, 12000), - (12000, 12000), - (15000, 15000), - (15000, 15000), - (18000, 18000), - (18000, 18000) - ] - - for nsources, ntargets in test_cases: - trav = generate_random_traversal(context, nsources, ntargets, dims, dtype) - model.time_performance(trav) - - model.savejson('model.json') - - -def eval_model(): - nsources = 25000 - ntargets = 25000 - wall_time = True - - eval_traversal = generate_random_traversal( - context, nsources, ntargets, dims, dtype) - - eval_wrangler = wrangler_factory(eval_traversal.tree) - - # {{{ Predict timing - - eval_counter = 
PerformanceCounter(eval_traversal, eval_wrangler, True) - - model = PerformanceModel(context, wrangler_factory, True, drive_fmm) - model.loadjson('model.json') - - predict_timing = model.predict_step_time(eval_counter, wall_time=wall_time) - - # }}} - - # {{{ Actual timing - - true_timing = {} - - rng = PhiloxGenerator(context) - source_weights = rng.uniform( - queue, eval_traversal.tree.nsources, eval_traversal.tree.coord_dtype).get() - - drive_fmm(eval_traversal, eval_wrangler, source_weights, timing_data=true_timing) - - # }}} - - for field in ["eval_direct", "multipole_to_local", "eval_multipoles", - "form_locals", "eval_locals"]: - predict_time_field = predict_timing[field] - - if wall_time: - true_time_field = true_timing[field].wall_elapsed - else: - true_time_field = true_timing[field].process_elapsed - - diff = abs(predict_time_field - true_time_field) - - print(field + ": predict " + str(predict_time_field) + " actual " - + str(true_time_field) + " error " + str(diff / true_time_field)) - - -if __name__ == '__main__': - import sys - if len(sys.argv) != 2: - raise RuntimeError("Please provide exact 1 argument") - - if sys.argv[1] == 'train': - train_model() - elif sys.argv[1] == 'eval': - eval_model() - else: - raise RuntimeError("Do not recognize the argument") diff --git a/test/test_distributed.py b/test/test_distributed.py index 9dc5ff9..b6d7cf2 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -24,7 +24,7 @@ def _test_against_shared(dims, nsources, ntargets, dtype): rank = comm.Get_rank() # Initialize arguments for worker processes - trav = None + d_trav = None sources_weights = None helmholtz_k = 0 @@ -82,7 +82,7 @@ def _test_against_shared(dims, nsources, ntargets, dtype): from boxtree.distributed import DistributedFMMInfo distribued_fmm_info = DistributedFMMInfo( - queue, trav, distributed_expansion_wrangler_factory, comm=comm) + queue, d_trav, distributed_expansion_wrangler_factory, comm=comm) pot_dfmm = distribued_fmm_info.drive_dfmm(sources_weights) if rank == 0: @@ -137,7 +137,7 @@ def _test_constantone(dims, nsources, ntargets, dtype): rank = comm.Get_rank() # Initialization - trav = None + d_trav = None sources_weights = None # Configure PyOpenCL @@ -166,14 +166,13 @@ def _test_constantone(dims, nsources, ntargets, dtype): from boxtree.traversal import FMMTraversalBuilder tg = FMMTraversalBuilder(ctx) d_trav, _ = tg(queue, tree, debug=True) - trav = d_trav.get(queue=queue) def constantone_expansion_wrangler_factory(tree): return ConstantOneExpansionWrangler(tree) from boxtree.distributed import DistributedFMMInfo distributed_fmm_info = DistributedFMMInfo( - queue, trav, constantone_expansion_wrangler_factory, comm=MPI.COMM_WORLD + queue, d_trav, constantone_expansion_wrangler_factory, comm=MPI.COMM_WORLD ) pot_dfmm = distributed_fmm_info.drive_dfmm( -- GitLab From 4d16f6d0ecfbb1de71231d45a1ee5baf9a631f1c Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 22 Feb 2019 22:38:08 -0600 Subject: [PATCH 179/260] Try disable recv mprobe --- boxtree/distributed/__init__.py | 4 ++++ boxtree/distributed/calculation.py | 3 +++ boxtree/distributed/local_tree.py | 3 +++ 3 files changed, 10 insertions(+) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 8a2f585..e6f9b62 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -23,10 +23,14 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" +import mpi4py +mpi4py.rc.recv_mprobe = False + from mpi4py import MPI import numpy as np from boxtree.cost import CLFMMCostModel + MPITags = dict( DIST_TREE=0, DIST_SOURCES=1, diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 38b6e77..de04355 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -23,6 +23,9 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ +import mpi4py +mpi4py.rc.recv_mprobe = False + import numpy as np import pyopencl as cl from boxtree.distributed import MPITags diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 19c4423..2b7ec99 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -23,6 +23,9 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ +import mpi4py +mpi4py.rc.recv_mprobe = False + from collections import namedtuple import pyopencl as cl from mako.template import Template -- GitLab From c91ad97ad1b3783214cef7d8901ac35f99fffa3f Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 25 Feb 2019 15:40:35 -0800 Subject: [PATCH 180/260] Revert "Try disable recv mprobe" This reverts commit 4d16f6d0ecfbb1de71231d45a1ee5baf9a631f1c. --- boxtree/distributed/__init__.py | 4 ---- boxtree/distributed/calculation.py | 3 --- boxtree/distributed/local_tree.py | 3 --- 3 files changed, 10 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index e6f9b62..8a2f585 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -23,14 +23,10 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ -import mpi4py -mpi4py.rc.recv_mprobe = False - from mpi4py import MPI import numpy as np from boxtree.cost import CLFMMCostModel - MPITags = dict( DIST_TREE=0, DIST_SOURCES=1, diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index de04355..38b6e77 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -23,9 +23,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ -import mpi4py -mpi4py.rc.recv_mprobe = False - import numpy as np import pyopencl as cl from boxtree.distributed import MPITags diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 2b7ec99..19c4423 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -23,9 +23,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" -import mpi4py -mpi4py.rc.recv_mprobe = False - from collections import namedtuple import pyopencl as cl from mako.template import Template -- GitLab From 5963d38e9d5efd2182132e8a4a9f3ab0bcc90963 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 24 Mar 2019 17:18:17 -0500 Subject: [PATCH 181/260] Use new cost model interface --- boxtree/distributed/__init__.py | 9 +- boxtree/distributed/perf_model.py | 720 ------------------------------ 2 files changed, 1 insertion(+), 728 deletions(-) delete mode 100644 boxtree/distributed/perf_model.py diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 8a2f585..b3c3637 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -105,16 +105,9 @@ class DistributedFMMInfo(object): queue, CLFMMCostModel.get_constantone_calibration_params() ) - ndirect_sources_per_target_box = ( - cost_model.get_ndirect_sources_per_target_box(global_trav_dev) - ) - boxes_time = cost_model.aggregate_stage_costs_per_box( global_trav_dev, - cost_model.get_fmm_modeled_cost( - global_trav_dev, self.global_wrangler.level_nterms, - ndirect_sources_per_target_box - ) + cost_model(global_trav_dev, self.global_wrangler.level_nterms) ).get() from boxtree.distributed.partition import partition_work diff --git a/boxtree/distributed/perf_model.py b/boxtree/distributed/perf_model.py deleted file mode 100644 index 87e8010..0000000 --- a/boxtree/distributed/perf_model.py +++ /dev/null @@ -1,720 +0,0 @@ -from __future__ import division - -__copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ - Copyright (C) 2018 Hao Gao" - -__license__ = """ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
-""" - -import pyopencl as cl -import numpy as np -from collections import namedtuple -from pyopencl.clrandom import PhiloxGenerator -import pickle -from boxtree.fmm import TimingResult -import json - - -def generate_random_traversal(context, nsources, ntargets, dims, dtype): - with cl.CommandQueue(context) as queue: - from boxtree.tools import make_normal_particle_array as p_normal - sources = p_normal(queue, nsources, dims, dtype, seed=15) - targets = p_normal(queue, ntargets, dims, dtype, seed=18) - - rng = PhiloxGenerator(context, seed=22) - target_radii = rng.uniform( - queue, ntargets, a=0, b=0.05, dtype=np.float64).get() - - from boxtree import TreeBuilder - tb = TreeBuilder(context) - tree, _ = tb(queue, sources, targets=targets, target_radii=target_radii, - stick_out_factor=0.25, max_particles_in_box=30, debug=True) - - from boxtree.traversal import FMMTraversalBuilder - tg = FMMTraversalBuilder(context, well_sep_is_n_away=2) - d_trav, _ = tg(queue, tree, debug=True) - trav = d_trav.get(queue=queue) - - return trav - - -FMMParameters = namedtuple( - "FMMParameters", - ['ncoeffs_fmm_by_level', - 'translation_source_power', - 'translation_target_power', - 'translation_max_power'] -) - - -class PerformanceCounter: - - def __init__(self, traversal, wrangler, uses_pde_expansions): - self.traversal = traversal - self.wrangler = wrangler - self.uses_pde_expansions = uses_pde_expansions - - self.parameters = self.get_fmm_parameters( - traversal.tree.dimensions, - uses_pde_expansions, - wrangler.level_nterms - ) - - @staticmethod - def xlat_cost(p_source, p_target, parameters): - """ - :param p_source: A numpy array of numbers of source terms - :return: The same shape as *p_source* - """ - return ( - p_source ** parameters.translation_source_power - * p_target ** parameters.translation_target_power - * np.maximum(p_source, p_target) ** parameters.translation_max_power - ) - - @staticmethod - def get_fmm_parameters(dimensions, use_pde_expansions, level_nterms): - if use_pde_expansions: - ncoeffs_fmm_by_level = level_nterms ** (dimensions - 1) - - if dimensions == 2: - translation_source_power = 1 - translation_target_power = 1 - translation_max_power = 0 - elif dimensions == 3: - # Based on a reading of FMMlib, i.e. a point-and-shoot FMM. - translation_source_power = 0 - translation_target_power = 0 - translation_max_power = 3 - else: - raise ValueError("Don't know how to estimate expansion complexities " - "for dimension %d" % dimensions) - - else: - ncoeffs_fmm_by_level = level_nterms ** dimensions - - translation_source_power = dimensions - translation_target_power = dimensions - translation_max_power = 0 - - return FMMParameters( - ncoeffs_fmm_by_level=ncoeffs_fmm_by_level, - translation_source_power=translation_source_power, - translation_target_power=translation_target_power, - translation_max_power=translation_max_power - ) - - def count_nsources_by_level(self): - """ - :return: A numpy array of share (tree.nlevels,) such that the ith index - documents the number of sources on level i. 
- """ - tree = self.traversal.tree - - nsources_by_level = np.empty((tree.nlevels,), dtype=np.intp) - - for ilevel in range(tree.nlevels): - start_ibox = tree.level_start_box_nrs[ilevel] - end_ibox = tree.level_start_box_nrs[ilevel + 1] - count = 0 - - for ibox in range(start_ibox, end_ibox): - count += tree.box_source_counts_nonchild[ibox] - - nsources_by_level[ilevel] = count - - return nsources_by_level - - def count_nters_fmm_total(self): - """ - :return: total number of terms formed across all levels during form_multipole - """ - nsources_by_level = self.count_nsources_by_level() - - ncoeffs_fmm_by_level = self.parameters.ncoeffs_fmm_by_level - - nterms_fmm_total = np.sum(nsources_by_level * ncoeffs_fmm_by_level) - - return nterms_fmm_total - - def count_direct(self, use_global_idx=False, box_target_counts_nonchild=None): - """ - :return: If *use_global_idx* is True, return a numpy array of shape - (tree.nboxes,) such that the ith entry represents the workload from - direct evaluation on box i. If *use_global_idx* is False, return a numpy - array of shape (ntarget_boxes,) such that the ith entry represents the - workload on *target_boxes* i. - """ - traversal = self.traversal - tree = traversal.tree - - if box_target_counts_nonchild is None: - box_target_counts_nonchild = tree.box_target_counts_nonchild - - if use_global_idx: - direct_workload = np.zeros((tree.nboxes,), dtype=np.intp) - else: - ntarget_boxes = len(traversal.target_boxes) - direct_workload = np.zeros((ntarget_boxes,), dtype=np.intp) - - for itgt_box, tgt_ibox in enumerate(traversal.target_boxes): - ntargets = box_target_counts_nonchild[tgt_ibox] - nsources = 0 - - start, end = traversal.neighbor_source_boxes_starts[itgt_box:itgt_box+2] - - for src_ibox in traversal.neighbor_source_boxes_lists[start:end]: - nsources += tree.box_source_counts_nonchild[src_ibox] - - if traversal.from_sep_close_smaller_starts is not None: - start, end = ( - traversal.from_sep_close_smaller_starts[itgt_box:itgt_box+2]) - - for src_ibox in traversal.from_sep_close_smaller_lists[start:end]: - nsources += tree.box_source_counts_nonchild[src_ibox] - - if traversal.from_sep_close_bigger_starts is not None: - start, end = ( - traversal.from_sep_close_bigger_starts[itgt_box:itgt_box+2]) - - for src_ibox in traversal.from_sep_close_bigger_lists[start:end]: - nsources += tree.box_source_counts_nonchild[src_ibox] - - count = nsources * ntargets - - if use_global_idx: - direct_workload[tgt_ibox] = count - else: - direct_workload[itgt_box] = count - - return direct_workload - - def count_direct_source_boxes(self): - """ - Note: This method does not have a 'use_global_idx' argument because list 1 - and list 3 near box list is indexed like 'target_boxes' while list 4 near box - list is indexed like 'target_or_target_parent_boxes'. 
- """ - traversal = self.traversal - tree = traversal.tree - - ndirect_src_boxes = np.zeros((tree.nboxes,), dtype=np.intp) - - ndirect_src_boxes[traversal.target_boxes] += ( - traversal.neighbor_source_boxes_starts[1:] - - traversal.neighbor_source_boxes_starts[:-1] - ) - - if traversal.from_sep_close_smaller_starts is not None: - ndirect_src_boxes[traversal.target_boxes] += ( - traversal.from_sep_close_smaller_starts[1:] - - traversal.from_sep_close_smaller_starts[:-1] - ) - - if traversal.from_sep_close_bigger_starts is not None: - ndirect_src_boxes[traversal.target_boxes] += ( - traversal.from_sep_close_bigger_starts[1:] - - traversal.from_sep_close_bigger_starts[:-1] - ) - - return ndirect_src_boxes - - def count_m2l(self, use_global_idx=False): - """ - :return: If *use_global_idx* is True, return a numpy array of shape - (tree.nboxes,) such that the ith entry represents the workload from - multipole to local expansion on box i. If *use_global_idx* is False, - return a numpy array of shape (ntarget_or_target_parent_boxes,) such that - the ith entry represents the workload on *target_or_target_parent_boxes* - i. - """ - trav = self.traversal - wrangler = self.wrangler - parameters = self.parameters - - ntarget_or_target_parent_boxes = len(trav.target_or_target_parent_boxes) - - if use_global_idx: - nm2l = np.zeros((trav.tree.nboxes,), dtype=np.intp) - else: - nm2l = np.zeros((ntarget_or_target_parent_boxes,), dtype=np.intp) - - for itgt_box, tgt_ibox in enumerate(trav.target_or_target_parent_boxes): - start, end = trav.from_sep_siblings_starts[itgt_box:itgt_box+2] - from_sep_siblings_level = trav.tree.box_levels[ - trav.from_sep_siblings_lists[start:end] - ] - - if start == end: - continue - - tgt_box_level = trav.tree.box_levels[tgt_ibox] - - from_sep_siblings_nterms = wrangler.level_nterms[from_sep_siblings_level] - tgt_box_nterms = wrangler.level_nterms[tgt_box_level] - - from_sep_siblings_costs = self.xlat_cost( - from_sep_siblings_nterms, tgt_box_nterms, parameters) - - if use_global_idx: - nm2l[tgt_ibox] += np.sum(from_sep_siblings_costs) - else: - nm2l[itgt_box] += np.sum(from_sep_siblings_costs) - - return nm2l - - def count_m2p(self, use_global_idx=False, box_target_counts_nonchild=None): - trav = self.traversal - tree = trav.tree - - if use_global_idx: - nm2p = np.zeros((tree.nboxes,), dtype=np.intp) - nm2p_boxes = np.zeros((tree.nboxes,), dtype=np.intp) - else: - nm2p = np.zeros((len(trav.target_boxes),), dtype=np.intp) - nm2p_boxes = np.zeros((len(trav.target_boxes),), dtype=np.intp) - - if box_target_counts_nonchild is None: - box_target_counts_nonchild = tree.box_target_counts_nonchild - - for ilevel, sep_smaller_list in enumerate(trav.from_sep_smaller_by_level): - ncoeffs_fmm_cur_level = self.parameters.ncoeffs_fmm_by_level[ilevel] - tgt_box_list = trav.target_boxes_sep_smaller_by_source_level[ilevel] - - for itgt_box, tgt_ibox in enumerate(tgt_box_list): - ntargets = box_target_counts_nonchild[tgt_ibox] - - start, end = sep_smaller_list.starts[itgt_box:itgt_box + 2] - - workload = (end - start) * ntargets * ncoeffs_fmm_cur_level - - if use_global_idx: - nm2p[tgt_ibox] += workload - nm2p_boxes[tgt_ibox] += (end - start) - else: - nm2p[sep_smaller_list.nonempty_indices[itgt_box]] += workload - nm2p_boxes[sep_smaller_list.nonempty_indices[itgt_box]] += ( - end - start - ) - - return nm2p, nm2p_boxes - - def count_p2l(self, use_global_idx=False): - trav = self.traversal - tree = trav.tree - parameters = self.parameters - - if use_global_idx: - np2l = np.zeros((tree.nboxes,), 
dtype=np.intp) - else: - np2l = np.zeros(len(trav.target_or_target_parent_boxes), dtype=np.intp) - - for itgt_box, tgt_ibox in enumerate(trav.target_boxes): - tgt_box_level = trav.tree.box_levels[tgt_ibox] - ncoeffs = parameters.ncoeffs_fmm_by_level[tgt_box_level] - - start, end = trav.from_sep_bigger_starts[itgt_box:itgt_box + 2] - - np2l_sources = 0 - for src_ibox in trav.from_sep_bigger_lists[start:end]: - np2l_sources += tree.box_source_counts_nonchild[src_ibox] - - if use_global_idx: - np2l[tgt_ibox] = np2l_sources * ncoeffs - else: - np2l[itgt_box] = np2l_sources * ncoeffs - - return np2l - - def count_p2l_source_boxes(self, use_global_idx=False): - trav = self.traversal - tree = trav.tree - - p2l_nsource_boxes = (trav.from_sep_bigger_starts[1:] - - trav.from_sep_bigger_starts[:-1]) - - if use_global_idx: - np2l = np.zeros((tree.nboxes,), dtype=np.intp) - np2l[trav.target_or_target_parent_boxes] = p2l_nsource_boxes - return np2l - else: - return p2l_nsource_boxes - - def count_eval_part(self, use_global_idx=False, box_target_counts_nonchild=None): - trav = self.traversal - tree = trav.tree - parameters = self.parameters - - if use_global_idx: - neval_part = np.zeros(tree.nboxes, dtype=np.intp) - else: - neval_part = np.zeros(len(trav.target_boxes), dtype=np.intp) - - if box_target_counts_nonchild is None: - box_target_counts_nonchild = tree.box_target_counts_nonchild - - for itgt_box, tgt_ibox in enumerate(trav.target_boxes): - ntargets = box_target_counts_nonchild[tgt_ibox] - tgt_box_level = trav.tree.box_levels[tgt_ibox] - ncoeffs_fmm = parameters.ncoeffs_fmm_by_level[tgt_box_level] - - if use_global_idx: - neval_part[tgt_ibox] = ntargets * ncoeffs_fmm - else: - neval_part[itgt_box] = ntargets * ncoeffs_fmm - - return neval_part - - -class PerformanceModel: - - def __init__(self, cl_context, uses_pde_expansions): - self.cl_context = cl_context - self.uses_pde_expansions = uses_pde_expansions - - self.time_result = [] - - from pyopencl.clrandom import PhiloxGenerator - self.rng = PhiloxGenerator(cl_context) - - def time_performance(self, traversal, wrangler): - counter = PerformanceCounter(traversal, wrangler, self.uses_pde_expansions) - - # Record useful metadata for assembling performance data - nm2p, nm2p_boxes = counter.count_m2p() - - timing_data = { - "nterms_fmm_total": counter.count_nters_fmm_total(), - "direct_workload": np.sum(counter.count_direct()), - "direct_nsource_boxes": np.sum(counter.count_direct_source_boxes()), - "m2l_workload": np.sum(counter.count_m2l()), - "m2p_workload": np.sum(nm2p), - "m2p_nboxes": np.sum(nm2p_boxes), - "p2l_workload": np.sum(counter.count_p2l()), - "p2l_nboxes": np.sum(counter.count_p2l_source_boxes()), - "eval_part_workload": np.sum(counter.count_eval_part()) - } - - # Generate random source weights - with cl.CommandQueue(self.cl_context) as queue: - source_weights = self.rng.uniform( - queue, - traversal.tree.nsources, - traversal.tree.coord_dtype - ).get() - - # Time a FMM run - from boxtree.fmm import drive_fmm - drive_fmm(traversal, wrangler, source_weights, timing_data=timing_data) - - self.time_result.append(timing_data) - - def form_multipoles_model(self, wall_time=True): - return self.linear_regression( - "form_multipoles", ["nterms_fmm_total"], - wall_time=wall_time) - - def eval_direct_model(self, wall_time=True): - return self.linear_regression( - "eval_direct", - ["direct_workload", "direct_nsource_boxes"], - wall_time=wall_time) - - def multipole_to_local_model(self, wall_time=True): - return self.linear_regression( - 
"multipole_to_local", ["m2l_workload"], - wall_time=wall_time - ) - - def eval_multipoles_model(self, wall_time=True): - return self.linear_regression( - "eval_multipoles", ["m2p_workload", "m2p_nboxes"], - wall_time=wall_time - ) - - def form_locals_model(self, wall_time=True): - return self.linear_regression( - "form_locals", ["p2l_workload", "p2l_nboxes"], - wall_time=wall_time - ) - - def eval_locals_model(self, wall_time=True): - return self.linear_regression( - "eval_locals", ["eval_part_workload"], - wall_time=wall_time - ) - - def linear_regression(self, y_name, x_name, wall_time=True): - """ - :arg y_name: Name of the depedent variable - :arg x_name: A list of names of independent variables - """ - nresult = len(self.time_result) - nvariables = len(x_name) - - if nresult < 1: - raise RuntimeError("Please run FMM at least once using time_performance " - "before forming models.") - elif nresult == 1: - result = self.time_result[0] - - if wall_time: - dependent_value = result[y_name]["wall_elapsed"] - else: - dependent_value = result[y_name]["process_elapsed"] - - independent_value = result[x_name[0]] - coeff = dependent_value / independent_value - - return (coeff,) + tuple(0.0 for _ in range(nvariables - 1)) - else: - dependent_value = np.empty((nresult,), dtype=float) - coeff_matrix = np.empty((nresult, nvariables + 1), dtype=float) - - for iresult, result in enumerate(self.time_result): - if wall_time: - dependent_value[iresult] = result[y_name]["wall_elapsed"] - else: - dependent_value[iresult] = result[y_name]["process_elapsed"] - - for icol, variable_name in enumerate(x_name): - coeff_matrix[iresult, icol] = result[variable_name] - - coeff_matrix[:, -1] = 1 - - try: - import statsmodels.api as sm - rlm_model = sm.RLM(dependent_value, coeff_matrix) - rlm_result = rlm_model.fit() - coeff = rlm_result.params - except ImportError: - import warnings - warnings.warn("Statsmodels package not found. 
Install to obtain more" - "robust regression.") - - from numpy.linalg import lstsq - coeff = lstsq(coeff_matrix, dependent_value, rcond=-1)[0] - - return coeff - - def predict_step_time(self, eval_counter, wall_time=True): - predict_timing = {} - - # {{{ Predict eval_direct - - param = self.eval_direct_model(wall_time=wall_time) - - direct_workload = np.sum(eval_counter.count_direct()) - direct_nsource_boxes = np.sum(eval_counter.count_direct_source_boxes()) - - predict_timing["eval_direct"] = ( - direct_workload * param[0] + direct_nsource_boxes * param[1] + param[2]) - - # }}} - - # {{{ Predict multipole_to_local - - param = self.multipole_to_local_model(wall_time=wall_time) - - m2l_workload = np.sum(eval_counter.count_m2l()) - - predict_timing["multipole_to_local"] = m2l_workload * param[0] + param[1] - - # }}} - - # {{{ Predict eval_multipoles - - param = self.eval_multipoles_model(wall_time=wall_time) - - nm2p, nm2p_boxes = eval_counter.count_m2p() - - m2p_workload = np.sum(nm2p) - m2p_boxes = np.sum(nm2p_boxes) - - predict_timing["eval_multipoles"] = ( - m2p_workload * param[0] + m2p_boxes * param[1] + param[2] - ) - - # }}} - - # {{{ Predict form_locals - - param = self.form_locals_model(wall_time=wall_time) - - p2l_workload = np.sum(eval_counter.count_p2l()) - p2l_nboxes = np.sum(eval_counter.count_p2l_source_boxes()) - - predict_timing["form_locals"] = ( - p2l_workload * param[0] + p2l_nboxes * param[1] + param[2] - ) - - # }}} - - # {{{ - - param = self.eval_locals_model(wall_time=wall_time) - - eval_part_workload = np.sum(eval_counter.count_eval_part()) - - predict_timing["eval_locals"] = eval_part_workload * param[0] + param[1] - - # }}} - - return predict_timing - - def predict_boxes_time(self, traversal, wrangler): - tree = traversal.tree - counter = PerformanceCounter(traversal, wrangler, self.uses_pde_expansions) - - boxes_time = np.zeros((tree.nboxes,), dtype=np.float64) - - # {{{ eval_direct time - - param = self.eval_direct_model() - - direct_workload = counter.count_direct(use_global_idx=True) - ndirect_source_boxes = counter.count_direct_source_boxes() - - boxes_time += (direct_workload * param[0] - + ndirect_source_boxes * param[1] - + param[2]) - - # }}} - - # {{{ multipole_to_local time - - param = self.multipole_to_local_model() - - m2l_workload = counter.count_m2l(use_global_idx=True) - - boxes_time += (m2l_workload * param[0] + param[1]) - - # }}} - - # {{{ eval_multipoles time - - param = self.eval_multipoles_model() - - m2p_workload, m2p_nboxes = counter.count_m2p(use_global_idx=True) - - boxes_time += (m2p_workload * param[0] + m2p_nboxes * param[1] + param[2]) - - # }}} - - # {{{ form_locals time - - param = self.form_locals_model() - - p2l_workload = counter.count_p2l(use_global_idx=True) - p2l_nboxes = counter.count_p2l_source_boxes(use_global_idx=True) - - boxes_time += (p2l_workload * param[0] + p2l_nboxes * param[1] + param[2]) - - # }}} - - # {{{ eval_part time - - param = self.eval_locals_model() - - eval_part_workload = counter.count_eval_part(use_global_idx=True) - - boxes_time += (eval_part_workload * param[0] + param[1]) - - # }}} - - return boxes_time - - def save(self, filename): - with open(filename, 'wb') as f: - pickle.dump(self.time_result, f) - print("Save {} records to disk.".format(len(self.time_result))) - - def load(self, filename): - try: - with open(filename, 'rb') as f: - loaded_result = pickle.load(f) - self.time_result.extend(loaded_result) - print("Load {} records from disk.".format(len(loaded_result))) - except IOError: - 
print("Cannot open file '" + filename + "'") - except EOFError: - print("Nothing to read from file.") - - def loadjson(self, filename): - try: - with open(filename, 'r') as f: - loaded_results = json.load(f) - - for current_result in loaded_results: - converted_result = {} - - for field_name in current_result: - entry = current_result[field_name] - - if isinstance(entry, (int, np.integer)): - converted_result[field_name] = entry - - elif isinstance(entry, dict): - converted_result[field_name] = TimingResult( - wall_elapsed=entry['wall_elapsed'], - process_elapsed=entry['process_elapsed'] - ) - - else: - raise RuntimeError("Unknown type loaded") - - self.time_result.append(converted_result) - - print("Load {} records from disk.".format(len(loaded_results))) - - except IOError: - print("Cannot open file '" + filename + "'") - except EOFError: - print("Nothing to read from file.") - - def savejson(self, filename): - output = [] - - for current_result in self.time_result: - current_output = {} - - for field_name in current_result: - entry = current_result[field_name] - - if isinstance(entry, (int, np.integer)): - current_output[field_name] = int(entry) - - elif isinstance(entry, TimingResult): - current_output[field_name] = { - 'wall_elapsed': entry.get("wall_elapsed"), - 'process_elapsed': entry.get("process_elapsed") - } - - output.append(current_output) - - with open(filename, 'w') as f: - json.dump(output, f) - print("Save {} records to disk.".format(len(self.time_result))) - - def load_default_model(self): - import os - current_dir = os.path.dirname(os.path.abspath(__file__)) - default_perf_file_path = os.path.join(current_dir, 'default_perf_model.json') - self.loadjson(default_perf_file_path) -- GitLab From 2799fb8331e65c5e3ade31bf8c25312e43632150 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 28 Apr 2019 23:10:59 -0500 Subject: [PATCH 182/260] Remove record_timing argument --- boxtree/distributed/__init__.py | 6 +-- boxtree/distributed/calculation.py | 72 +++--------------------------- boxtree/distributed/util.py | 22 --------- 3 files changed, 7 insertions(+), 93 deletions(-) delete mode 100644 boxtree/distributed/util.py diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index b3c3637..a6739fc 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -158,12 +158,10 @@ class DistributedFMMInfo(object): # }}} - def drive_dfmm(self, source_weights, _communicate_mpoles_via_allreduce=False, - record_timing=False): + def drive_dfmm(self, source_weights, _communicate_mpoles_via_allreduce=False): from boxtree.distributed.calculation import calculate_pot return calculate_pot( self.local_wrangler, self.global_wrangler, self.local_trav, source_weights, self.local_data, - _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce, - record_timing=record_timing + _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce ) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 38b6e77..05e1a9e 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -27,10 +27,8 @@ import numpy as np import pyopencl as cl from boxtree.distributed import MPITags from mpi4py import MPI -import time from boxtree.distributed import dtype_to_mpi from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler -from boxtree.distributed.util import TimeRecorder from pytools import memoize_method import loopy as lp from loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_1 # 
noqa: F401 @@ -145,8 +143,7 @@ class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): # {{{ Communicate mpoles -def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False, - record_timing=False): +def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): """Based on Algorithm 3: Reduce and Scatter in [1]. The main idea is to mimic a allreduce as done on a hypercube network, but to @@ -166,10 +163,6 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False, stats = {} - if record_timing: - time_recorder = TimeRecorder("Communicate multiploes", comm, logger) - t_start = time.time() - # contributing_boxes: # # A mask of the the set of boxes that the current process contributes @@ -257,12 +250,6 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False, comm_pattern.advance() - if record_timing: - stats["total_time"] = time.time() - t_start - time_recorder.record() - else: - stats["total_time"] = None - if return_stats: return stats @@ -271,8 +258,7 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False, # {{{ Distribute source weights -def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD, - record_timing=False): +def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD): """ This function transfers needed source_weights from root process to each worker process in communicator :arg comm. @@ -286,9 +272,6 @@ def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD, current_rank = comm.Get_rank() total_rank = comm.Get_size() - if record_timing: - time_recorder = TimeRecorder("Distribute source weights", comm, logger) - if current_rank == 0: weight_req = [] local_src_weights = np.empty((total_rank,), dtype=object) @@ -308,9 +291,6 @@ def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD, else: local_src_weights = comm.recv(source=0, tag=MPITags["DIST_WEIGHT"]) - if record_timing: - time_recorder.record() - return local_src_weights # }}} @@ -320,8 +300,7 @@ def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD, def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, local_data, comm=MPI.COMM_WORLD, - _communicate_mpoles_via_allreduce=False, - record_timing=False): + _communicate_mpoles_via_allreduce=False): """ Calculate potentials for targets on distributed memory machines. This function needs to be called collectively by all process in :arg comm. @@ -338,9 +317,6 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, :param _communicate_mpoles_via_allreduce: Use MPI allreduce for communicating multipole expressions. Using MPI allreduce is slower but might be helpful for debugging purpose. - :param record_timing: This argument controls whether to log various timing data. - Note setting this option to true will incur minor performance degradation due - to the usage of barriers. :return: On the root process, this function returns calculated potentials. On worker processes, this function returns None. 
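For orientation, the hypercube exchange that the communicate_mpoles docstring above refers to pairs rank r with rank r ^ 2**k at stage k, so after log2(nprocs) stages every rank has (directly or indirectly) heard from every other rank. The following is only an illustrative sketch of that pairing, assuming a power-of-two process count; it is not the comm_pattern object actually used below.

def hypercube_partner(rank, stage):
    # Illustration only: partner of `rank` at exchange stage `stage` in a
    # recursive-doubling ("hypercube") pattern.
    return rank ^ (1 << stage)

# With 8 ranks, rank 3 exchanges with ranks 2, 1 and 7 over the three stages.
assert [hypercube_partner(3, k) for k in range(3)] == [2, 1, 7]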
""" @@ -349,11 +325,6 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, current_rank = comm.Get_rank() total_rank = comm.Get_size() - if record_timing: - comm.Barrier() - if current_rank == 0: - start_time = time.time() - # {{{ Distribute source weights if current_rank == 0: @@ -361,7 +332,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, source_weights = source_weights[global_wrangler.tree.user_source_ids] local_src_weights = distribute_source_weights( - source_weights, local_data, comm=comm, record_timing=record_timing + source_weights, local_data, comm=comm ) # }}} @@ -395,15 +366,10 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, comm.Allreduce(mpole_exps, mpole_exps_all) mpole_exps = mpole_exps_all else: - communicate_mpoles(local_wrangler, comm, local_trav, mpole_exps, - record_timing=record_timing) + communicate_mpoles(local_wrangler, comm, local_trav, mpole_exps) # }}} - if record_timing: - comm.Barrier() - fmm_eval_start_time = time.time() - # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") logger.debug("direct evaluation from neighbor source boxes ('list 1')") @@ -500,22 +466,11 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, # }}} - if record_timing: - logger.info("FMM Evaluation finished on process {0} in {1:.4f} sec.".format( - current_rank, time.time() - fmm_eval_start_time - )) - # {{{ Worker processes send calculated potentials to the root process potentials_mpi_type = dtype_to_mpi(potentials.dtype) - if record_timing: - comm.Barrier() - if current_rank == 0: - if record_timing: - receive_pot_start_time = time.time() - potentials_all_ranks = np.empty((total_rank,), dtype=object) potentials_all_ranks[0] = potentials @@ -525,10 +480,6 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, comm.Recv([potentials_all_ranks[irank], potentials_mpi_type], source=irank, tag=MPITags["GATHER_POTENTIALS"]) - - if record_timing: - logger.info("Receive potentials from worker processes in {0:.4f} sec." - .format(time.time() - receive_pot_start_time)) else: comm.Send([potentials, potentials_mpi_type], dest=0, tag=MPITags["GATHER_POTENTIALS"]) @@ -538,9 +489,6 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, # {{{ Assemble potentials from worker processes together on the root process if current_rank == 0: - if record_timing: - post_processing_start_time = time.time() - potentials = np.empty((global_wrangler.tree.ntargets,), dtype=potentials.dtype) @@ -553,19 +501,9 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, logger.debug("finalize potentials") result = global_wrangler.finalize_potentials(result) - if record_timing: - logger.info("Post processing in {0:.4f} sec.".format( - time.time() - post_processing_start_time - )) - # }}} if current_rank == 0: - - if record_timing: - logger.info("Distributed FMM evaluation completes in {0:.4f} sec." 
- .format(time.time() - start_time)) - return result # }}} diff --git a/boxtree/distributed/util.py b/boxtree/distributed/util.py deleted file mode 100644 index 26b6568..0000000 --- a/boxtree/distributed/util.py +++ /dev/null @@ -1,22 +0,0 @@ -import time - - -class TimeRecorder: - # functions in this class need to be called collectively - def __init__(self, name, comm, logger): - self.name = name - self.comm = comm - self.logger = logger - self.start_time = None - - self.comm.Barrier() - if self.comm.Get_rank() == 0: - self.start_time = time.time() - - def record(self): - self.comm.Barrier() - if self.comm.Get_rank() == 0: - self.logger.info("{0} time: {1} sec.".format( - self.name, - time.time() - self.start_time - )) -- GitLab From 85f83116d7ecb5e5419bbc307f7f72b678793432 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 1 May 2019 15:20:36 -0500 Subject: [PATCH 183/260] Find boxes in subrange only loop through contrib boxes instead of all boxes --- boxtree/distributed/calculation.py | 57 ++++++++++++++++++------------ 1 file changed, 34 insertions(+), 23 deletions(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 05e1a9e..1057904 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -102,18 +102,19 @@ class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): def find_boxes_used_by_subrange_kernel(self): knl = lp.make_kernel( [ - "{[ibox]: 0 <= ibox < nboxes}", + "{[icontrib_box]: 0 <= icontrib_box < ncontrib_boxes}", "{[iuser]: iuser_start <= iuser < iuser_end}", ], """ - for ibox + for icontrib_box + <> ibox = contributing_boxes_list[icontrib_box] <> iuser_start = box_to_user_starts[ibox] <> iuser_end = box_to_user_starts[ibox + 1] for iuser <> useri = box_to_user_lists[iuser] <> in_subrange = subrange_start <= useri and useri < subrange_end if in_subrange - box_in_subrange[ibox] = 1 + box_in_subrange[icontrib_box] = 1 end end end @@ -123,18 +124,25 @@ class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): lp.GlobalArg("box_to_user_lists", shape=None), "..." ]) - knl = lp.split_iname(knl, "ibox", 16, outer_tag="g.0", inner_tag="l.0") + knl = lp.split_iname( + knl, "icontrib_box", 16, outer_tag="g.0", inner_tag="l.0" + ) return knl def find_boxes_used_by_subrange(self, box_in_subrange, subrange, - box_to_user_starts, box_to_user_lists): + box_to_user_starts, box_to_user_lists, + contributing_boxes_list): knl = self.find_boxes_used_by_subrange_kernel() - knl(self.queue, + + knl( + self.queue, subrange_start=subrange[0], subrange_end=subrange[1], box_to_user_starts=box_to_user_starts, box_to_user_lists=box_to_user_lists, - box_in_subrange=box_in_subrange) + box_in_subrange=box_in_subrange, + contributing_boxes_list=contributing_boxes_list + ) box_in_subrange.finish() @@ -185,9 +193,6 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): mpole_exps_buf = np.empty(mpole_exps.shape, dtype=mpole_exps.dtype) boxes_list_buf = np.empty(trav.tree.nboxes, dtype=trav.tree.box_id_dtype) - # Temporary buffer for holding the mask - box_in_subrange = wrangler.empty_box_in_subrange_mask() - stats["bytes_sent_by_stage"] = [] stats["bytes_recvd_by_stage"] = [] @@ -199,25 +204,31 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): # Compute the subset of boxes to be sent. 
message_subrange = comm_pattern.messages() - box_in_subrange.fill(0) + contributing_boxes_list = np.nonzero(contributing_boxes)[0] + + contributing_boxes_list_dev = cl.array.to_device( + wrangler.queue, contributing_boxes_list + ) + + box_in_subrange = cl.array.zeros( + wrangler.queue, contributing_boxes_list.shape[0], dtype=np.int8 + ) wrangler.find_boxes_used_by_subrange( box_in_subrange, message_subrange, - trav.tree.box_to_user_starts, trav.tree.box_to_user_lists) - - box_in_subrange_host = ( - box_in_subrange.map_to_host(flags=cl.map_flags.READ)) + trav.tree.box_to_user_starts, trav.tree.box_to_user_lists, + contributing_boxes_list_dev + ) - with box_in_subrange_host.data: - relevant_boxes_list = ( - np.nonzero(box_in_subrange_host & contributing_boxes) - [0] - .astype(trav.tree.box_id_dtype)) + box_in_subrange_host = box_in_subrange.get().astype(bool) - del box_in_subrange_host + relevant_boxes_list = contributing_boxes_list[ + box_in_subrange_host + ].astype(trav.tree.box_id_dtype) - relevant_mpole_exps = wrangler.slice_mpoles(mpole_exps, - relevant_boxes_list) + relevant_mpole_exps = wrangler.slice_mpoles( + mpole_exps, relevant_boxes_list + ) # Send the box subset to the other processors. for sink in comm_pattern.sinks(): -- GitLab From 6794837f6006d143889c685b2d030ad5290a1dfe Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 2 May 2019 22:34:19 -0500 Subject: [PATCH 184/260] Try PyOpenCL instead of Loopy for find_boxes_used_by_subrange --- boxtree/distributed/calculation.py | 87 ++++++++++++++++-------------- 1 file changed, 47 insertions(+), 40 deletions(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 1057904..208da25 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -30,8 +30,9 @@ from mpi4py import MPI from boxtree.distributed import dtype_to_mpi from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler from pytools import memoize_method -import loopy as lp -from loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_1 # noqa: F401 +from pyopencl.tools import dtype_to_ctype +from pyopencl.elementwise import ElementwiseKernel +from mako.template import Template import logging logger = logging.getLogger(__name__) @@ -100,34 +101,33 @@ class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): @memoize_method def find_boxes_used_by_subrange_kernel(self): - knl = lp.make_kernel( - [ - "{[icontrib_box]: 0 <= icontrib_box < ncontrib_boxes}", - "{[iuser]: iuser_start <= iuser < iuser_end}", - ], - """ - for icontrib_box - <> ibox = contributing_boxes_list[icontrib_box] - <> iuser_start = box_to_user_starts[ibox] - <> iuser_end = box_to_user_starts[ibox + 1] - for iuser - <> useri = box_to_user_lists[iuser] - <> in_subrange = subrange_start <= useri and useri < subrange_end - if in_subrange - box_in_subrange[icontrib_box] = 1 - end - end - end - """, - [ - lp.ValueArg("subrange_start, subrange_end", np.int32), - lp.GlobalArg("box_to_user_lists", shape=None), - "..." 
- ]) - knl = lp.split_iname( - knl, "icontrib_box", 16, outer_tag="g.0", inner_tag="l.0" + return ElementwiseKernel( + self.queue.context, + Template(r""" + ${box_id_t} *contributing_boxes_list, + int subrange_start, + int subrange_end, + ${box_id_t} *box_to_user_starts, + int *box_to_user_lists, + char *box_in_subrange + """).render( + box_id_t=dtype_to_ctype(self.tree.box_id_dtype), + ), + Template(r""" + ${box_id_t} ibox = contributing_boxes_list[i]; + ${box_id_t} iuser_start = box_to_user_starts[ibox]; + ${box_id_t} iuser_end = box_to_user_starts[ibox + 1]; + for(${box_id_t} iuser = iuser_start; iuser < iuser_end; iuser++) { + int useri = box_to_user_lists[iuser]; + if(subrange_start <= useri && useri < subrange_end) { + box_in_subrange[i] = 1; + } + } + """).render( + box_id_t=dtype_to_ctype(self.tree.box_id_dtype) + ), + "find_boxes_used_by_subrange" ) - return knl def find_boxes_used_by_subrange(self, box_in_subrange, subrange, box_to_user_starts, box_to_user_lists, @@ -135,17 +135,14 @@ class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): knl = self.find_boxes_used_by_subrange_kernel() knl( - self.queue, - subrange_start=subrange[0], - subrange_end=subrange[1], - box_to_user_starts=box_to_user_starts, - box_to_user_lists=box_to_user_lists, - box_in_subrange=box_in_subrange, - contributing_boxes_list=contributing_boxes_list + contributing_boxes_list, + subrange[0], + subrange[1], + box_to_user_starts, + box_to_user_lists, + box_in_subrange ) - box_in_subrange.finish() - # }}} @@ -196,6 +193,14 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): stats["bytes_sent_by_stage"] = [] stats["bytes_recvd_by_stage"] = [] + box_to_user_starts_dev = cl.array.to_device( + wrangler.queue, trav.tree.box_to_user_starts + ) + + box_to_user_lists_dev = cl.array.to_device( + wrangler.queue, trav.tree.box_to_user_lists + ) + while not comm_pattern.done(): send_requests = [] @@ -204,7 +209,9 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): # Compute the subset of boxes to be sent. 
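For readers unfamiliar with the PyOpenCL primitive this patch switches to: an ElementwiseKernel is built from a C argument list and a C body that is executed once per index i over the arrays passed at call time. A minimal, self-contained example of the API shape (unrelated to the boxtree kernel above; names here are made up for illustration):

import numpy as np
import pyopencl as cl
import pyopencl.array as cl_array
from pyopencl.elementwise import ElementwiseKernel

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)

# Body runs once per index i of the arrays supplied at call time.
double_it = ElementwiseKernel(
    ctx, "int *x, int *y", "y[i] = 2 * x[i]", "double_it")

x = cl_array.to_device(queue, np.arange(10, dtype=np.int32))
y = cl_array.empty_like(x)
double_it(x, y)
assert (y.get() == 2 * np.arange(10)).all()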
message_subrange = comm_pattern.messages() - contributing_boxes_list = np.nonzero(contributing_boxes)[0] + contributing_boxes_list = np.nonzero(contributing_boxes)[0].astype( + trav.tree.box_id_dtype + ) contributing_boxes_list_dev = cl.array.to_device( wrangler.queue, contributing_boxes_list @@ -216,7 +223,7 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): wrangler.find_boxes_used_by_subrange( box_in_subrange, message_subrange, - trav.tree.box_to_user_starts, trav.tree.box_to_user_lists, + box_to_user_starts_dev, box_to_user_lists_dev, contributing_boxes_list_dev ) -- GitLab From e60636849a7586cd983103e37714f16e1265c2f5 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sat, 4 May 2019 00:24:42 -0500 Subject: [PATCH 185/260] Try pure Python for find_boxes_used_by_subrange --- boxtree/distributed/calculation.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 208da25..c833bfe 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -193,6 +193,7 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): stats["bytes_sent_by_stage"] = [] stats["bytes_recvd_by_stage"] = [] + """ box_to_user_starts_dev = cl.array.to_device( wrangler.queue, trav.tree.box_to_user_starts ) @@ -200,6 +201,7 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): box_to_user_lists_dev = cl.array.to_device( wrangler.queue, trav.tree.box_to_user_lists ) + """ while not comm_pattern.done(): send_requests = [] @@ -213,6 +215,9 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): trav.tree.box_id_dtype ) + subrange_start, subrange_end = message_subrange + + """ contributing_boxes_list_dev = cl.array.to_device( wrangler.queue, contributing_boxes_list ) @@ -232,6 +237,21 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): relevant_boxes_list = contributing_boxes_list[ box_in_subrange_host ].astype(trav.tree.box_id_dtype) + """ + + relevant_boxes_list = [] + for contrib_box in contributing_boxes_list: + iuser_start, iuser_end = trav.tree.box_to_user_starts[ + contrib_box:contrib_box + 2 + ] + for user_box in trav.tree.box_to_user_lists[iuser_start:iuser_end]: + if subrange_start <= user_box < subrange_end: + relevant_boxes_list.append(contrib_box) + break + + relevant_boxes_list = np.array( + relevant_boxes_list, dtype=trav.tree.box_id_dtype + ) relevant_mpole_exps = wrangler.slice_mpoles( mpole_exps, relevant_boxes_list -- GitLab From 8bf7f9800d769f427a60689351ea2317361ac9e4 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 7 May 2019 22:18:18 -0500 Subject: [PATCH 186/260] Try PyOpenCL version again --- boxtree/distributed/calculation.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index c833bfe..57cc72b 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -193,7 +193,6 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): stats["bytes_sent_by_stage"] = [] stats["bytes_recvd_by_stage"] = [] - """ box_to_user_starts_dev = cl.array.to_device( wrangler.queue, trav.tree.box_to_user_starts ) @@ -201,7 +200,6 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): box_to_user_lists_dev = cl.array.to_device( wrangler.queue, trav.tree.box_to_user_lists ) - """ while not 
comm_pattern.done(): send_requests = [] @@ -217,7 +215,6 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): subrange_start, subrange_end = message_subrange - """ contributing_boxes_list_dev = cl.array.to_device( wrangler.queue, contributing_boxes_list ) @@ -237,8 +234,9 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): relevant_boxes_list = contributing_boxes_list[ box_in_subrange_host ].astype(trav.tree.box_id_dtype) - """ + """ + # Pure Python version for debugging purpose relevant_boxes_list = [] for contrib_box in contributing_boxes_list: iuser_start, iuser_end = trav.tree.box_to_user_starts[ @@ -248,6 +246,7 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): if subrange_start <= user_box < subrange_end: relevant_boxes_list.append(contrib_box) break + """ relevant_boxes_list = np.array( relevant_boxes_list, dtype=trav.tree.box_id_dtype -- GitLab From 0f15e92d81b72bdbf23351afa9cb0d22d556470c Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 19 Aug 2019 16:22:08 -0700 Subject: [PATCH 187/260] Use cost model with calibration parameters --- boxtree/distributed/__init__.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index a6739fc..de0b1be 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -57,7 +57,7 @@ class DistributedFMMInfo(object): def __init__(self, queue, global_trav_dev, distributed_expansion_wrangler_factory, - cost_model=None, comm=MPI.COMM_WORLD): + calibration_params=None, comm=MPI.COMM_WORLD): # TODO: Support box_target_counts_nonchild? @@ -98,16 +98,19 @@ class DistributedFMMInfo(object): if current_rank == 0: # Construct default cost model if not supplied - if cost_model is None: + cost_model = CLFMMCostModel(queue) + + if calibration_params is None: # TODO: should replace the calibration params with a reasonable # deafult one - cost_model = CLFMMCostModel( - queue, CLFMMCostModel.get_constantone_calibration_params() - ) + calibration_params = \ + CLFMMCostModel.get_constantone_calibration_params() boxes_time = cost_model.aggregate_stage_costs_per_box( global_trav_dev, - cost_model(global_trav_dev, self.global_wrangler.level_nterms) + cost_model( + global_trav_dev, self.global_wrangler.level_nterms, + calibration_params) ).get() from boxtree.distributed.partition import partition_work -- GitLab From 18970dcecb0ec82740fbdfbf3cb0706e44770391 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 9 Sep 2019 16:43:21 -0500 Subject: [PATCH 188/260] Add explanation for local_traversal --- boxtree/distributed/local_traversal.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/boxtree/distributed/local_traversal.py b/boxtree/distributed/local_traversal.py index 1cb396e..895efed 100644 --- a/boxtree/distributed/local_traversal.py +++ b/boxtree/distributed/local_traversal.py @@ -54,9 +54,13 @@ def generate_local_travs( particle_id_t=dtype_to_ctype(local_tree.particle_id_dtype), box_flag_t=box_flag_t ), - Template(""" + Template(r""" + // reset HAS_OWN_TARGETS and HAS_CHILD_TARGETS bits in the flag of each + // box box_flags[i] &= (~${HAS_OWN_TARGETS}); box_flags[i] &= (~${HAS_CHILD_TARGETS}); + + // rebuild HAS_OWN_TARGETS and HAS_CHILD_TARGETS bits if(box_target_counts_nonchild[i]) box_flags[i] |= ${HAS_OWN_TARGETS}; if(box_target_counts_nonchild[i] < box_target_counts_cumul[i]) box_flags[i] |= ${HAS_CHILD_TARGETS}; -- GitLab From 
1e61d310ea7c2ffb51782368ac058eb987de0d07 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 22 Oct 2019 15:53:39 -0500 Subject: [PATCH 189/260] Use new cost model interface --- .test-conda-env-py3.yml | 1 + boxtree/distributed/__init__.py | 8 +++----- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.test-conda-env-py3.yml b/.test-conda-env-py3.yml index 6c6ec39..15037b0 100644 --- a/.test-conda-env-py3.yml +++ b/.test-conda-env-py3.yml @@ -12,6 +12,7 @@ dependencies: - pyopencl - islpy - pyfmmlib +- mpi4py # Only needed to make pylint succeed - matplotlib diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index de0b1be..bc722f7 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -106,11 +106,9 @@ class DistributedFMMInfo(object): calibration_params = \ CLFMMCostModel.get_constantone_calibration_params() - boxes_time = cost_model.aggregate_stage_costs_per_box( - global_trav_dev, - cost_model( - global_trav_dev, self.global_wrangler.level_nterms, - calibration_params) + boxes_time = cost_model( + global_trav_dev, self.global_wrangler.level_nterms, + calibration_params, per_box=True ).get() from boxtree.distributed.partition import partition_work -- GitLab From 1ae7540219d487bb7d5846a06b92c7b9e672bda0 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 12 Nov 2019 16:30:05 -0600 Subject: [PATCH 190/260] Allow MPI oversubscription in distributed test cases --- test/test_distributed.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_distributed.py b/test/test_distributed.py index b6d7cf2..0dcdcd9 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -109,7 +109,7 @@ def test_against_shared(num_processes, dims, nsources, ntargets): import subprocess import sys subprocess.run([ - "mpiexec", "-np", str(num_processes), + "mpiexec", "-np", str(num_processes), "-oversubscribe", "-x", "PYTEST", "-x", "dims", "-x", "nsources", "-x", "ntargets", # https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html sys.executable, "-m", "mpi4py.run", __file__], @@ -200,7 +200,7 @@ def test_constantone(num_processes, dims, nsources, ntargets): import subprocess import sys subprocess.run([ - "mpiexec", "-np", str(num_processes), + "mpiexec", "-np", str(num_processes), "-oversubscribe", "-x", "PYTEST", "-x", "dims", "-x", "nsources", "-x", "ntargets", # https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html sys.executable, "-m", "mpi4py.run", __file__], -- GitLab From a604c1961e2a896ec34b68f713c34a4c4a5db9d4 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 12 Nov 2019 19:07:54 -0600 Subject: [PATCH 191/260] Revert "Allow MPI oversubscription in distributed test cases" This reverts commit 1ae7540219d487bb7d5846a06b92c7b9e672bda0. 
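For context on the reverted flag: "-oversubscribe" is understood by Open MPI's mpiexec (it permits launching more ranks than there are available slots) but not by MPICH's, which is presumably why the blanket flag is withdrawn here. A hedged sketch of adding it only when Open MPI is detected follows; this is illustrative only and is not how the run_mpi helper introduced in the next patch is written.

from mpi4py import MPI

def mpiexec_prefix(num_processes):
    # Illustration only: append --oversubscribe only under Open MPI.
    prefix = ["mpiexec", "-np", str(num_processes)]
    if MPI.Get_library_version().startswith("Open MPI"):
        prefix.append("--oversubscribe")
    return prefix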
--- test/test_distributed.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_distributed.py b/test/test_distributed.py index 0dcdcd9..b6d7cf2 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -109,7 +109,7 @@ def test_against_shared(num_processes, dims, nsources, ntargets): import subprocess import sys subprocess.run([ - "mpiexec", "-np", str(num_processes), "-oversubscribe", + "mpiexec", "-np", str(num_processes), "-x", "PYTEST", "-x", "dims", "-x", "nsources", "-x", "ntargets", # https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html sys.executable, "-m", "mpi4py.run", __file__], @@ -200,7 +200,7 @@ def test_constantone(num_processes, dims, nsources, ntargets): import subprocess import sys subprocess.run([ - "mpiexec", "-np", str(num_processes), "-oversubscribe", + "mpiexec", "-np", str(num_processes), "-x", "PYTEST", "-x", "dims", "-x", "nsources", "-x", "ntargets", # https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html sys.executable, "-m", "mpi4py.run", __file__], -- GitLab From 28d85cc441ad4cda2a1fd62850bb30aeb6f3433b Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 22 Dec 2019 21:56:50 -0800 Subject: [PATCH 192/260] Support MPICH for distributed test cases --- test/test_distributed.py | 57 ++++++++++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 20 deletions(-) diff --git a/test/test_distributed.py b/test/test_distributed.py index b6d7cf2..13bf9f3 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -16,6 +16,41 @@ logging.basicConfig(level=os.environ.get("LOGLEVEL", "WARNING")) logging.getLogger("boxtree.distributed").setLevel(logging.INFO) +def run_mpi(num_processes, env): + """Launch MPI processes. + + This function forks another process and uses mpiexec to launch *num_processes* + copies of program. + + :arg num_processes: the number of copies of program to launch + :arg env: a Python `dict` of environment variables + """ + import subprocess + import sys + from mpi4py import MPI + + # Using "-m mpi4py" is necessary for avoiding deadlocks on exception cleanup + # See https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html for details. 
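+    # [Editorial note, not part of the original patch] The two branches below
+    # differ mainly in how environment variables reach the child ranks: with an
+    # MPICH-style launcher the environment handed to subprocess.run is inherited
+    # by the application processes, whereas Open MPI is told about each variable
+    # explicitly via "-x NAME", hence the per-variable loop in that branch.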
+ + mpi_library_name = MPI.Get_library_version() + if mpi_library_name.startswith("MPICH"): + subprocess.run( + ["mpiexec", "-np", str(num_processes), sys.executable, + "-m", "mpi4py", __file__], + env=env, check=True + ) + elif mpi_library_name.startswith("Open MPI"): + command = ["mpiexec", "-np", str(num_processes)] + for env_variable_name in env: + command.append("-x") + command.append(env_variable_name) + command.extend([sys.executable, "-m", "mpi4py", __file__]) + + subprocess.run(command, env=env, check=True) + else: + raise RuntimeError("Unrecognized MPI implementation") + + def _test_against_shared(dims, nsources, ntargets, dtype): from mpi4py import MPI @@ -106,16 +141,7 @@ def test_against_shared(num_processes, dims, nsources, ntargets): newenv["ntargets"] = str(ntargets) newenv["OMP_NUM_THREADS"] = "1" - import subprocess - import sys - subprocess.run([ - "mpiexec", "-np", str(num_processes), - "-x", "PYTEST", "-x", "dims", "-x", "nsources", "-x", "ntargets", - # https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html - sys.executable, "-m", "mpi4py.run", __file__], - env=newenv, - check=True - ) + run_mpi(num_processes, newenv) # {{{ Constantone expansion wrangler @@ -197,16 +223,7 @@ def test_constantone(num_processes, dims, nsources, ntargets): newenv["ntargets"] = str(ntargets) newenv["OMP_NUM_THREADS"] = "1" - import subprocess - import sys - subprocess.run([ - "mpiexec", "-np", str(num_processes), - "-x", "PYTEST", "-x", "dims", "-x", "nsources", "-x", "ntargets", - # https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html - sys.executable, "-m", "mpi4py.run", __file__], - env=newenv, - check=True - ) + run_mpi(num_processes, newenv) if __name__ == "__main__": -- GitLab From daabab41ae884784b0e6581717224f7c1f8600cc Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 23 Dec 2019 11:52:29 -0800 Subject: [PATCH 193/260] Skip distributed test cases for Python<3.5 --- test/test_distributed.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/test/test_distributed.py b/test/test_distributed.py index 13bf9f3..de521ea 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -7,6 +7,7 @@ from boxtree.tools import ConstantOneExpansionWrangler as \ import logging import os import pytest +import sys # Note: Do not import mpi4py.MPI object at the module level, because OpenMPI does not # support recursive invocations. 
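[Editorial note] The comment above is the reason `from mpi4py import MPI` appears inside function bodies throughout this file rather than at module level: importing `mpi4py.MPI` initializes MPI in the importing process (by default), and the pytest process that launches `mpiexec` must itself stay uninitialized. A minimal sketch of the deferred-import pattern (the function name is hypothetical):

    def mpi_entry_point():
        from mpi4py import MPI  # deferred so that MPI_Init runs only in the ranks
        comm = MPI.COMM_WORLD
        return comm.Get_rank()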
@@ -26,7 +27,6 @@ def run_mpi(num_processes, env): :arg env: a Python `dict` of environment variables """ import subprocess - import sys from mpi4py import MPI # Using "-m mpi4py" is necessary for avoiding deadlocks on exception cleanup @@ -131,6 +131,8 @@ def _test_against_shared(dims, nsources, ntargets, dtype): @pytest.mark.parametrize("num_processes, dims, nsources, ntargets", [ (4, 3, 10000, 10000) ]) +@pytest.mark.skipif(sys.version_info < (3, 5), + reason="distributed implementation requires 3.5 or higher") def test_against_shared(num_processes, dims, nsources, ntargets): pytest.importorskip("mpi4py") @@ -213,6 +215,8 @@ def _test_constantone(dims, nsources, ntargets, dtype): @pytest.mark.parametrize("num_processes, dims, nsources, ntargets", [ (4, 3, 10000, 10000) ]) +@pytest.mark.skipif(sys.version_info < (3, 5), + reason="distributed implementation requires 3.5 or higher") def test_constantone(num_processes, dims, nsources, ntargets): pytest.importorskip("mpi4py") @@ -248,8 +252,6 @@ if __name__ == "__main__": _test_constantone(dims, nsources, ntargets, dtype) else: - import sys - if len(sys.argv) > 1: # You can test individual routines by typing -- GitLab From fec42719b909e19a75f39f400336e11570e28125 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 6 Jan 2020 10:23:56 -0800 Subject: [PATCH 194/260] Improve doc --- boxtree/distributed/__init__.py | 22 +++++++++++------ boxtree/distributed/calculation.py | 30 +++++++++++------------ boxtree/distributed/local_tree.py | 7 +++--- boxtree/distributed/partition.py | 39 +++++++++++++++--------------- 4 files changed, 52 insertions(+), 46 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index bc722f7..96c5b43 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -58,6 +58,20 @@ class DistributedFMMInfo(object): def __init__(self, queue, global_trav_dev, distributed_expansion_wrangler_factory, calibration_params=None, comm=MPI.COMM_WORLD): + """ + .. attribute:: global_wrangler + + An object implementing :class:`ExpansionWranglerInterface`. + *global_wrangler* contains reference to the global tree object and is + used for distributing and collecting density/potential between the root + and worker ranks. This attribute is only present on the root rank. + + .. attribute:: local_wrangler + + An object implementing :class:`ExpansionWranglerInterface`. + *local_wrangler* contains reference to the local tree object and is + used for local FMM operations. This attribute is present on all ranks. + """ # TODO: Support box_target_counts_nonchild? @@ -146,14 +160,6 @@ class DistributedFMMInfo(object): # {{{ Get local wrangler - """ - Note: The difference between "local wrangler" and "global wrangler" is that - they reference different tree object. "local wrangler" uses local tree - object on each worker process for FMM computation, whereas "global wrangler" - is only valid on root process used for assembling results from worker - processes. - """ - self.local_wrangler = self.distributed_expansion_wrangler_factory( self.local_tree) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 57cc72b..064fc9a 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -155,7 +155,7 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): decrease the bandwidth cost by sending only information that is relevant to the processes receiving the message. 
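[Editorial sketch, not part of the patch] The "relevant information" referred to above is the per-stage box filtering visible earlier in this function: each outgoing message is destined for a subrange of ranks, and only contributing boxes whose multipole expansion is used by some rank in that subrange are packed and sent. In outline (simplified; `users_of` is a stand-in for the box_to_user_starts/box_to_user_lists lookup, the other names appear in the surrounding code):

        while not comm_pattern.done():
            subrange_start, subrange_end = message_subrange
            relevant_boxes_list = [
                box for box in contributing_boxes_list
                if any(subrange_start <= user_rank < subrange_end
                       for user_rank in users_of(box))
            ]
            # send only the multipole coefficients of relevant_boxes_list, merge
            # the incoming contribution into mpole_exps, advance comm_pattern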
- This function needs to be called collectively by all processes in :arg comm. + This function needs to be called collectively by all processes in *comm*. .. [1] Lashuk, Ilya, Aparna Chandramowlishwaran, Harper Langston, Tuan-Anh Nguyen, Rahul Sampath, Aashay Shringarpure, Richard Vuduc, Lexing @@ -297,13 +297,13 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD): """ This function transfers needed source_weights from root process to each - worker process in communicator :arg comm. + worker process in communicator *comm*. - This function needs to be called by all processes in the :arg comm communicator. + This function needs to be called collectively by all processes in *comm*. - :param source_weights: Source weights in tree order on root, None on worker + :arg source_weights: Source weights in tree order on root, None on worker processes. - :param local_data: Returned from *generate_local_tree*. None on worker processes. + :arg local_data: Returned from *generate_local_tree*. None on worker processes. :return Source weights needed for the current process. """ current_rank = comm.Get_rank() @@ -340,21 +340,21 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, _communicate_mpoles_via_allreduce=False): """ Calculate potentials for targets on distributed memory machines. - This function needs to be called collectively by all process in :arg comm. + This function needs to be called collectively by all ranks in *comm*. - :param local_wrangler: Expansion wranglers for each worker process for FMM. - :param global_wrangler: Expansion wrangler on root process for assembling partial - results from worker processes together. This argument differs from - :arg local_wrangler by referening the global tree instead of local trees. - This argument is None on worker processes. - :param local_trav: Local traversal object returned from generate_local_travs. - :param source_weights: Source weights for FMM. None on worker processes. - :param local_data: LocalData object returned from generate_local_tree. + :arg local_wrangler: Expansion wranglers for each worker rank. + :param global_wrangler: Expansion wrangler on the root rank for assembling + partial results from worker processes together. This argument differs from + *local_wrangler* by referening the global tree instead of local trees. + This argument is None on worker ranks. + :param local_trav: Local traversal object returned from *generate_local_travs*. + :param source_weights: Source weights for FMM. None on worker ranks. + :param local_data: LocalData object returned from *generate_local_tree*. :param comm: MPI communicator. :param _communicate_mpoles_via_allreduce: Use MPI allreduce for communicating multipole expressions. Using MPI allreduce is slower but might be helpful for debugging purpose. - :return: On the root process, this function returns calculated potentials. On + :return: On the root rank, this function returns calculated potentials. On worker processes, this function returns None. """ diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 19c4423..b904e93 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -55,8 +55,8 @@ def get_fetch_local_particles_knls(context, global_tree): This function compiles several PyOpenCL kernels helpful for fetching particles of local trees from global tree. 
- :param context: The context to compile against. - :param global_tree: The global tree from which local trees are generated. + :arg context: The context to compile against. + :arg global_tree: The global tree from which local trees are generated. :return: A FetchLocalParticlesKernels object. """ @@ -226,8 +226,7 @@ def fetch_local_particles(queue, global_tree, src_box_mask, tgt_box_mask, local_ box_source_counts_cumul, box_target_starts, box_target_counts_nonchild, box_target_counts_cumul. - These generated fields are stored directly into :arg:local_tree. - + These generated fields are stored directly into *local_tree*. """ global_tree_dev = global_tree.to_device(queue).with_queue(queue) nsources = global_tree.nsources diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index 6413551..fc15a7e 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -30,14 +30,14 @@ from mako.template import Template def partition_work(boxes_time, traversal, total_rank): - """ This function assigns responsible boxes of each process. + """ This function assigns responsible boxes for each rank. Each process is responsible for calculating the multiple expansions as well as evaluating target potentials in *responsible_boxes*. :arg boxes_time: The expected running time of each box. :arg traversal: The traversal object built on root containing all particles. - :arg total_rank: The total number of processes. + :arg total_rank: The total number of ranks. :return: A numpy array of shape (total_rank,), where the ith element is an numpy array containing the responsible boxes of process i. """ @@ -201,11 +201,11 @@ class ResponsibleBoxesQuery(object): def ancestor_boxes_mask(self, responsible_boxes_mask): """ Query the ancestors of responsible boxes. - :param responsible_boxes_mask: A pyopencl.array.Array object of shape + :arg responsible_boxes_mask: A :class:`pyopencl.array.Array` object of shape (tree.nboxes,) whose ith entry is 1 iff i is a responsible box. - :return: A pyopencl.array.Array object of shape (tree.nboxes,) whose ith - entry is 1 iff i is an ancestor of the responsible boxes specified by - responsible_boxes_mask. + :return: A :class:`pyopencl.array.Array` object of shape (tree.nboxes,) whose + ith entry is 1 iff i is an ancestor of the responsible boxes specified by + *responsible_boxes_mask*. """ ancestor_boxes = cl.array.zeros( self.queue, (self.tree.nboxes,), dtype=np.int8) @@ -224,16 +224,16 @@ class ResponsibleBoxesQuery(object): def src_boxes_mask(self, responsible_boxes_mask, ancestor_boxes_mask): """ Query the boxes whose sources are needed in order to evaluate potentials - of boxes represented by responsible_boxes_mask. + of boxes represented by *responsible_boxes_mask*. - :param responsible_boxes_mask: A pyopencl.array.Array object of shape + :arg responsible_boxes_mask: A :class:`pyopencl.array.Array` object of shape (tree.nboxes,) whose ith entry is 1 iff i is a responsible box. - :param ancestor_boxes_mask: A pyopencl.array.Array object of shape + :param ancestor_boxes_mask: A :class:`pyopencl.array.Array` object of shape (tree.nboxes,) whose ith entry is 1 iff i is either a responsible box or an ancestor of the responsible boxes. - :return: A pyopencl.array.Array object of shape (tree.nboxes,) whose ith - entry is 1 iff souces of box i are needed for evaluating the potentials - of targets in boxes represented by responsible_boxes_mask. 
+ :return: A :class:`pyopencl.array.Array` object of shape (tree.nboxes,) whose + ith entry is 1 iff souces of box i are needed for evaluating the + potentials of targets in boxes represented by *responsible_boxes_mask*. """ src_boxes_mask = responsible_boxes_mask.copy() @@ -279,16 +279,17 @@ class ResponsibleBoxesQuery(object): def multipole_boxes_mask(self, responsible_boxes_mask, ancestor_boxes_mask): """ Query the boxes whose multipoles are used in order to evaluate - potentials of targets in boxes represented by responsible_boxes_mask. + potentials of targets in boxes represented by *responsible_boxes_mask*. - :param responsible_boxes_mask: A pyopencl.array.Array object of shape + :arg responsible_boxes_mask: A :class:`pyopencl.array.Array` object of shape (tree.nboxes,) whose ith entry is 1 iff i is a responsible box. - :param ancestor_boxes_mask: A pyopencl.array.Array object of shape + :arg ancestor_boxes_mask: A :class:`pyopencl.array.Array` object of shape (tree.nboxes,) whose ith entry is 1 iff i is either a responsible box or an ancestor of the responsible boxes. - :return: A pyopencl.array.Array object of shape (tree.nboxes,) whose ith - entry is 1 iff multipoles of box i are needed for evaluating the - potentials of targets in boxes represented by responsible_boxes_mask. + :return: A :class:`pyopencl.array.Array` object of shape (tree.nboxes,) + whose ith entry is 1 iff multipoles of box i are needed for evaluating + the potentials of targets in boxes represented by + *responsible_boxes_mask*. """ multipole_boxes_mask = cl.array.zeros(self.queue, (self.tree.nboxes,), @@ -335,7 +336,7 @@ class ResponsibleBoxesQuery(object): multipole_boxes_mask: Current process needs multipole expressions in these boxes. - :param responsible_boxes_list: A numpy array of responsible box indices. + :arg responsible_boxes_list: A numpy array of responsible box indices. :returns: responsible_box_mask, ancestor_boxes_mask, src_boxes_mask and multipole_boxes_mask, as described above. -- GitLab From e696e52b9c09cdc147d2b6ac98388edf6faf47d2 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sat, 11 Apr 2020 21:09:25 -0500 Subject: [PATCH 195/260] Use new cost model API --- boxtree/distributed/__init__.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 96c5b43..07e0f1f 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -25,7 +25,7 @@ THE SOFTWARE. 
from mpi4py import MPI import numpy as np -from boxtree.cost import CLFMMCostModel +from boxtree.cost import FMMCostModel MPITags = dict( DIST_TREE=0, @@ -112,17 +112,17 @@ class DistributedFMMInfo(object): if current_rank == 0: # Construct default cost model if not supplied - cost_model = CLFMMCostModel(queue) + cost_model = FMMCostModel(queue) if calibration_params is None: # TODO: should replace the calibration params with a reasonable # deafult one calibration_params = \ - CLFMMCostModel.get_constantone_calibration_params() + FMMCostModel.get_unit_calibration_params() - boxes_time = cost_model( + boxes_time = cost_model.cost_per_box( global_trav_dev, self.global_wrangler.level_nterms, - calibration_params, per_box=True + calibration_params ).get() from boxtree.distributed.partition import partition_work -- GitLab From c7d34db7d77bd9a0430553572e754ff64e130b71 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 12 Apr 2020 12:35:50 -0500 Subject: [PATCH 196/260] Allow oversubscription in OpenMPI --- test/test_distributed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_distributed.py b/test/test_distributed.py index de521ea..7ef7eb0 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -40,7 +40,7 @@ def run_mpi(num_processes, env): env=env, check=True ) elif mpi_library_name.startswith("Open MPI"): - command = ["mpiexec", "-np", str(num_processes)] + command = ["mpiexec", "-np", str(num_processes), "--oversubscribe"] for env_variable_name in env: command.append("-x") command.append(env_variable_name) -- GitLab From dde858ad04be171ba2ced7158ebe13be3a6c370f Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 4 May 2020 12:32:01 -0500 Subject: [PATCH 197/260] Move run_mpi to tools --- boxtree/tools.py | 40 ++++++++++++++++++++++++++++++++++++++++ test/test_distributed.py | 39 +++------------------------------------ 2 files changed, 43 insertions(+), 36 deletions(-) diff --git a/boxtree/tools.py b/boxtree/tools.py index cffdb53..96eb47d 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -36,6 +36,7 @@ import loopy as lp from loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_2 # noqa from functools import partial +import sys # Use offsets in VectorArg by default. @@ -1013,4 +1014,43 @@ class ConstantOneExpansionWrangler(object): # }}} + +# {{{ MPI launcher + +def run_mpi(script, num_processes, env): + """Launch MPI processes. + + This function forks another process and uses mpiexec to launch *num_processes* + copies of program *script*. + + :arg script: the Python script to run + :arg num_processes: the number of copies of program to launch + :arg env: a Python `dict` of environment variables + """ + import subprocess + from mpi4py import MPI + + # Using "-m mpi4py" is necessary for avoiding deadlocks on exception cleanup + # See https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html for details. 
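+    # [Editorial note, not part of the original patch] A typical call, as made
+    # by test/test_distributed.py, forwards the test parameters through the
+    # environment of the child ranks (the values shown are illustrative only):
+    #
+    #     env = os.environ.copy()
+    #     env.update(PYTEST="1", dims="3", nsources="10000", ntargets="10000",
+    #                OMP_NUM_THREADS="1")
+    #     run_mpi(__file__, 4, env)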
+ + mpi_library_name = MPI.Get_library_version() + if mpi_library_name.startswith("MPICH"): + subprocess.run( + ["mpiexec", "-np", str(num_processes), sys.executable, + "-m", "mpi4py", script], + env=env, check=True + ) + elif mpi_library_name.startswith("Open MPI"): + command = ["mpiexec", "-np", str(num_processes), "--oversubscribe"] + for env_variable_name in env: + command.append("-x") + command.append(env_variable_name) + command.extend([sys.executable, "-m", "mpi4py", script]) + + subprocess.run(command, env=env, check=True) + else: + raise RuntimeError("Unrecognized MPI implementation") + +# }}} + # vim: foldmethod=marker:filetype=pyopencl diff --git a/test/test_distributed.py b/test/test_distributed.py index 7ef7eb0..443306f 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -4,6 +4,7 @@ import numpy.linalg as la from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler from boxtree.tools import ConstantOneExpansionWrangler as \ ConstantOneExpansionWranglerBase +from boxtree.tools import run_mpi import logging import os import pytest @@ -17,40 +18,6 @@ logging.basicConfig(level=os.environ.get("LOGLEVEL", "WARNING")) logging.getLogger("boxtree.distributed").setLevel(logging.INFO) -def run_mpi(num_processes, env): - """Launch MPI processes. - - This function forks another process and uses mpiexec to launch *num_processes* - copies of program. - - :arg num_processes: the number of copies of program to launch - :arg env: a Python `dict` of environment variables - """ - import subprocess - from mpi4py import MPI - - # Using "-m mpi4py" is necessary for avoiding deadlocks on exception cleanup - # See https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html for details. - - mpi_library_name = MPI.Get_library_version() - if mpi_library_name.startswith("MPICH"): - subprocess.run( - ["mpiexec", "-np", str(num_processes), sys.executable, - "-m", "mpi4py", __file__], - env=env, check=True - ) - elif mpi_library_name.startswith("Open MPI"): - command = ["mpiexec", "-np", str(num_processes), "--oversubscribe"] - for env_variable_name in env: - command.append("-x") - command.append(env_variable_name) - command.extend([sys.executable, "-m", "mpi4py", __file__]) - - subprocess.run(command, env=env, check=True) - else: - raise RuntimeError("Unrecognized MPI implementation") - - def _test_against_shared(dims, nsources, ntargets, dtype): from mpi4py import MPI @@ -143,7 +110,7 @@ def test_against_shared(num_processes, dims, nsources, ntargets): newenv["ntargets"] = str(ntargets) newenv["OMP_NUM_THREADS"] = "1" - run_mpi(num_processes, newenv) + run_mpi(__file__, num_processes, newenv) # {{{ Constantone expansion wrangler @@ -227,7 +194,7 @@ def test_constantone(num_processes, dims, nsources, ntargets): newenv["ntargets"] = str(ntargets) newenv["OMP_NUM_THREADS"] = "1" - run_mpi(num_processes, newenv) + run_mpi(__file__, num_processes, newenv) if __name__ == "__main__": -- GitLab From 8ce6c9ee80f32bebc6aa6687e4c75297b02c32f5 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Fri, 8 May 2020 10:05:51 -0500 Subject: [PATCH 198/260] Broadcast the complete traversal object to all worker ranks --- boxtree/distributed/__init__.py | 111 +++++++-------- boxtree/distributed/calculation.py | 15 +- boxtree/distributed/local_tree.py | 213 ++++++----------------------- boxtree/distributed/partition.py | 6 +- 4 files changed, 106 insertions(+), 239 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 07e0f1f..b10d3cd 100644 --- 
a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -28,14 +28,10 @@ import numpy as np from boxtree.cost import FMMCostModel MPITags = dict( - DIST_TREE=0, - DIST_SOURCES=1, - DIST_TARGETS=2, - DIST_RADII=3, - DIST_WEIGHT=4, - GATHER_POTENTIALS=5, - REDUCE_POTENTIALS=6, - REDUCE_INDICES=7 + DIST_WEIGHT=1, + GATHER_POTENTIALS=2, + REDUCE_POTENTIALS=3, + REDUCE_INDICES=4 ) @@ -64,13 +60,13 @@ class DistributedFMMInfo(object): An object implementing :class:`ExpansionWranglerInterface`. *global_wrangler* contains reference to the global tree object and is used for distributing and collecting density/potential between the root - and worker ranks. This attribute is only present on the root rank. + and worker ranks. .. attribute:: local_wrangler An object implementing :class:`ExpansionWranglerInterface`. *local_wrangler* contains reference to the local tree object and is - used for local FMM operations. This attribute is present on all ranks. + used for local FMM operations. """ # TODO: Support box_target_counts_nonchild? @@ -78,83 +74,82 @@ class DistributedFMMInfo(object): self.comm = comm current_rank = comm.Get_rank() + # {{{ broadcast global traversal object + if current_rank == 0: self.global_trav = global_trav_dev.get(queue=queue) else: self.global_trav = None + self.global_trav = comm.bcast(self.global_trav, root=0) + + if current_rank != 0: + global_trav_dev = self.global_trav.to_device(queue) + global_trav_dev.tree = self.global_trav.tree.to_device(queue) + + # }}} + self.distributed_expansion_wrangler_factory = \ distributed_expansion_wrangler_factory # {{{ Get global wrangler - if current_rank == 0: - self.global_wrangler = distributed_expansion_wrangler_factory( - self.global_trav.tree - ) - else: - self.global_wrangler = None + self.global_wrangler = distributed_expansion_wrangler_factory( + self.global_trav.tree + ) # }}} - # {{{ Broadcast well_sep_is_n_away + # {{{ Partiton work - if current_rank == 0: - well_sep_is_n_away = self.global_trav.well_sep_is_n_away - else: - well_sep_is_n_away = None + # Construct default cost model if not supplied + cost_model = FMMCostModel(queue) - well_sep_is_n_away = comm.bcast(well_sep_is_n_away, root=0) + if calibration_params is None: + # TODO: should replace the calibration params with a reasonable + # deafult one + calibration_params = \ + FMMCostModel.get_unit_calibration_params() - # }}} + boxes_time = cost_model.cost_per_box( + global_trav_dev, self.global_wrangler.level_nterms, + calibration_params + ).get() - # {{{ Partiton work + from boxtree.distributed.partition import partition_work + responsible_boxes_list = partition_work( + boxes_time, self.global_trav, comm.Get_size() + ) - if current_rank == 0: - # Construct default cost model if not supplied - cost_model = FMMCostModel(queue) - - if calibration_params is None: - # TODO: should replace the calibration params with a reasonable - # deafult one - calibration_params = \ - FMMCostModel.get_unit_calibration_params() - - boxes_time = cost_model.cost_per_box( - global_trav_dev, self.global_wrangler.level_nterms, - calibration_params - ).get() - - from boxtree.distributed.partition import partition_work - responsible_boxes_list = partition_work( - boxes_time, self.global_trav, comm.Get_size() - ) - else: - responsible_boxes_list = None + # It is assumed that, even if each rank computes `responsible_boxes_list` + # independently, it should be the same across ranks, since ranks use the same + # calibration parameters. 
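+        #
+        # [Editorial note, an assumption beyond the original patch] Should
+        # bitwise reproducibility of the cost estimates across ranks ever become
+        # a concern (e.g. on heterogeneous hardware), the same effect could be
+        # obtained by evaluating boxes_time on the root rank only and
+        # broadcasting it, roughly:
+        #
+        #     boxes_time = comm.bcast(
+        #         boxes_time if current_rank == 0 else None, root=0)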
# }}} - # {{{ Compute and distribute local tree + # {{{ Compute local tree - if current_rank == 0: - from boxtree.distributed.partition import ResponsibleBoxesQuery - responsible_box_query = ResponsibleBoxesQuery(queue, self.global_trav) - else: - responsible_box_query = None + from boxtree.distributed.partition import ResponsibleBoxesQuery + responsible_box_query = ResponsibleBoxesQuery(queue, self.global_trav) from boxtree.distributed.local_tree import generate_local_tree - self.local_tree, self.local_data, self.box_bounding_box = \ - generate_local_tree(queue, self.global_trav, responsible_boxes_list, - responsible_box_query) + self.local_tree, self.src_idx, self.tgt_idx = generate_local_tree( + queue, self.global_trav, responsible_boxes_list, responsible_box_query + ) # }}} - # {{{ Compute traversal object on each process + # {{{ Compute traversal object on each rank from boxtree.distributed.local_traversal import generate_local_travs self.local_trav = generate_local_travs( - queue, self.local_tree, self.box_bounding_box, - well_sep_is_n_away=well_sep_is_n_away) + queue, self.local_tree, + box_bounding_box={ + "min": self.global_trav.box_target_bounding_box_min, + "max": self.global_trav.box_target_bounding_box_max + }, + well_sep_is_n_away=self.global_trav.well_sep_is_n_away + ) # }}} @@ -169,6 +164,6 @@ class DistributedFMMInfo(object): from boxtree.distributed.calculation import calculate_pot return calculate_pot( self.local_wrangler, self.global_wrangler, self.local_trav, - source_weights, self.local_data, + source_weights, self.src_idx, self.tgt_idx, _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce ) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 064fc9a..bb61a1b 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -295,7 +295,7 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): # {{{ Distribute source weights -def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD): +def distribute_source_weights(source_weights, src_idx, comm=MPI.COMM_WORLD): """ This function transfers needed source_weights from root process to each worker process in communicator *comm*. @@ -303,7 +303,7 @@ def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD): :arg source_weights: Source weights in tree order on root, None on worker processes. - :arg local_data: Returned from *generate_local_tree*. None on worker processes. + :arg src_idx: Returned from *generate_local_tree*. None on worker processes. :return Source weights needed for the current process. """ current_rank = comm.Get_rank() @@ -314,7 +314,7 @@ def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD): local_src_weights = np.empty((total_rank,), dtype=object) for irank in range(total_rank): - local_src_weights[irank] = source_weights[local_data[irank].src_idx] + local_src_weights[irank] = source_weights[src_idx[irank]] if irank != 0: weight_req.append( @@ -336,7 +336,7 @@ def distribute_source_weights(source_weights, local_data, comm=MPI.COMM_WORLD): # {{{ FMM driver for calculating potentials def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, - local_data, comm=MPI.COMM_WORLD, + src_idx, tgt_idx, comm=MPI.COMM_WORLD, _communicate_mpoles_via_allreduce=False): """ Calculate potentials for targets on distributed memory machines. 
@@ -369,7 +369,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, source_weights = source_weights[global_wrangler.tree.user_source_ids] local_src_weights = distribute_source_weights( - source_weights, local_data, comm=comm + source_weights, src_idx, comm=comm ) # }}} @@ -513,7 +513,8 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, for irank in range(1, total_rank): potentials_all_ranks[irank] = np.empty( - (local_data[irank].ntargets,), dtype=potentials.dtype) + tgt_idx[irank].shape, dtype=potentials.dtype + ) comm.Recv([potentials_all_ranks[irank], potentials_mpi_type], source=irank, tag=MPITags["GATHER_POTENTIALS"]) @@ -530,7 +531,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, dtype=potentials.dtype) for irank in range(total_rank): - potentials[local_data[irank].tgt_idx] = potentials_all_ranks[irank] + potentials[tgt_idx[irank]] = potentials_all_ranks[irank] logger.debug("reorder potentials") result = global_wrangler.reorder_potentials(potentials) diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index b904e93..02171f6 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -31,7 +31,6 @@ from boxtree import Tree from mpi4py import MPI import time import numpy as np -from boxtree.distributed import MPITags import logging logger = logging.getLogger(__name__) @@ -205,17 +204,6 @@ def get_fetch_local_particles_knls(context, global_tree): ) -LocalData = namedtuple( - 'LocalData', - [ - 'nsources', - 'ntargets', - 'src_idx', - 'tgt_idx' - ] -) - - def fetch_local_particles(queue, global_tree, src_box_mask, tgt_box_mask, local_tree, knls): """ This helper function fetches particles needed for worker processes, and @@ -480,18 +468,7 @@ def fetch_local_particles(queue, global_tree, src_box_mask, tgt_box_mask, local_ # }}} - # {{{ Fetch fields to local_data - - local_data = LocalData( - nsources=local_nsources, - ntargets=local_ntargets, - src_idx=src_idx, - tgt_idx=tgt_idx - ) - - # }}} - - return local_tree, local_data + return local_tree, src_idx, tgt_idx class LocalTreeBuilder: @@ -517,7 +494,7 @@ class LocalTreeBuilder: local_tree.user_source_ids = None local_tree.sorted_target_ids = None - local_tree, local_data = fetch_local_particles( + local_tree, src_idx, tgt_idx = fetch_local_particles( self.queue, self.global_tree, src_boxes_mask, @@ -532,7 +509,7 @@ class LocalTreeBuilder: local_tree.__class__ = LocalTree - return local_tree, local_data + return local_tree, src_idx, tgt_idx class LocalTree(Tree): @@ -568,92 +545,46 @@ class LocalTree(Tree): def generate_local_tree(queue, traversal, responsible_boxes_list, - responsible_box_query, comm=MPI.COMM_WORLD, - no_targets=False): + responsible_box_query, comm=MPI.COMM_WORLD): # Get MPI information - current_rank = comm.Get_rank() - total_rank = comm.Get_size() - - if current_rank == 0: - start_time = time.time() - - if current_rank == 0: - local_data = np.empty((total_rank,), dtype=object) - else: - local_data = None - - if current_rank == 0: - tree = traversal.tree - - local_tree_builder = LocalTreeBuilder(tree, queue) - - box_mpole_is_used = cl.array.empty( - queue, (total_rank, tree.nboxes,), dtype=np.int8 - ) - - # request objects for non-blocking communication - tree_req = [] - particles_req = [] - - # buffer holding communication data so that it is not garbage collected - local_tree = np.empty((total_rank,), dtype=object) - local_targets = np.empty((total_rank,), 
dtype=object) - local_sources = np.empty((total_rank,), dtype=object) - local_target_radii = np.empty((total_rank,), dtype=object) - - for irank in range(total_rank): - - (responsible_boxes_mask, ancestor_boxes, src_boxes_mask, - box_mpole_is_used[irank]) = \ - responsible_box_query.get_boxes_mask(responsible_boxes_list[irank]) - - local_tree[irank], local_data[irank] = \ - local_tree_builder.from_global_tree( - responsible_boxes_list[irank], responsible_boxes_mask, - src_boxes_mask, ancestor_boxes - ) + rank = comm.Get_rank() + size = comm.Get_size() - # master process does not need to communicate with itself - if irank == 0: - continue + start_time = time.time() - # {{{ Peel sources and targets off tree + tree = traversal.tree + local_tree_builder = LocalTreeBuilder(tree, queue) - local_targets[irank] = local_tree[irank].targets - local_tree[irank].targets = None + (responsible_boxes_mask, ancestor_boxes, src_boxes_mask, box_mpole_is_used) = \ + responsible_box_query.get_boxes_mask(responsible_boxes_list[rank]) - local_sources[irank] = local_tree[irank].sources - local_tree[irank].sources = None - - if tree.targets_have_extent: - local_target_radii[irank] = local_tree[irank].target_radii - local_tree[irank].target_radii = None - - # }}} + local_tree, src_idx, tgt_idx = local_tree_builder.from_global_tree( + responsible_boxes_list[rank], responsible_boxes_mask, src_boxes_mask, + ancestor_boxes + ) - # Send the local tree skeleton without sources and targets - tree_req.append(comm.isend( - local_tree[irank], dest=irank, tag=MPITags["DIST_TREE"])) + # {{{ compute the users of multipole expansions of each box on root rank - # Send the sources and targets - particles_req.append(comm.Isend( - local_sources[irank], dest=irank, tag=MPITags["DIST_SOURCES"])) + box_mpole_is_used_all_ranks = None + if rank == 0: + box_mpole_is_used_all_ranks = np.empty( + (size, tree.nboxes), dtype=box_mpole_is_used.dtype + ) + comm.Gather(box_mpole_is_used.get(), box_mpole_is_used_all_ranks, root=0) - if not no_targets: - particles_req.append(comm.Isend( - local_targets[irank], dest=irank, tag=MPITags["DIST_TARGETS"])) + box_to_user_starts = None + box_to_user_lists = None - if tree.targets_have_extent: - particles_req.append(comm.Isend( - local_target_radii[irank], dest=irank, - tag=MPITags["DIST_RADII"]) - ) + if rank == 0: + box_mpole_is_used_all_ranks = cl.array.to_device( + queue, box_mpole_is_used_all_ranks + ) from boxtree.tools import MaskCompressorKernel matcompr = MaskCompressorKernel(queue.context) (box_to_user_starts, box_to_user_lists, evt) = \ - matcompr(queue, box_mpole_is_used.transpose(), + matcompr(queue, box_mpole_is_used_all_ranks.transpose(), list_dtype=np.int32) cl.wait_for_events([evt]) @@ -664,83 +595,23 @@ def generate_local_tree(queue, traversal, responsible_boxes_list, logger.debug("computing box_to_user: done") - # Receive the local tree from root - if current_rank == 0: - MPI.Request.Waitall(tree_req) - local_tree = local_tree[0] - else: - local_tree = comm.recv(source=0, tag=MPITags["DIST_TREE"]) - - # Receive sources and targets - if current_rank == 0: - MPI.Request.Waitall(particles_req) - else: - reqs = [] - - local_tree.sources = np.empty( - (local_tree.dimensions, local_tree.nsources), - dtype=local_tree.coord_dtype - ) - reqs.append(comm.Irecv( - local_tree.sources, source=0, tag=MPITags["DIST_SOURCES"])) - - if no_targets: - local_tree.targets = None - if local_tree.targets_have_extent: - local_tree.target_radii = None - else: - local_tree.targets = np.empty( - 
(local_tree.dimensions, local_tree.ntargets), - dtype=local_tree.coord_dtype - ) - - reqs.append(comm.Irecv( - local_tree.targets, source=0, tag=MPITags["DIST_TARGETS"])) - - if local_tree.targets_have_extent: - local_tree.target_radii = np.empty( - (local_tree.ntargets,), - dtype=local_tree.coord_dtype - ) - - reqs.append(comm.Irecv( - local_tree.target_radii, source=0, tag=MPITags["DIST_RADII"])) - - MPI.Request.Waitall(reqs) - - # Receive box extent - if current_rank == 0: - box_target_bounding_box_min = traversal.box_target_bounding_box_min - box_target_bounding_box_max = traversal.box_target_bounding_box_max - else: - box_target_bounding_box_min = np.empty( - (local_tree.dimensions, local_tree.aligned_nboxes), - dtype=local_tree.coord_dtype - ) - box_target_bounding_box_max = np.empty( - (local_tree.dimensions, local_tree.aligned_nboxes), - dtype=local_tree.coord_dtype - ) - comm.Bcast(box_target_bounding_box_min, root=0) - comm.Bcast(box_target_bounding_box_max, root=0) - box_bounding_box = { - "min": box_target_bounding_box_min, - "max": box_target_bounding_box_max - } - - if current_rank != 0: - box_to_user_starts = None - box_to_user_lists = None - box_to_user_starts = comm.bcast(box_to_user_starts, root=0) box_to_user_lists = comm.bcast(box_to_user_lists, root=0) local_tree.box_to_user_starts = box_to_user_starts local_tree.box_to_user_lists = box_to_user_lists - if current_rank == 0: - logger.info("Distribute local tree in {} sec.".format( - str(time.time() - start_time)) - ) + # }}} + + # {{ Gather source indices and target indices of each rank + + src_idx_all_ranks = comm.gather(src_idx, root=0) + tgt_idx_all_ranks = comm.gather(tgt_idx, root=0) + + # }}} + + logger.info("Generate local tree on rank {} in {} sec.".format( + rank, str(time.time() - start_time) + )) - return local_tree, local_data, box_bounding_box + return local_tree, src_idx_all_ranks, tgt_idx_all_ranks diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index fc15a7e..bdf052f 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -147,8 +147,6 @@ class ResponsibleBoxesQuery(object): self.from_sep_bigger_lists_dev = cl.array.to_device( queue, traversal.from_sep_bigger_lists) - # }}} - if self.tree.targets_have_extent: # list 3 close if traversal.from_sep_close_smaller_starts is not None: @@ -164,6 +162,8 @@ class ResponsibleBoxesQuery(object): self.from_sep_close_bigger_lists_dev = cl.array.to_device( queue, traversal.from_sep_close_bigger_lists) + # }}} + # helper kernel for ancestor box query self.mark_parent_knl = cl.elementwise.ElementwiseKernel( queue.context, @@ -323,7 +323,7 @@ class ResponsibleBoxesQuery(object): def get_boxes_mask(self, responsible_boxes_list): """ Given a list of responsible boxes for a process, calculates the following - three masks: + four masks: responsible_box_mask: Current process will evaluate target potentials and multipole expansions in these boxes. 
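[Editorial recap, not part of the patch] Taken together, the four masks returned by get_boxes_mask drive the per-rank local tree construction introduced in this commit; roughly (names as in the surrounding diffs):

    masks = responsible_box_query.get_boxes_mask(responsible_boxes_list[rank])
    responsible_boxes_mask, ancestor_boxes_mask, src_boxes_mask, mpole_used = masks

    # responsible_boxes_mask : boxes whose target potentials and multipole
    #                          expansions this rank computes
    # ancestor_boxes_mask    : ancestors of those boxes
    # src_boxes_mask         : boxes whose sources this rank needs
    # mpole_used             : boxes whose multipoles this rank will consume;
    #                          gathered on the root rank to build
    #                          box_to_user_starts / box_to_user_lists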
Sources and targets in these boxes -- GitLab From 4eaf9f28c1eb68db09c8102264d5515d7db696fc Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 10 May 2020 23:59:22 -0500 Subject: [PATCH 199/260] Broadcast tree instead of traversal --- boxtree/distributed/__init__.py | 30 +++++++++++--------------- boxtree/distributed/local_traversal.py | 12 ++--------- test/test_distributed.py | 22 +++++++++---------- 3 files changed, 25 insertions(+), 39 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index b10d3cd..48c4e88 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -51,7 +51,8 @@ def dtype_to_mpi(dtype): class DistributedFMMInfo(object): - def __init__(self, queue, global_trav_dev, + def __init__(self, queue, global_tree_dev, + traversal_builder, distributed_expansion_wrangler_factory, calibration_params=None, comm=MPI.COMM_WORLD): """ @@ -69,23 +70,19 @@ class DistributedFMMInfo(object): used for local FMM operations. """ - # TODO: Support box_target_counts_nonchild? - self.comm = comm current_rank = comm.Get_rank() - # {{{ broadcast global traversal object + # {{{ Broadcast global tree + global_tree = None if current_rank == 0: - self.global_trav = global_trav_dev.get(queue=queue) - else: - self.global_trav = None - - self.global_trav = comm.bcast(self.global_trav, root=0) + global_tree = global_tree_dev.get(queue) + global_tree = comm.bcast(global_tree, root=0) + global_tree_dev = global_tree.to_device(queue).with_queue(queue) - if current_rank != 0: - global_trav_dev = self.global_trav.to_device(queue) - global_trav_dev.tree = self.global_trav.tree.to_device(queue) + global_trav_dev, _ = traversal_builder(queue, global_tree_dev) + self.global_trav = global_trav_dev.get(queue) # }}} @@ -94,9 +91,7 @@ class DistributedFMMInfo(object): # {{{ Get global wrangler - self.global_wrangler = distributed_expansion_wrangler_factory( - self.global_trav.tree - ) + self.global_wrangler = distributed_expansion_wrangler_factory(global_tree) # }}} @@ -143,12 +138,11 @@ class DistributedFMMInfo(object): from boxtree.distributed.local_traversal import generate_local_travs self.local_trav = generate_local_travs( - queue, self.local_tree, + queue, self.local_tree, traversal_builder, box_bounding_box={ "min": self.global_trav.box_target_bounding_box_min, "max": self.global_trav.box_target_bounding_box_max - }, - well_sep_is_n_away=self.global_trav.well_sep_is_n_away + } ) # }}} diff --git a/boxtree/distributed/local_traversal.py b/boxtree/distributed/local_traversal.py index 895efed..773f286 100644 --- a/boxtree/distributed/local_traversal.py +++ b/boxtree/distributed/local_traversal.py @@ -33,8 +33,7 @@ logger = logging.getLogger(__name__) def generate_local_travs( - queue, local_tree, box_bounding_box=None, - well_sep_is_n_away=1, from_sep_smaller_crit=None, + queue, local_tree, traversal_builder, box_bounding_box=None, merge_close_lists=False): start_time = time.time() @@ -117,14 +116,7 @@ def generate_local_travs( modify_own_sources_knl(d_tree.responsible_boxes_list, local_box_flags) modify_child_sources_knl(d_tree.ancestor_mask, local_box_flags) - from boxtree.traversal import FMMTraversalBuilder - tg = FMMTraversalBuilder( - queue.context, - well_sep_is_n_away=well_sep_is_n_away, - from_sep_smaller_crit=from_sep_smaller_crit - ) - - d_local_trav, _ = tg( + d_local_trav, _ = traversal_builder( queue, d_tree, debug=True, box_bounding_box=box_bounding_box, local_box_flags=local_box_flags diff --git a/test/test_distributed.py 
b/test/test_distributed.py index 443306f..6a7aca8 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -26,7 +26,7 @@ def _test_against_shared(dims, nsources, ntargets, dtype): rank = comm.Get_rank() # Initialize arguments for worker processes - d_trav = None + tree = None sources_weights = None helmholtz_k = 0 @@ -37,6 +37,9 @@ def _test_against_shared(dims, nsources, ntargets, dtype): def fmm_level_to_nterms(tree, level): return max(level, 3) + from boxtree.traversal import FMMTraversalBuilder + tg = FMMTraversalBuilder(ctx, well_sep_is_n_away=2) + # Generate particles and run shared-memory parallelism on rank 0 if rank == 0: @@ -60,8 +63,6 @@ def _test_against_shared(dims, nsources, ntargets, dtype): tree, _ = tb(queue, sources, targets=targets, target_radii=target_radii, stick_out_factor=0.25, max_particles_in_box=30, debug=True) - from boxtree.traversal import FMMTraversalBuilder - tg = FMMTraversalBuilder(ctx, well_sep_is_n_away=2) d_trav, _ = tg(queue, tree, debug=True) trav = d_trav.get(queue=queue) @@ -84,7 +85,8 @@ def _test_against_shared(dims, nsources, ntargets, dtype): from boxtree.distributed import DistributedFMMInfo distribued_fmm_info = DistributedFMMInfo( - queue, d_trav, distributed_expansion_wrangler_factory, comm=comm) + queue, tree, tg, distributed_expansion_wrangler_factory, comm=comm + ) pot_dfmm = distribued_fmm_info.drive_dfmm(sources_weights) if rank == 0: @@ -132,7 +134,7 @@ def _test_constantone(dims, nsources, ntargets, dtype): rank = comm.Get_rank() # Initialization - d_trav = None + tree = None sources_weights = None # Configure PyOpenCL @@ -140,6 +142,9 @@ def _test_constantone(dims, nsources, ntargets, dtype): ctx = cl.create_some_context() queue = cl.CommandQueue(ctx) + from boxtree.traversal import FMMTraversalBuilder + tg = FMMTraversalBuilder(ctx) + if rank == 0: # Generate random particles @@ -157,17 +162,12 @@ def _test_constantone(dims, nsources, ntargets, dtype): tree, _ = tb(queue, sources, targets=targets, max_particles_in_box=30, debug=True) - # Build global interaction lists - from boxtree.traversal import FMMTraversalBuilder - tg = FMMTraversalBuilder(ctx) - d_trav, _ = tg(queue, tree, debug=True) - def constantone_expansion_wrangler_factory(tree): return ConstantOneExpansionWrangler(tree) from boxtree.distributed import DistributedFMMInfo distributed_fmm_info = DistributedFMMInfo( - queue, d_trav, constantone_expansion_wrangler_factory, comm=MPI.COMM_WORLD + queue, tree, tg, constantone_expansion_wrangler_factory, comm=MPI.COMM_WORLD ) pot_dfmm = distributed_fmm_info.drive_dfmm( -- GitLab From 9937ac572b9a57491a5453e60975d913595f9e71 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 11 May 2020 17:41:21 -0500 Subject: [PATCH 200/260] Change generate_local_tree interface for accepting a tree instead of a traversal --- boxtree/distributed/__init__.py | 2 +- boxtree/distributed/calculation.py | 3 ++- boxtree/distributed/local_tree.py | 3 +-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 48c4e88..9c5d5e8 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -129,7 +129,7 @@ class DistributedFMMInfo(object): from boxtree.distributed.local_tree import generate_local_tree self.local_tree, self.src_idx, self.tgt_idx = generate_local_tree( - queue, self.global_trav, responsible_boxes_list, responsible_box_query + queue, global_tree, responsible_boxes_list, responsible_box_query ) # }}} diff --git 
a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index bb61a1b..a85f5d9 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -349,7 +349,8 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, This argument is None on worker ranks. :param local_trav: Local traversal object returned from *generate_local_travs*. :param source_weights: Source weights for FMM. None on worker ranks. - :param local_data: LocalData object returned from *generate_local_tree*. + :param src_idx: returned from *generate_local_tree*. + :param tgt_idx: returned from *generate_local_tree*. :param comm: MPI communicator. :param _communicate_mpoles_via_allreduce: Use MPI allreduce for communicating multipole expressions. Using MPI allreduce is slower but might be helpful for diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 02171f6..b0b145a 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -544,7 +544,7 @@ class LocalTree(Tree): return self._dimensions -def generate_local_tree(queue, traversal, responsible_boxes_list, +def generate_local_tree(queue, tree, responsible_boxes_list, responsible_box_query, comm=MPI.COMM_WORLD): # Get MPI information @@ -553,7 +553,6 @@ def generate_local_tree(queue, traversal, responsible_boxes_list, start_time = time.time() - tree = traversal.tree local_tree_builder = LocalTreeBuilder(tree, queue) (responsible_boxes_mask, ancestor_boxes, src_boxes_mask, box_mpole_is_used) = \ -- GitLab From dd7905a641207e36f8b1674bf3a17f62597f1433 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 12 May 2020 00:35:53 -0500 Subject: [PATCH 201/260] Gather source and target indices outside generate_local_tree --- boxtree/distributed/__init__.py | 9 ++++++++- boxtree/distributed/calculation.py | 14 ++++++++------ boxtree/distributed/local_tree.py | 9 +-------- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 9c5d5e8..0154343 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -134,6 +134,13 @@ class DistributedFMMInfo(object): # }}} + # {{ Gather source indices and target indices of each rank + + self.src_idx_all_ranks = comm.gather(self.src_idx, root=0) + self.tgt_idx_all_ranks = comm.gather(self.tgt_idx, root=0) + + # }}} + # {{{ Compute traversal object on each rank from boxtree.distributed.local_traversal import generate_local_travs @@ -158,6 +165,6 @@ class DistributedFMMInfo(object): from boxtree.distributed.calculation import calculate_pot return calculate_pot( self.local_wrangler, self.global_wrangler, self.local_trav, - source_weights, self.src_idx, self.tgt_idx, + source_weights, self.src_idx_all_ranks, self.tgt_idx_all_ranks, _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce ) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index a85f5d9..df97ec2 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -336,7 +336,7 @@ def distribute_source_weights(source_weights, src_idx, comm=MPI.COMM_WORLD): # {{{ FMM driver for calculating potentials def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, - src_idx, tgt_idx, comm=MPI.COMM_WORLD, + src_idx_all_ranks, tgt_idx_all_ranks, comm=MPI.COMM_WORLD, _communicate_mpoles_via_allreduce=False): """ Calculate potentials for targets on distributed memory machines. 
@@ -349,8 +349,10 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, This argument is None on worker ranks. :param local_trav: Local traversal object returned from *generate_local_travs*. :param source_weights: Source weights for FMM. None on worker ranks. - :param src_idx: returned from *generate_local_tree*. - :param tgt_idx: returned from *generate_local_tree*. + :param src_idx_all_ranks: gathered from the return value of + *generate_local_tree*. Only significant on root rank. + :param tgt_idx_all_ranks: gathered from the return value of + *generate_local_tree*. Only significant on root rank. :param comm: MPI communicator. :param _communicate_mpoles_via_allreduce: Use MPI allreduce for communicating multipole expressions. Using MPI allreduce is slower but might be helpful for @@ -370,7 +372,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, source_weights = source_weights[global_wrangler.tree.user_source_ids] local_src_weights = distribute_source_weights( - source_weights, src_idx, comm=comm + source_weights, src_idx_all_ranks, comm=comm ) # }}} @@ -514,7 +516,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, for irank in range(1, total_rank): potentials_all_ranks[irank] = np.empty( - tgt_idx[irank].shape, dtype=potentials.dtype + tgt_idx_all_ranks[irank].shape, dtype=potentials.dtype ) comm.Recv([potentials_all_ranks[irank], potentials_mpi_type], @@ -532,7 +534,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, dtype=potentials.dtype) for irank in range(total_rank): - potentials[tgt_idx[irank]] = potentials_all_ranks[irank] + potentials[tgt_idx_all_ranks[irank]] = potentials_all_ranks[irank] logger.debug("reorder potentials") result = global_wrangler.reorder_potentials(potentials) diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index b0b145a..54ea206 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -602,15 +602,8 @@ def generate_local_tree(queue, tree, responsible_boxes_list, # }}} - # {{ Gather source indices and target indices of each rank - - src_idx_all_ranks = comm.gather(src_idx, root=0) - tgt_idx_all_ranks = comm.gather(tgt_idx, root=0) - - # }}} - logger.info("Generate local tree on rank {} in {} sec.".format( rank, str(time.time() - start_time) )) - return local_tree, src_idx_all_ranks, tgt_idx_all_ranks + return local_tree, src_idx, tgt_idx -- GitLab From e976146e54f493d61d4d19c3d7278f61650098e1 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 28 May 2020 11:19:54 -0500 Subject: [PATCH 202/260] Add a base class DistributedExpansionWrangler --- boxtree/distributed/calculation.py | 97 +++++++++++++++--------------- boxtree/traversal.py | 14 +++++ test/test_distributed.py | 4 +- 3 files changed, 65 insertions(+), 50 deletions(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index df97ec2..e00209e 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -40,14 +40,42 @@ logger = logging.getLogger(__name__) # {{{ Distributed FMM wrangler -class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): - - def __init__(self, queue, tree, helmholtz_k, fmm_level_to_nterms=None): - super(DistributedFMMLibExpansionWrangler, self).__init__( - tree, helmholtz_k, fmm_level_to_nterms - ) - - self.queue = queue +class DistributedExpansionWrangler: + def distribute_source_weights( + self, source_weights, 
src_idx_all_ranks, comm=MPI.COMM_WORLD): + """ This method transfers needed source_weights from root process to each + worker process in communicator *comm*. + + This method needs to be called collectively by all processes in *comm*. + + :arg source_weights: Source weights in tree order on root, None on worker + processes. + :arg src_idx_all_ranks: Returned from *generate_local_tree*. None on worker + processes. + :return Source weights needed for the current process. + """ + mpi_rank = comm.Get_rank() + mpi_size = comm.Get_size() + + if mpi_rank == 0: + distribute_weight_req = [] + local_src_weights = np.empty((mpi_size,), dtype=object) + + for irank in range(mpi_size): + local_src_weights[irank] = source_weights[src_idx_all_ranks[irank]] + + if irank != 0: + distribute_weight_req.append(comm.isend( + local_src_weights[irank], dest=irank, + tag=MPITags["DIST_WEIGHT"] + )) + + MPI.Request.Waitall(distribute_weight_req) + local_src_weights = local_src_weights[0] + else: + local_src_weights = comm.recv(source=0, tag=MPITags["DIST_WEIGHT"]) + + return local_src_weights def slice_mpoles(self, mpoles, slice_indices): if len(slice_indices) == 0: @@ -143,6 +171,17 @@ class DistributedFMMLibExpansionWrangler(FMMLibExpansionWrangler): box_in_subrange ) + +class DistributedFMMLibExpansionWrangler( + FMMLibExpansionWrangler, DistributedExpansionWrangler): + + def __init__(self, queue, tree, helmholtz_k, fmm_level_to_nterms=None): + super(DistributedFMMLibExpansionWrangler, self).__init__( + tree, helmholtz_k, fmm_level_to_nterms + ) + + self.queue = queue + # }}} @@ -293,46 +332,6 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): # }}} -# {{{ Distribute source weights - -def distribute_source_weights(source_weights, src_idx, comm=MPI.COMM_WORLD): - """ This function transfers needed source_weights from root process to each - worker process in communicator *comm*. - - This function needs to be called collectively by all processes in *comm*. - - :arg source_weights: Source weights in tree order on root, None on worker - processes. - :arg src_idx: Returned from *generate_local_tree*. None on worker processes. - :return Source weights needed for the current process. 
- """ - current_rank = comm.Get_rank() - total_rank = comm.Get_size() - - if current_rank == 0: - weight_req = [] - local_src_weights = np.empty((total_rank,), dtype=object) - - for irank in range(total_rank): - local_src_weights[irank] = source_weights[src_idx[irank]] - - if irank != 0: - weight_req.append( - comm.isend(local_src_weights[irank], dest=irank, - tag=MPITags["DIST_WEIGHT"]) - ) - - MPI.Request.Waitall(weight_req) - - local_src_weights = local_src_weights[0] - else: - local_src_weights = comm.recv(source=0, tag=MPITags["DIST_WEIGHT"]) - - return local_src_weights - -# }}} - - # {{{ FMM driver for calculating potentials def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, @@ -371,7 +370,7 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, # Convert src_weights to tree order source_weights = source_weights[global_wrangler.tree.user_source_ids] - local_src_weights = distribute_source_weights( + local_src_weights = local_wrangler.distribute_source_weights( source_weights, src_idx_all_ranks, comm=comm ) diff --git a/boxtree/traversal.py b/boxtree/traversal.py index 85036d0..8d6ea48 100644 --- a/boxtree/traversal.py +++ b/boxtree/traversal.py @@ -1698,6 +1698,20 @@ class FMMTraversalInfo(DeviceDataRecord): def ntarget_or_target_parent_boxes(self): return len(self.target_or_target_parent_boxes) + def to_device(self, queue, exclude_fields=frozenset()): + exclude_fields = set(exclude_fields) + exclude_fields.add("level_start_source_box_nrs") + exclude_fields.add("level_start_target_box_nrs") + exclude_fields.add("level_start_target_or_target_parent_box_nrs") + exclude_fields.add("level_start_source_parent_box_nrs") + exclude_fields.add("tree") + + self.tree = self.tree.to_device(queue) + + return super(FMMTraversalInfo, self).to_device( + queue, frozenset(exclude_fields) + ) + # }}} diff --git a/test/test_distributed.py b/test/test_distributed.py index 6a7aca8..e6fb5aa 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -5,6 +5,7 @@ from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler from boxtree.tools import ConstantOneExpansionWrangler as \ ConstantOneExpansionWranglerBase from boxtree.tools import run_mpi +from boxtree.distributed.calculation import DistributedExpansionWrangler import logging import os import pytest @@ -117,7 +118,8 @@ def test_against_shared(num_processes, dims, nsources, ntargets): # {{{ Constantone expansion wrangler -class ConstantOneExpansionWrangler(ConstantOneExpansionWranglerBase): +class ConstantOneExpansionWrangler( + ConstantOneExpansionWranglerBase, DistributedExpansionWrangler): def __init__(self, tree): super(ConstantOneExpansionWrangler, self).__init__(tree) -- GitLab From 6135bba91ce93c78746312490a20b04f2f8cfdf2 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 28 May 2020 12:54:41 -0500 Subject: [PATCH 203/260] Fix pylint --- boxtree/distributed/calculation.py | 15 +++++++++++---- test/test_distributed.py | 18 +++++++----------- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index e00209e..c1c595c 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -41,6 +41,14 @@ logger = logging.getLogger(__name__) # {{{ Distributed FMM wrangler class DistributedExpansionWrangler: + def __init__(self, queue, tree): + self.queue = queue + self.tree = tree + + def multipole_expansions_view(self, mpole_exps, level): + # should be implemented in 
subclasses + pass + def distribute_source_weights( self, source_weights, src_idx_all_ranks, comm=MPI.COMM_WORLD): """ This method transfers needed source_weights from root process to each @@ -176,12 +184,11 @@ class DistributedFMMLibExpansionWrangler( FMMLibExpansionWrangler, DistributedExpansionWrangler): def __init__(self, queue, tree, helmholtz_k, fmm_level_to_nterms=None): - super(DistributedFMMLibExpansionWrangler, self).__init__( - tree, helmholtz_k, fmm_level_to_nterms + DistributedExpansionWrangler.__init__(self, queue, tree) + FMMLibExpansionWrangler.__init__( + self, tree, helmholtz_k, fmm_level_to_nterms ) - self.queue = queue - # }}} diff --git a/test/test_distributed.py b/test/test_distributed.py index e6fb5aa..881c71b 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -5,7 +5,6 @@ from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler from boxtree.tools import ConstantOneExpansionWrangler as \ ConstantOneExpansionWranglerBase from boxtree.tools import run_mpi -from boxtree.distributed.calculation import DistributedExpansionWrangler import logging import os import pytest @@ -116,19 +115,16 @@ def test_against_shared(num_processes, dims, nsources, ntargets): run_mpi(__file__, num_processes, newenv) -# {{{ Constantone expansion wrangler - -class ConstantOneExpansionWrangler( - ConstantOneExpansionWranglerBase, DistributedExpansionWrangler): - - def __init__(self, tree): - super(ConstantOneExpansionWrangler, self).__init__(tree) - self.level_nterms = np.ones(tree.nlevels, dtype=np.int32) +def _test_constantone(dims, nsources, ntargets, dtype): + from boxtree.distributed.calculation import DistributedExpansionWrangler -# }}} + class ConstantOneExpansionWrangler( + ConstantOneExpansionWranglerBase, DistributedExpansionWrangler): + def __init__(self, tree): + super(ConstantOneExpansionWrangler, self).__init__(tree) + self.level_nterms = np.ones(tree.nlevels, dtype=np.int32) -def _test_constantone(dims, nsources, ntargets, dtype): from mpi4py import MPI # Get the current rank -- GitLab From b396b1fd20ea5bc082703103458c4d0bfb1ddc47 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 28 May 2020 13:12:27 -0500 Subject: [PATCH 204/260] More pylint fix --- boxtree/distributed/calculation.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index c1c595c..f38ef7c 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -46,8 +46,10 @@ class DistributedExpansionWrangler: self.tree = tree def multipole_expansions_view(self, mpole_exps, level): - # should be implemented in subclasses - pass + # should be overwritten in subclasses + level_start_box_idx = -1 + mpoles_current_level = np.array(0) + return level_start_box_idx, mpoles_current_level def distribute_source_weights( self, source_weights, src_idx_all_ranks, comm=MPI.COMM_WORLD): -- GitLab From 744361e4dc3a2b9583c28b796372cc6ba7667836 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 9 Sep 2020 23:17:08 -0700 Subject: [PATCH 205/260] Add more documentation --- boxtree/distributed/__init__.py | 71 +++++++++++++++++++----------- boxtree/distributed/calculation.py | 71 ++++++++++++++++++------------ doc/distributed.rst | 21 +++++++++ doc/index.rst | 1 + test/test_distributed.py | 8 ++-- 5 files changed, 115 insertions(+), 57 deletions(-) create mode 100644 doc/distributed.rst diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 88d67a3..6f53ad1 100644 
--- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -27,6 +27,8 @@ from mpi4py import MPI import numpy as np from boxtree.cost import FMMCostModel +__all__ = ['DistributedFMMRunner'] + MPITags = dict( DIST_WEIGHT=1, GATHER_POTENTIALS=2, @@ -36,8 +38,8 @@ MPITags = dict( def dtype_to_mpi(dtype): - """ This function translates a numpy.dtype object into the corresponding type - used in mpi4py. + """ This function translates a numpy datatype into the corresponding type used in + mpi4py. """ if hasattr(MPI, '_typedict'): mpi_type = MPI._typedict[np.dtype(dtype).char] @@ -49,34 +51,52 @@ def dtype_to_mpi(dtype): return mpi_type -class DistributedFMMInfo(object): +class DistributedFMMRunner(object): + """ + .. attribute:: global_wrangler + + An object implementing :class:`boxtree.fmm.ExpansionWranglerInterface`. + *global_wrangler* contains reference to the global tree object on host memory + and is used for distributing and collecting density/potential between the + root and worker ranks. + + .. attribute:: local_wrangler + + An object implementing :class:`boxtree.fmm.ExpansionWranglerInterface`. + *local_wrangler* contains reference to the local tree object on host memory + and is used for local FMM operations. + """ def __init__(self, queue, global_tree_dev, traversal_builder, distributed_expansion_wrangler_factory, calibration_params=None, comm=MPI.COMM_WORLD): - """ - .. attribute:: global_wrangler - - An object implementing :class:`ExpansionWranglerInterface`. - *global_wrangler* contains reference to the global tree object and is - used for distributing and collecting density/potential between the root - and worker ranks. - - .. attribute:: local_wrangler - - An object implementing :class:`ExpansionWranglerInterface`. - *local_wrangler* contains reference to the local tree object and is - used for local FMM operations. + """Constructor of the ``DistributedFMMRunner`` class. + + This constructor distributes the global tree from the root rank to each + worker rank. + + :arg global_tree_dev: a :class:`boxtree.Tree` object in device memory. + :arg traversal_builder: an object which, when called, takes a + :class:`pyopencl.CommandQueue` object and a :class:`boxtree.Tree` object, + and generates a :class:`boxtree.traversal.FMMTraversalInfo` object from + the tree using the command queue. + :arg distributed_expansion_wrangler_factory: an object which, when called, + takes a :class:`boxtree.Tree` object and returns an object implementing + :class:`boxtree.fmm.ExpansionWranglerInterface`. + :arg calibration_params: Calibration parameters for the cost model, + if supplied. The cost model is used for estimating the execution time of + each box, which is used for improving load balancing. + :arg comm: MPI communicator. 
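
[Illustrative sketch, not part of the patch.] The two callable arguments documented above are only characterized by their calling conventions. A minimal sketch of what they might look like follows; the names ``ctx`` and ``queue`` are placeholders, ``helmholtz_k=0`` selects the Laplace kernel in pyfmmlib, and any wrangler implementing the expansion wrangler interface could be returned instead::

    import pyopencl as cl
    from boxtree.traversal import FMMTraversalBuilder
    from boxtree.distributed.calculation import DistributedFMMLibExpansionWrangler

    ctx = cl.create_some_context()
    queue = cl.CommandQueue(ctx)

    # "traversal_builder" is called as traversal_builder(queue, tree, **kwargs);
    # an FMMTraversalBuilder instance already has that calling convention.
    traversal_builder = FMMTraversalBuilder(ctx)

    # "distributed_expansion_wrangler_factory" is called with a (local) tree.
    def wrangler_factory(tree):
        return DistributedFMMLibExpansionWrangler(
            queue, tree, helmholtz_k=0,                  # 0: Laplace kernel
            fmm_level_to_nterms=lambda tree, level: 20)  # fixed expansion order
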
""" self.comm = comm - current_rank = comm.Get_rank() + mpi_rank = comm.Get_rank() # {{{ Broadcast global tree global_tree = None - if current_rank == 0: + if mpi_rank == 0: global_tree = global_tree_dev.get(queue) global_tree = comm.bcast(global_tree, root=0) global_tree_dev = global_tree.to_device(queue).with_queue(queue) @@ -97,28 +117,27 @@ class DistributedFMMInfo(object): # {{{ Partiton work - # Construct default cost model if not supplied cost_model = FMMCostModel() if calibration_params is None: - # TODO: should replace the calibration params with a reasonable - # deafult one + # Use default calibration parameters if not supplied + # TODO: should replace the default calibration params with a more + # accurate one calibration_params = \ FMMCostModel.get_unit_calibration_params() - boxes_time = cost_model.cost_per_box( + cost_per_box = cost_model.cost_per_box( queue, global_trav_dev, self.global_wrangler.level_nterms, calibration_params ).get() from boxtree.distributed.partition import partition_work responsible_boxes_list = partition_work( - boxes_time, self.global_trav, comm.Get_size() + cost_per_box, self.global_trav, comm.Get_size() ) # It is assumed that, even if each rank computes `responsible_boxes_list` - # independently, it should be the same across ranks, since ranks use the same - # calibration parameters. + # independently, it should be the same across ranks. # }}} @@ -162,6 +181,8 @@ class DistributedFMMInfo(object): # }}} def drive_dfmm(self, source_weights, _communicate_mpoles_via_allreduce=False): + """Calculate potentials at target points. + """ from boxtree.distributed.calculation import calculate_pot return calculate_pot( self.local_wrangler, self.global_wrangler, self.local_trav, diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index f38ef7c..71568a8 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -53,16 +53,20 @@ class DistributedExpansionWrangler: def distribute_source_weights( self, source_weights, src_idx_all_ranks, comm=MPI.COMM_WORLD): - """ This method transfers needed source_weights from root process to each - worker process in communicator *comm*. - - This method needs to be called collectively by all processes in *comm*. - - :arg source_weights: Source weights in tree order on root, None on worker - processes. - :arg src_idx_all_ranks: Returned from *generate_local_tree*. None on worker - processes. - :return Source weights needed for the current process. + """This method transfers needed source_weights from root rank to each worker + rank in communicator *comm*. + + This method needs to be called collectively by all ranks in communicator + *comm*. + + :arg source_weights: a :class:`numpy.ndarray` with shape ``(nsources,)`` + representing the weights of sources on the root rank. ``None`` on worker + ranks. + :arg src_idx_all_ranks: a :class:`list` with shape ``(nranks,)``, where the + ith entry is a :class:`numpy.ndarray` indexed into *source_weights* to be + sent from the root rank to rank *i*. Each entry can be generated by + *generate_local_tree*. ``None`` on worker ranks. + :return: The received source weights of the current rank. 
""" mpi_rank = comm.Get_rank() mpi_size = comm.Get_size() @@ -134,9 +138,6 @@ class DistributedExpansionWrangler: mpole_updates_start = mpole_updates_end - def empty_box_in_subrange_mask(self): - return cl.array.empty(self.queue, self.tree.nboxes, dtype=np.int8) - @memoize_method def find_boxes_used_by_subrange_kernel(self): return ElementwiseKernel( @@ -167,9 +168,28 @@ class DistributedExpansionWrangler: "find_boxes_used_by_subrange" ) - def find_boxes_used_by_subrange(self, box_in_subrange, subrange, - box_to_user_starts, box_to_user_lists, - contributing_boxes_list): + def find_boxes_used_by_subrange( + self, subrange, box_to_user_starts, box_to_user_lists, + contributing_boxes_list): + """Test whether the multipole expansions of the contributing boxes are used + by at least one box in a range. + + :arg subrange: the range is represented by ``[subrange[0], subrange[1])``. + :arg box_to_user_start: a :class:`pyopencl.array.Array` object indicating the + start and end index in *box_to_user_lists* for each box in + *contributing_boxes_list*. + :arg box_to_user_lists: a :class:`pyopencl.array.Array` object storing the + users of each box in *contributing_boxes_list*. + :returns: a :class:`pyopencl.array.Array` object with the same shape as + *contributing_boxes_list*, where the *i*th entry is 1 if + ``contributing_boxes_list[i]`` is used by at least on box in the + subrange specified. + """ + box_in_subrange = cl.array.zeros( + contributing_boxes_list.queue, + contributing_boxes_list.shape[0], + dtype=np.int8 + ) knl = self.find_boxes_used_by_subrange_kernel() knl( @@ -181,10 +201,11 @@ class DistributedExpansionWrangler: box_in_subrange ) + return box_in_subrange + class DistributedFMMLibExpansionWrangler( FMMLibExpansionWrangler, DistributedExpansionWrangler): - def __init__(self, queue, tree, helmholtz_k, fmm_level_to_nterms=None): DistributedExpansionWrangler.__init__(self, queue, tree) FMMLibExpansionWrangler.__init__( @@ -211,8 +232,8 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): multipole method on heterogeneous architectures." Communications of the ACM 55, no. 5 (2012): 101-109. 
""" - rank = comm.Get_rank() - nprocs = comm.Get_size() + mpi_rank = comm.Get_rank() + mpi_size = comm.Get_size() stats = {} @@ -232,7 +253,7 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): contributing_boxes[trav.tree.responsible_boxes_list] = 1 from boxtree.tools import AllReduceCommPattern - comm_pattern = AllReduceCommPattern(rank, nprocs) + comm_pattern = AllReduceCommPattern(mpi_rank, mpi_size) # Temporary buffers for receiving data mpole_exps_buf = np.empty(mpole_exps.shape, dtype=mpole_exps.dtype) @@ -261,18 +282,12 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): trav.tree.box_id_dtype ) - subrange_start, subrange_end = message_subrange - contributing_boxes_list_dev = cl.array.to_device( wrangler.queue, contributing_boxes_list ) - box_in_subrange = cl.array.zeros( - wrangler.queue, contributing_boxes_list.shape[0], dtype=np.int8 - ) - - wrangler.find_boxes_used_by_subrange( - box_in_subrange, message_subrange, + box_in_subrange = wrangler.find_boxes_used_by_subrange( + message_subrange, box_to_user_starts_dev, box_to_user_lists_dev, contributing_boxes_list_dev ) diff --git a/doc/distributed.rst b/doc/distributed.rst new file mode 100644 index 0000000..b85811f --- /dev/null +++ b/doc/distributed.rst @@ -0,0 +1,21 @@ +Distributed Computation +======================= + +High-level interface +-------------------- + +To perform stardard point-FMM, first construct a +:class:`boxtree.distributed.DistributedFMMRunner` object. The constructor will +distribute the necessary information from the root rank to all worker ranks. Then, +the :meth:`boxtree.distributed.DistributedFMMRunner.drive_dfmm` can be used for +launching FMM. + +.. autoclass:: boxtree.distributed.DistributedFMMRunner + + .. automethod:: drive_dfmm + +FMM Computation +--------------- + +.. 
autoclass:: boxtree.distributed.calculation.DistributedExpansionWrangler + :members: diff --git a/doc/index.rst b/doc/index.rst index edff8a3..33868c7 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -41,6 +41,7 @@ Overview fmm lookup cost + distributed misc Indices and tables diff --git a/test/test_distributed.py b/test/test_distributed.py index 881c71b..14c6e4d 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -83,8 +83,8 @@ def _test_against_shared(dims, nsources, ntargets, dtype): return DistributedFMMLibExpansionWrangler( queue, tree, helmholtz_k, fmm_level_to_nterms=fmm_level_to_nterms) - from boxtree.distributed import DistributedFMMInfo - distribued_fmm_info = DistributedFMMInfo( + from boxtree.distributed import DistributedFMMRunner + distribued_fmm_info = DistributedFMMRunner( queue, tree, tg, distributed_expansion_wrangler_factory, comm=comm ) pot_dfmm = distribued_fmm_info.drive_dfmm(sources_weights) @@ -163,8 +163,8 @@ def _test_constantone(dims, nsources, ntargets, dtype): def constantone_expansion_wrangler_factory(tree): return ConstantOneExpansionWrangler(tree) - from boxtree.distributed import DistributedFMMInfo - distributed_fmm_info = DistributedFMMInfo( + from boxtree.distributed import DistributedFMMRunner + distributed_fmm_info = DistributedFMMRunner( queue, tree, tg, constantone_expansion_wrangler_factory, comm=MPI.COMM_WORLD ) -- GitLab From b1241171ebc9ca88172bee95c580f7889b84a520 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 20 Sep 2020 21:12:58 -0700 Subject: [PATCH 206/260] Log timing for each stage in distributed FMM eval --- boxtree/distributed/__init__.py | 7 +- boxtree/distributed/calculation.py | 151 +++++++++++++++++------------ test/test_distributed.py | 18 +++- 3 files changed, 110 insertions(+), 66 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 6f53ad1..92a4c70 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -180,12 +180,15 @@ class DistributedFMMRunner(object): # }}} - def drive_dfmm(self, source_weights, _communicate_mpoles_via_allreduce=False): + def drive_dfmm( + self, source_weights, _communicate_mpoles_via_allreduce=False, + timing_data=None): """Calculate potentials at target points. 
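
[Illustrative sketch, not part of the patch.] Tying the high-level interface described in doc/distributed.rst to the test code above, a complete driver script follows roughly the shape below and has to be launched under MPI, for example ``mpiexec -n 4 python driver.py``. Particle counts, the constant source weights and the fixed expansion order are arbitrary choices made for the sketch, not values required by the library::

    import numpy as np
    import pyopencl as cl
    from mpi4py import MPI

    from boxtree import TreeBuilder
    from boxtree.traversal import FMMTraversalBuilder
    from boxtree.distributed import DistributedFMMRunner
    from boxtree.distributed.calculation import DistributedFMMLibExpansionWrangler

    comm = MPI.COMM_WORLD
    ctx = cl.create_some_context()
    queue = cl.CommandQueue(ctx)

    tree = None
    weights = None
    if comm.Get_rank() == 0:
        # the global tree and the source weights only need to exist on rank 0
        from boxtree.tools import make_normal_particle_array as p_normal
        sources = p_normal(queue, 2000, 2, np.float64, seed=10)
        targets = p_normal(queue, 500, 2, np.float64, seed=20)
        tree, _ = TreeBuilder(ctx)(
            queue, sources, targets=targets, max_particles_in_box=30)
        weights = np.ones(2000, dtype=np.float64)

    def wrangler_factory(local_tree):
        return DistributedFMMLibExpansionWrangler(
            queue, local_tree, 0, fmm_level_to_nterms=lambda t, lvl: 20)

    runner = DistributedFMMRunner(
        queue, tree, FMMTraversalBuilder(ctx), wrangler_factory, comm=comm)

    pot = runner.drive_dfmm(weights)  # gathered potentials on rank 0, None elsewhere
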
""" from boxtree.distributed.calculation import calculate_pot return calculate_pot( self.local_wrangler, self.global_wrangler, self.local_trav, source_weights, self.src_idx_all_ranks, self.tgt_idx_all_ranks, - _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce + _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce, + timing_data=timing_data ) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 71568a8..d291c33 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -28,6 +28,7 @@ import pyopencl as cl from boxtree.distributed import MPITags from mpi4py import MPI from boxtree.distributed import dtype_to_mpi +from boxtree.fmm import TimingRecorder from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler from pytools import memoize_method from pyopencl.tools import dtype_to_ctype @@ -360,37 +361,47 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, src_idx_all_ranks, tgt_idx_all_ranks, comm=MPI.COMM_WORLD, - _communicate_mpoles_via_allreduce=False): + _communicate_mpoles_via_allreduce=False, + timing_data=None): """ Calculate potentials for targets on distributed memory machines. This function needs to be called collectively by all ranks in *comm*. - :arg local_wrangler: Expansion wranglers for each worker rank. - :param global_wrangler: Expansion wrangler on the root rank for assembling - partial results from worker processes together. This argument differs from - *local_wrangler* by referening the global tree instead of local trees. - This argument is None on worker ranks. - :param local_trav: Local traversal object returned from *generate_local_travs*. - :param source_weights: Source weights for FMM. None on worker ranks. - :param src_idx_all_ranks: gathered from the return value of - *generate_local_tree*. Only significant on root rank. - :param tgt_idx_all_ranks: gathered from the return value of - *generate_local_tree*. Only significant on root rank. - :param comm: MPI communicator. - :param _communicate_mpoles_via_allreduce: Use MPI allreduce for communicating - multipole expressions. Using MPI allreduce is slower but might be helpful for - debugging purpose. + :arg local_wrangler: expansion wrangler referencing the local tree. This argument + is significant on all ranks. + :arg global_wrangler: expansion wrangler referencing the global tree. This + argument is used on the root rank for assembling partial results from + worker ranks together. This argument is significant only on the root rank. + :arg local_trav: local traversal object returned from *generate_local_travs*. + :arg source_weights: source weights for FMM. This argument is significant only + on the root rank. + :arg src_idx_all_ranks: a :class:`list` with shape ``(nranks,)``, where the ith + entry is a :class:`numpy.ndarray` representing the source indices of the + local tree on rank *i*. Each entry can be returned from + *generate_local_tree*. This argument is significant only on root rank. + :arg tgt_idx_all_ranks: a :class:`list` with shape ``(nranks,)``, where the ith + entry is a :class:`numpy.ndarray` representing the target indices of the + local tree on rank *i*. Each entry can be returned from + *generate_local_tree*. This argument is significant only on root rank. + :arg comm: MPI communicator. 
+ :arg _communicate_mpoles_via_allreduce: whether to use MPI allreduce for + communicating multipole expressions. Using MPI allreduce is slower but might + be helpful for debugging purpose. + :arg timing_data: Either *None*, or a :class:`dict` that is populated with + timing information for the stages of the algorithm (in the form of + :class:`TimingResult`) for the local rank, if such information is available. :return: On the root rank, this function returns calculated potentials. On worker processes, this function returns None. """ + recorder = TimingRecorder() # Get MPI information - current_rank = comm.Get_rank() - total_rank = comm.Get_size() + mpi_rank = comm.Get_rank() + mpi_size = comm.Get_size() # {{{ Distribute source weights - if current_rank == 0: + if mpi_rank == 0: # Convert src_weights to tree order source_weights = source_weights[global_wrangler.tree.user_source_ids] @@ -402,22 +413,24 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, # {{{ "Step 2.1:" Construct local multipoles - logger.debug("construct local multipoles") - mpole_exps = local_wrangler.form_multipoles( + mpole_exps, timing_future = local_wrangler.form_multipoles( local_trav.level_start_source_box_nrs, local_trav.source_boxes, - local_src_weights)[0] + local_src_weights) + + recorder.add("form_multipoles", timing_future) # }}} # {{{ "Step 2.2:" Propagate multipoles upward - logger.debug("propagate multipoles upward") - local_wrangler.coarsen_multipoles( + mpole_exps, timing_future = local_wrangler.coarsen_multipoles( local_trav.level_start_source_parent_box_nrs, local_trav.source_parent_boxes, mpole_exps) + recorder.add("coarsen_multipoles", timing_future) + # mpole_exps is called Phi in [1] # }}} @@ -435,12 +448,13 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") - logger.debug("direct evaluation from neighbor source boxes ('list 1')") - potentials = local_wrangler.eval_direct( + potentials, timing_future = local_wrangler.eval_direct( local_trav.target_boxes, local_trav.neighbor_source_boxes_starts, local_trav.neighbor_source_boxes_lists, - local_src_weights)[0] + local_src_weights) + + recorder.add("eval_direct", timing_future) # these potentials are called alpha in [1] @@ -448,13 +462,14 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, # {{{ "Stage 4:" translate separated siblings' ("list 2") mpoles to local - logger.debug("translate separated siblings' ('list 2') mpoles to local") - local_exps = local_wrangler.multipole_to_local( + local_exps, timing_future = local_wrangler.multipole_to_local( local_trav.level_start_target_or_target_parent_box_nrs, local_trav.target_or_target_parent_boxes, local_trav.from_sep_siblings_starts, local_trav.from_sep_siblings_lists, - mpole_exps)[0] + mpole_exps) + + recorder.add("multipole_to_local", timing_future) # local_exps represents both Gamma and Delta in [1] @@ -462,70 +477,80 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, # {{{ "Stage 5:" evaluate sep. smaller mpoles ("list 3") at particles - logger.debug("evaluate sep. 
smaller mpoles at particles ('list 3 far')") - # (the point of aiming this stage at particles is specifically to keep its # contribution *out* of the downward-propagating local expansions) - potentials = potentials + local_wrangler.eval_multipoles( + mpole_result, timing_future = local_wrangler.eval_multipoles( local_trav.target_boxes_sep_smaller_by_source_level, local_trav.from_sep_smaller_by_level, - mpole_exps)[0] + mpole_exps) + + recorder.add("eval_multipoles", timing_future) + + potentials = potentials + mpole_result # these potentials are called beta in [1] if local_trav.from_sep_close_smaller_starts is not None: - logger.debug("evaluate separated close smaller interactions directly " - "('list 3 close')") - potentials = potentials + local_wrangler.eval_direct( + direct_result, timing_future = local_wrangler.eval_direct( local_trav.target_boxes, local_trav.from_sep_close_smaller_starts, local_trav.from_sep_close_smaller_lists, - local_src_weights)[0] + local_src_weights) + + recorder.add("eval_direct", timing_future) + + potentials = potentials + direct_result # }}} # {{{ "Stage 6:" form locals for separated bigger source boxes ("list 4") - logger.debug("form locals for separated bigger source boxes ('list 4 far')") - - local_exps = local_exps + local_wrangler.form_locals( + local_result, timing_future = local_wrangler.form_locals( local_trav.level_start_target_or_target_parent_box_nrs, local_trav.target_or_target_parent_boxes, local_trav.from_sep_bigger_starts, local_trav.from_sep_bigger_lists, - local_src_weights)[0] + local_src_weights) - if local_trav.from_sep_close_bigger_starts is not None: - logger.debug("evaluate separated close bigger interactions directly " - "('list 4 close')") + recorder.add("form_locals", timing_future) + + local_exps = local_exps + local_result - potentials = potentials + local_wrangler.eval_direct( + if local_trav.from_sep_close_bigger_starts is not None: + direct_result, timing_future = local_wrangler.eval_direct( local_trav.target_boxes, local_trav.from_sep_close_bigger_starts, local_trav.from_sep_close_bigger_lists, - local_src_weights)[0] + local_src_weights) + + recorder.add("eval_direct", timing_future) + + potentials = potentials + direct_result # }}} # {{{ "Stage 7:" propagate local_exps downward - logger.debug("propagate local_exps downward") - - local_wrangler.refine_locals( + local_exps, timing_future = local_wrangler.refine_locals( local_trav.level_start_target_or_target_parent_box_nrs, local_trav.target_or_target_parent_boxes, local_exps) + recorder.add("refine_locals", timing_future) + # }}} # {{{ "Stage 8:" evaluate locals - logger.debug("evaluate locals") - potentials = potentials + local_wrangler.eval_locals( + local_result, timing_future = local_wrangler.eval_locals( local_trav.level_start_target_box_nrs, local_trav.target_boxes, - local_exps)[0] + local_exps) + + recorder.add("eval_locals", timing_future) + + potentials = potentials + local_result # }}} @@ -533,11 +558,11 @@ def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, potentials_mpi_type = dtype_to_mpi(potentials.dtype) - if current_rank == 0: - potentials_all_ranks = np.empty((total_rank,), dtype=object) + if mpi_rank == 0: + potentials_all_ranks = np.empty((mpi_size,), dtype=object) potentials_all_ranks[0] = potentials - for irank in range(1, total_rank): + for irank in range(1, mpi_size): potentials_all_ranks[irank] = np.empty( tgt_idx_all_ranks[irank].shape, dtype=potentials.dtype ) @@ -552,22 +577,22 @@ def calculate_pot(local_wrangler, 
global_wrangler, local_trav, source_weights, # {{{ Assemble potentials from worker processes together on the root process - if current_rank == 0: - potentials = np.empty((global_wrangler.tree.ntargets,), - dtype=potentials.dtype) + if mpi_rank == 0: + potentials = np.empty(global_wrangler.tree.ntargets, dtype=potentials.dtype) - for irank in range(total_rank): + for irank in range(mpi_size): potentials[tgt_idx_all_ranks[irank]] = potentials_all_ranks[irank] - logger.debug("reorder potentials") result = global_wrangler.reorder_potentials(potentials) - logger.debug("finalize potentials") result = global_wrangler.finalize_potentials(result) # }}} - if current_rank == 0: + if timing_data is not None: + timing_data.update(recorder.summarize()) + + if mpi_rank == 0: return result # }}} diff --git a/test/test_distributed.py b/test/test_distributed.py index 14c6e4d..c6e202e 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -87,7 +87,23 @@ def _test_against_shared(dims, nsources, ntargets, dtype): distribued_fmm_info = DistributedFMMRunner( queue, tree, tg, distributed_expansion_wrangler_factory, comm=comm ) - pot_dfmm = distribued_fmm_info.drive_dfmm(sources_weights) + + timing_data = {} + pot_dfmm = distribued_fmm_info.drive_dfmm( + sources_weights, timing_data=timing_data + ) + assert timing_data + + # Uncomment the following section to print the time taken of each stage + """ + if rank == 1: + from pytools import Table + table = Table() + table.add_row(["stage", "time (s)"]) + for stage in timing_data: + table.add_row([stage, "%.2f" % timing_data[stage]["wall_elapsed"]]) + print(table) + """ if rank == 0: error = (la.norm(pot_fmm - pot_dfmm * 2 * np.pi, ord=np.inf) -- GitLab From 8e5a172341846085462eabce98d05f90ff4a8c4f Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 24 Sep 2020 07:19:22 -0700 Subject: [PATCH 207/260] Refactor local tree generation --- boxtree/distributed/local_traversal.py | 6 +- boxtree/distributed/local_tree.py | 101 ++++++++++--------------- setup.py | 7 +- 3 files changed, 44 insertions(+), 70 deletions(-) diff --git a/boxtree/distributed/local_traversal.py b/boxtree/distributed/local_traversal.py index 773f286..63dbc87 100644 --- a/boxtree/distributed/local_traversal.py +++ b/boxtree/distributed/local_traversal.py @@ -78,7 +78,9 @@ def generate_local_travs( d_tree.box_flags) # Generate local source flags - local_box_flags = d_tree.box_flags & 250 + local_box_flags = d_tree.box_flags & (255 - box_flags_enum.HAS_OWN_SOURCES) + local_box_flags = local_box_flags & (255 - box_flags_enum.HAS_CHILD_SOURCES) + modify_own_sources_knl = cl.elementwise.ElementwiseKernel( queue.context, Template(r""" @@ -117,7 +119,7 @@ def generate_local_travs( modify_child_sources_knl(d_tree.ancestor_mask, local_box_flags) d_local_trav, _ = traversal_builder( - queue, d_tree, debug=True, + queue, d_tree, box_bounding_box=box_bounding_box, local_box_flags=local_box_flags ) diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 54ea206..5927652 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -50,15 +50,13 @@ FetchLocalParticlesKernels = namedtuple( def get_fetch_local_particles_knls(context, global_tree): - """ - This function compiles several PyOpenCL kernels helpful for fetching particles of + """Compiles PyOpenCL kernels helpful for constructing particles of local trees from global tree. :arg context: The context to compile against. 
:arg global_tree: The global tree from which local trees are generated. :return: A FetchLocalParticlesKernels object. """ - particle_mask_knl = cl.elementwise.ElementwiseKernel( context, arguments=Template(""" @@ -204,10 +202,10 @@ def get_fetch_local_particles_knls(context, global_tree): ) -def fetch_local_particles(queue, global_tree, src_box_mask, tgt_box_mask, local_tree, - knls): - """ This helper function fetches particles needed for worker processes, and - reconstruct list of lists indexing. +def fetch_local_particles( + queue, global_tree, src_box_mask, tgt_box_mask, local_tree): + """This helper function generates particles of the local tree, and reconstruct + list of lists indexing accordingly. Specifically, this function generates the following fields for the local tree: sources, targets, target_radii, box_source_starts, box_source_counts_nonchild, @@ -216,6 +214,7 @@ def fetch_local_particles(queue, global_tree, src_box_mask, tgt_box_mask, local_ These generated fields are stored directly into *local_tree*. """ + knls = get_fetch_local_particles_knls(queue.context, global_tree) global_tree_dev = global_tree.to_device(queue).with_queue(queue) nsources = global_tree.nsources @@ -471,47 +470,6 @@ def fetch_local_particles(queue, global_tree, src_box_mask, tgt_box_mask, local_ return local_tree, src_idx, tgt_idx -class LocalTreeBuilder: - - def __init__(self, global_tree, queue): - self.global_tree = global_tree - self.knls = get_fetch_local_particles_knls(queue.context, global_tree) - self.queue = queue - - def from_global_tree(self, responsible_boxes_list, responsible_boxes_mask, - src_boxes_mask, ancestor_mask): - - local_tree = self.global_tree.copy( - responsible_boxes_list=responsible_boxes_list, - ancestor_mask=ancestor_mask.get(), - box_to_user_starts=None, - box_to_user_lists=None, - _dimensions=None, - _ntargets=None, - _nsources=None, - ) - - local_tree.user_source_ids = None - local_tree.sorted_target_ids = None - - local_tree, src_idx, tgt_idx = fetch_local_particles( - self.queue, - self.global_tree, - src_boxes_mask, - responsible_boxes_mask, - local_tree, - self.knls - ) - - local_tree._dimensions = local_tree.dimensions - local_tree._ntargets = local_tree.targets[0].shape[0] - local_tree._nsources = local_tree.sources[0].shape[0] - - local_tree.__class__ = LocalTree - - return local_tree, src_idx, tgt_idx - - class LocalTree(Tree): """ .. 
attribute:: box_to_user_starts @@ -544,38 +502,57 @@ class LocalTree(Tree): return self._dimensions -def generate_local_tree(queue, tree, responsible_boxes_list, +def generate_local_tree(queue, global_tree, responsible_boxes_list, responsible_box_query, comm=MPI.COMM_WORLD): - # Get MPI information - rank = comm.Get_rank() - size = comm.Get_size() + mpi_rank = comm.Get_rank() + mpi_size = comm.Get_size() start_time = time.time() - local_tree_builder = LocalTreeBuilder(tree, queue) - (responsible_boxes_mask, ancestor_boxes, src_boxes_mask, box_mpole_is_used) = \ - responsible_box_query.get_boxes_mask(responsible_boxes_list[rank]) + responsible_box_query.get_boxes_mask(responsible_boxes_list[mpi_rank]) + + local_tree = global_tree.copy( + responsible_boxes_list=responsible_boxes_list[mpi_rank], + ancestor_mask=ancestor_boxes.get(), + box_to_user_starts=None, + box_to_user_lists=None, + _dimensions=None, + _ntargets=None, + _nsources=None, + ) + + local_tree.user_source_ids = None + local_tree.sorted_target_ids = None - local_tree, src_idx, tgt_idx = local_tree_builder.from_global_tree( - responsible_boxes_list[rank], responsible_boxes_mask, src_boxes_mask, - ancestor_boxes + local_tree, src_idx, tgt_idx = fetch_local_particles( + queue, + global_tree, + src_boxes_mask, + responsible_boxes_mask, + local_tree, ) + local_tree._dimensions = local_tree.dimensions + local_tree._ntargets = local_tree.targets[0].shape[0] + local_tree._nsources = local_tree.sources[0].shape[0] + + local_tree.__class__ = LocalTree + # {{{ compute the users of multipole expansions of each box on root rank box_mpole_is_used_all_ranks = None - if rank == 0: + if mpi_rank == 0: box_mpole_is_used_all_ranks = np.empty( - (size, tree.nboxes), dtype=box_mpole_is_used.dtype + (mpi_size, global_tree.nboxes), dtype=box_mpole_is_used.dtype ) comm.Gather(box_mpole_is_used.get(), box_mpole_is_used_all_ranks, root=0) box_to_user_starts = None box_to_user_lists = None - if rank == 0: + if mpi_rank == 0: box_mpole_is_used_all_ranks = cl.array.to_device( queue, box_mpole_is_used_all_ranks ) @@ -603,7 +580,7 @@ def generate_local_tree(queue, tree, responsible_boxes_list, # }}} logger.info("Generate local tree on rank {} in {} sec.".format( - rank, str(time.time() - start_time) + mpi_rank, str(time.time() - start_time) )) return local_tree, src_idx, tgt_idx diff --git a/setup.py b/setup.py index 7106fda..633ba01 100644 --- a/setup.py +++ b/setup.py @@ -45,12 +45,7 @@ def main(): "pytest>=2.3", "cgen>=2013.1.2", "six", - ], - package_data={ - "boxtree": [ - "distributed/default_perf_model.json" - ] - }) + ]) if __name__ == '__main__': -- GitLab From 740ac1c6162e6b83b847b581633ad9b4c26e62c9 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 24 Sep 2020 23:51:14 -0700 Subject: [PATCH 208/260] Placate flake8 --- boxtree/distributed/__init__.py | 6 +++--- boxtree/distributed/local_tree.py | 16 ++++++++-------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 92a4c70..a4d9843 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -27,7 +27,7 @@ from mpi4py import MPI import numpy as np from boxtree.cost import FMMCostModel -__all__ = ['DistributedFMMRunner'] +__all__ = ["DistributedFMMRunner"] MPITags = dict( DIST_WEIGHT=1, @@ -41,9 +41,9 @@ def dtype_to_mpi(dtype): """ This function translates a numpy datatype into the corresponding type used in mpi4py. 
""" - if hasattr(MPI, '_typedict'): + if hasattr(MPI, "_typedict"): mpi_type = MPI._typedict[np.dtype(dtype).char] - elif hasattr(MPI, '__TypeDict__'): + elif hasattr(MPI, "__TypeDict__"): mpi_type = MPI.__TypeDict__[np.dtype(dtype).char] else: raise RuntimeError("There is no dictionary to translate from Numpy dtype to " diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 5927652..ca99baf 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -36,15 +36,15 @@ import logging logger = logging.getLogger(__name__) FetchLocalParticlesKernels = namedtuple( - 'FetchLocalParticlesKernels', + "FetchLocalParticlesKernels", [ - 'particle_mask_knl', - 'mask_scan_knl', - 'fetch_local_src_knl', - 'fetch_local_tgt_knl', - 'generate_box_particle_starts', - 'generate_box_particle_counts_nonchild', - 'generate_box_particle_counts_cumul' + "particle_mask_knl", + "mask_scan_knl", + "fetch_local_src_knl", + "fetch_local_tgt_knl", + "generate_box_particle_starts", + "generate_box_particle_counts_nonchild", + "generate_box_particle_counts_cumul" ] ) -- GitLab From 5e4571818ee31603539a0f3a17df14d0ae796fbf Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 7 Oct 2020 08:55:13 -0700 Subject: [PATCH 209/260] Refactor distributed FMM driver into drive_fmm --- boxtree/distributed/__init__.py | 16 +- boxtree/distributed/calculation.py | 243 ----------------------------- boxtree/fmm.py | 121 ++++++++++++-- 3 files changed, 120 insertions(+), 260 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index a4d9843..7e0d7b8 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -185,10 +185,14 @@ class DistributedFMMRunner(object): timing_data=None): """Calculate potentials at target points. """ - from boxtree.distributed.calculation import calculate_pot - return calculate_pot( - self.local_wrangler, self.global_wrangler, self.local_trav, - source_weights, self.src_idx_all_ranks, self.tgt_idx_all_ranks, - _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce, - timing_data=timing_data + from boxtree.fmm import drive_fmm + return drive_fmm( + self.local_trav, self.local_wrangler, source_weights, + timing_data=timing_data, + distributed=True, + global_wrangler=self.global_wrangler, + src_idx_all_ranks=self.src_idx_all_ranks, + tgt_idx_all_ranks=self.tgt_idx_all_ranks, + comm=self.comm, + _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce ) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index d291c33..35e12ab 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -27,8 +27,6 @@ import numpy as np import pyopencl as cl from boxtree.distributed import MPITags from mpi4py import MPI -from boxtree.distributed import dtype_to_mpi -from boxtree.fmm import TimingRecorder from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler from pytools import memoize_method from pyopencl.tools import dtype_to_ctype @@ -355,244 +353,3 @@ def communicate_mpoles(wrangler, comm, trav, mpole_exps, return_stats=False): return stats # }}} - - -# {{{ FMM driver for calculating potentials - -def calculate_pot(local_wrangler, global_wrangler, local_trav, source_weights, - src_idx_all_ranks, tgt_idx_all_ranks, comm=MPI.COMM_WORLD, - _communicate_mpoles_via_allreduce=False, - timing_data=None): - """ Calculate potentials for targets on distributed memory machines. 
- - This function needs to be called collectively by all ranks in *comm*. - - :arg local_wrangler: expansion wrangler referencing the local tree. This argument - is significant on all ranks. - :arg global_wrangler: expansion wrangler referencing the global tree. This - argument is used on the root rank for assembling partial results from - worker ranks together. This argument is significant only on the root rank. - :arg local_trav: local traversal object returned from *generate_local_travs*. - :arg source_weights: source weights for FMM. This argument is significant only - on the root rank. - :arg src_idx_all_ranks: a :class:`list` with shape ``(nranks,)``, where the ith - entry is a :class:`numpy.ndarray` representing the source indices of the - local tree on rank *i*. Each entry can be returned from - *generate_local_tree*. This argument is significant only on root rank. - :arg tgt_idx_all_ranks: a :class:`list` with shape ``(nranks,)``, where the ith - entry is a :class:`numpy.ndarray` representing the target indices of the - local tree on rank *i*. Each entry can be returned from - *generate_local_tree*. This argument is significant only on root rank. - :arg comm: MPI communicator. - :arg _communicate_mpoles_via_allreduce: whether to use MPI allreduce for - communicating multipole expressions. Using MPI allreduce is slower but might - be helpful for debugging purpose. - :arg timing_data: Either *None*, or a :class:`dict` that is populated with - timing information for the stages of the algorithm (in the form of - :class:`TimingResult`) for the local rank, if such information is available. - :return: On the root rank, this function returns calculated potentials. On - worker processes, this function returns None. - """ - recorder = TimingRecorder() - - # Get MPI information - mpi_rank = comm.Get_rank() - mpi_size = comm.Get_size() - - # {{{ Distribute source weights - - if mpi_rank == 0: - # Convert src_weights to tree order - source_weights = source_weights[global_wrangler.tree.user_source_ids] - - local_src_weights = local_wrangler.distribute_source_weights( - source_weights, src_idx_all_ranks, comm=comm - ) - - # }}} - - # {{{ "Step 2.1:" Construct local multipoles - - mpole_exps, timing_future = local_wrangler.form_multipoles( - local_trav.level_start_source_box_nrs, - local_trav.source_boxes, - local_src_weights) - - recorder.add("form_multipoles", timing_future) - - # }}} - - # {{{ "Step 2.2:" Propagate multipoles upward - - mpole_exps, timing_future = local_wrangler.coarsen_multipoles( - local_trav.level_start_source_parent_box_nrs, - local_trav.source_parent_boxes, - mpole_exps) - - recorder.add("coarsen_multipoles", timing_future) - - # mpole_exps is called Phi in [1] - - # }}} - - # {{{ Communicate mpoles - - if _communicate_mpoles_via_allreduce: - mpole_exps_all = np.zeros_like(mpole_exps) - comm.Allreduce(mpole_exps, mpole_exps_all) - mpole_exps = mpole_exps_all - else: - communicate_mpoles(local_wrangler, comm, local_trav, mpole_exps) - - # }}} - - # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") - - potentials, timing_future = local_wrangler.eval_direct( - local_trav.target_boxes, - local_trav.neighbor_source_boxes_starts, - local_trav.neighbor_source_boxes_lists, - local_src_weights) - - recorder.add("eval_direct", timing_future) - - # these potentials are called alpha in [1] - - # }}} - - # {{{ "Stage 4:" translate separated siblings' ("list 2") mpoles to local - - local_exps, timing_future = local_wrangler.multipole_to_local( - 
local_trav.level_start_target_or_target_parent_box_nrs, - local_trav.target_or_target_parent_boxes, - local_trav.from_sep_siblings_starts, - local_trav.from_sep_siblings_lists, - mpole_exps) - - recorder.add("multipole_to_local", timing_future) - - # local_exps represents both Gamma and Delta in [1] - - # }}} - - # {{{ "Stage 5:" evaluate sep. smaller mpoles ("list 3") at particles - - # (the point of aiming this stage at particles is specifically to keep its - # contribution *out* of the downward-propagating local expansions) - - mpole_result, timing_future = local_wrangler.eval_multipoles( - local_trav.target_boxes_sep_smaller_by_source_level, - local_trav.from_sep_smaller_by_level, - mpole_exps) - - recorder.add("eval_multipoles", timing_future) - - potentials = potentials + mpole_result - - # these potentials are called beta in [1] - - if local_trav.from_sep_close_smaller_starts is not None: - direct_result, timing_future = local_wrangler.eval_direct( - local_trav.target_boxes, - local_trav.from_sep_close_smaller_starts, - local_trav.from_sep_close_smaller_lists, - local_src_weights) - - recorder.add("eval_direct", timing_future) - - potentials = potentials + direct_result - - # }}} - - # {{{ "Stage 6:" form locals for separated bigger source boxes ("list 4") - - local_result, timing_future = local_wrangler.form_locals( - local_trav.level_start_target_or_target_parent_box_nrs, - local_trav.target_or_target_parent_boxes, - local_trav.from_sep_bigger_starts, - local_trav.from_sep_bigger_lists, - local_src_weights) - - recorder.add("form_locals", timing_future) - - local_exps = local_exps + local_result - - if local_trav.from_sep_close_bigger_starts is not None: - direct_result, timing_future = local_wrangler.eval_direct( - local_trav.target_boxes, - local_trav.from_sep_close_bigger_starts, - local_trav.from_sep_close_bigger_lists, - local_src_weights) - - recorder.add("eval_direct", timing_future) - - potentials = potentials + direct_result - - # }}} - - # {{{ "Stage 7:" propagate local_exps downward - - local_exps, timing_future = local_wrangler.refine_locals( - local_trav.level_start_target_or_target_parent_box_nrs, - local_trav.target_or_target_parent_boxes, - local_exps) - - recorder.add("refine_locals", timing_future) - - # }}} - - # {{{ "Stage 8:" evaluate locals - - local_result, timing_future = local_wrangler.eval_locals( - local_trav.level_start_target_box_nrs, - local_trav.target_boxes, - local_exps) - - recorder.add("eval_locals", timing_future) - - potentials = potentials + local_result - - # }}} - - # {{{ Worker processes send calculated potentials to the root process - - potentials_mpi_type = dtype_to_mpi(potentials.dtype) - - if mpi_rank == 0: - potentials_all_ranks = np.empty((mpi_size,), dtype=object) - potentials_all_ranks[0] = potentials - - for irank in range(1, mpi_size): - potentials_all_ranks[irank] = np.empty( - tgt_idx_all_ranks[irank].shape, dtype=potentials.dtype - ) - - comm.Recv([potentials_all_ranks[irank], potentials_mpi_type], - source=irank, tag=MPITags["GATHER_POTENTIALS"]) - else: - comm.Send([potentials, potentials_mpi_type], - dest=0, tag=MPITags["GATHER_POTENTIALS"]) - - # }}} - - # {{{ Assemble potentials from worker processes together on the root process - - if mpi_rank == 0: - potentials = np.empty(global_wrangler.tree.ntargets, dtype=potentials.dtype) - - for irank in range(mpi_size): - potentials[tgt_idx_all_ranks[irank]] = potentials_all_ranks[irank] - - result = global_wrangler.reorder_potentials(potentials) - - result = 
global_wrangler.finalize_potentials(result) - - # }}} - - if timing_data is not None: - timing_data.update(recorder.summarize()) - - if mpi_rank == 0: - return result - -# }}} diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 9258005..37b8254 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -34,10 +34,22 @@ except ImportError: from collections import Mapping +try: + from mpi4py import MPI +except ImportError: + pass + + +import numpy as np +from boxtree.distributed import dtype_to_mpi +from boxtree.distributed import MPITags from pytools import ProcessLogger -def drive_fmm(traversal, expansion_wrangler, src_weights, timing_data=None): +def drive_fmm(traversal, expansion_wrangler, src_weights, timing_data=None, + distributed=False, global_wrangler=None, + src_idx_all_ranks=None, tgt_idx_all_ranks=None, comm=None, + _communicate_mpoles_via_allreduce=False): """Top-level driver routine for a fast multipole calculation. In part, this is intended as a template for custom FMMs, in the sense that @@ -48,17 +60,43 @@ def drive_fmm(traversal, expansion_wrangler, src_weights, timing_data=None): Nonetheless, many common applications (such as point-to-point FMMs) can be covered by supplying the right *expansion_wrangler* to this routine. - :arg traversal: A :class:`boxtree.traversal.FMMTraversalInfo` instance. + For distributed implementation, this function needs to be called collectively by + all ranks in *comm*. + + :arg traversal: A :class:`boxtree.traversal.FMMTraversalInfo` instance. For + distributed implementation, this argument should be the local traversal + object generated by *generate_local_travs*. :arg expansion_wrangler: An object exhibiting the - :class:`ExpansionWranglerInterface`. - :arg src_weights: Source 'density/weights/charges'. - Passed unmodified to *expansion_wrangler*. + :class:`ExpansionWranglerInterface`. For distributed implementation, this + wrangler should reference the local tree on each rank. + :arg src_weights: Source 'density/weights/charges'. For distributed + implementation, this argument is only significant on the root rank. :arg timing_data: Either *None*, or a :class:`dict` that is populated with timing information for the stages of the algorithm (in the form of :class:`TimingResult`), if such information is available. - - Returns the potentials computed by *expansion_wrangler*. - + :arg distributed: Whether to run the driver in a distributed manner. + (Require mpi4py). + :arg global_wrangler: An object exhibiting the + :class:`ExpansionWranglerInterface`. This wrangler should reference the + global tree, which is used for assembling partial results from + worker ranks together. This argument is only significant for distributed + implementation and on the root rank. + :arg src_idx_all_ranks: a :class:`list` with shape ``(nranks,)``, where the ith + entry is a :class:`numpy.ndarray` representing the source indices of the + local tree on rank *i*. Each entry can be returned from + *generate_local_tree*. This argument is significant only on root rank. + :arg tgt_idx_all_ranks: a :class:`list` with shape ``(nranks,)``, where the ith + entry is a :class:`numpy.ndarray` representing the target indices of the + local tree on rank *i*. Each entry can be returned from + *generate_local_tree*. This argument is significant only on root rank. + :arg comm: MPI communicator. Default to ``MPI_COMM_WORLD``. + :arg _communicate_mpoles_via_allreduce: whether to use MPI allreduce for + communicating multipole expressions. 
Using MPI allreduce is slower but might + be helpful for debugging purpose. + + :return: the potentials computed by *expansion_wrangler*. For the distributed + implementation, the potentials are gathered and returned on the root rank; + this function returns *None* on the worker ranks. """ wrangler = expansion_wrangler @@ -68,7 +106,22 @@ def drive_fmm(traversal, expansion_wrangler, src_weights, timing_data=None): fmm_proc = ProcessLogger(logger, "fmm") recorder = TimingRecorder() - src_weights = wrangler.reorder_sources(src_weights) + if distributed: + # Get MPI information + if comm is None: + comm = MPI.COMM_WORLD + mpi_rank = comm.Get_rank() + mpi_size = comm.Get_size() + + if not distributed: + src_weights = wrangler.reorder_sources(src_weights) + else: + if mpi_rank == 0: + src_weights = global_wrangler.reorder_sources(src_weights) + + src_weights = wrangler.distribute_source_weights( + src_weights, src_idx_all_ranks, comm=comm + ) # {{{ "Step 2.1:" Construct local multipoles @@ -94,6 +147,15 @@ def drive_fmm(traversal, expansion_wrangler, src_weights, timing_data=None): # }}} + if distributed: + if _communicate_mpoles_via_allreduce: + mpole_exps_all = np.zeros_like(mpole_exps) + comm.Allreduce(mpole_exps, mpole_exps_all) + mpole_exps = mpole_exps_all + else: + from boxtree.distributed.calculation import communicate_mpoles + communicate_mpoles(wrangler, comm, traversal, mpole_exps) + # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") potentials, timing_future = wrangler.eval_direct( @@ -205,9 +267,46 @@ def drive_fmm(traversal, expansion_wrangler, src_weights, timing_data=None): # }}} - result = wrangler.reorder_potentials(potentials) + # {{{ Worker ranks send calculated potentials to the root rank + + if distributed: + potentials_mpi_type = dtype_to_mpi(potentials.dtype) + + if mpi_rank == 0: + potentials_all_ranks = np.empty((mpi_size,), dtype=object) + potentials_all_ranks[0] = potentials + + for irank in range(1, mpi_size): + potentials_all_ranks[irank] = np.empty( + tgt_idx_all_ranks[irank].shape, dtype=potentials.dtype + ) + + comm.Recv([potentials_all_ranks[irank], potentials_mpi_type], + source=irank, tag=MPITags["GATHER_POTENTIALS"]) + else: + comm.Send([potentials, potentials_mpi_type], + dest=0, tag=MPITags["GATHER_POTENTIALS"]) + + # }}} + + # {{{ Assemble potentials from worker ranks together on the root rank + + if distributed and mpi_rank == 0: + potentials = np.empty(global_wrangler.tree.ntargets, dtype=potentials.dtype) + + for irank in range(mpi_size): + potentials[tgt_idx_all_ranks[irank]] = potentials_all_ranks[irank] + + # }}} - result = wrangler.finalize_potentials(result) + if distributed: + result = None + if mpi_rank == 0: + result = global_wrangler.reorder_potentials(potentials) + result = global_wrangler.finalize_potentials(result) + else: + result = wrangler.reorder_potentials(potentials) + result = wrangler.finalize_potentials(result) fmm_proc.done() -- GitLab From 61be326b3ae315d4bf30d1c65a4467fc4a3cc6f4 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 7 Oct 2020 20:58:09 -0700 Subject: [PATCH 210/260] Fix test failure --- .gitlab-ci.yml | 2 +- boxtree/fmm.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5d7ddc8..093ee0b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -101,7 +101,7 @@ Pylint: Documentation: script: - - EXTRA_INSTALL="pybind11 numpy mako" + - EXTRA_INSTALL="pybind11 numpy mako mpi4py" - curl -L -O -k 
https://gitlab.tiker.net/inducer/ci-support/raw/master/build-docs.sh - ". ./build-docs.sh" tags: diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 37b8254..3e56699 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -36,13 +36,13 @@ except ImportError: try: from mpi4py import MPI + from boxtree.distributed import dtype_to_mpi + from boxtree.distributed import MPITags except ImportError: pass import numpy as np -from boxtree.distributed import dtype_to_mpi -from boxtree.distributed import MPITags from pytools import ProcessLogger -- GitLab From 20f7415c948813f4f6e2db1cd49f720a27ce41d0 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 18 Oct 2020 17:38:24 -0700 Subject: [PATCH 211/260] Move kernels for generating local trees to methods instead of using namedtuple --- boxtree/distributed/local_tree.py | 268 ++++++++++++++++-------------- 1 file changed, 142 insertions(+), 126 deletions(-) diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index ca99baf..116dbd4 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -23,7 +23,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ -from collections import namedtuple import pyopencl as cl from mako.template import Template from pyopencl.tools import dtype_to_ctype @@ -31,33 +30,15 @@ from boxtree import Tree from mpi4py import MPI import time import numpy as np +from pytools import memoize import logging logger = logging.getLogger(__name__) -FetchLocalParticlesKernels = namedtuple( - "FetchLocalParticlesKernels", - [ - "particle_mask_knl", - "mask_scan_knl", - "fetch_local_src_knl", - "fetch_local_tgt_knl", - "generate_box_particle_starts", - "generate_box_particle_counts_nonchild", - "generate_box_particle_counts_cumul" - ] -) - - -def get_fetch_local_particles_knls(context, global_tree): - """Compiles PyOpenCL kernels helpful for constructing particles of - local trees from global tree. - :arg context: The context to compile against. - :arg global_tree: The global tree from which local trees are generated. - :return: A FetchLocalParticlesKernels object. 
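
[Illustrative sketch, not part of the patch.] The refactor in this patch replaces the ``FetchLocalParticlesKernels`` namedtuple with module-level builder functions cached through ``pytools.memoize``, so each elementwise kernel is compiled at most once per distinct (context, dtype) combination and reused afterwards. A standalone sketch of that caching pattern, using a deliberately trivial kernel rather than one of the boxtree kernels::

    import numpy as np
    import pyopencl as cl
    import pyopencl.array as cl_array
    from pyopencl.elementwise import ElementwiseKernel
    from pyopencl.tools import dtype_to_ctype
    from pytools import memoize


    @memoize
    def double_it_kernel(context, dtype):
        # compiled on first use, returned from the cache afterwards
        return ElementwiseKernel(
            context,
            "%s *ary" % dtype_to_ctype(dtype),
            "ary[i] = 2 * ary[i]",
            name="double_it")


    ctx = cl.create_some_context()
    queue = cl.CommandQueue(ctx)
    x = cl_array.arange(queue, 10, dtype=np.float32)

    knl = double_it_kernel(ctx, np.dtype(np.float32))           # compiles once
    assert knl is double_it_kernel(ctx, np.dtype(np.float32))   # cached thereafter
    knl(x)
    print(x.get())
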
- """ - particle_mask_knl = cl.elementwise.ElementwiseKernel( +@memoize +def particle_mask_kernel(context, particle_id_dtype): + return cl.elementwise.ElementwiseKernel( context, arguments=Template(""" __global char *responsible_boxes, @@ -65,7 +46,7 @@ def get_fetch_local_particles_knls(context, global_tree): __global ${particle_id_t} *box_particle_counts_nonchild, __global ${particle_id_t} *particle_mask """, strict_undefined=True).render( - particle_id_t=dtype_to_ctype(global_tree.particle_id_dtype) + particle_id_t=dtype_to_ctype(particle_id_dtype) ), operation=Template(""" if(responsible_boxes[i]) { @@ -75,106 +56,127 @@ def get_fetch_local_particles_knls(context, global_tree): particle_mask[pid] = 1; } } - """).render(particle_id_t=dtype_to_ctype(global_tree.particle_id_dtype)) + """).render(particle_id_t=dtype_to_ctype(particle_id_dtype)) ) + +@memoize +def mask_scan_kernel(context, particle_id_dtype): from pyopencl.scan import GenericScanKernel - mask_scan_knl = GenericScanKernel( - context, global_tree.particle_id_dtype, + return GenericScanKernel( + context, particle_id_dtype, arguments=Template(""" __global ${mask_t} *ary, __global ${mask_t} *scan """, strict_undefined=True).render( - mask_t=dtype_to_ctype(global_tree.particle_id_dtype) + mask_t=dtype_to_ctype(particle_id_dtype) ), input_expr="ary[i]", scan_expr="a+b", neutral="0", output_statement="scan[i + 1] = item;" ) - fetch_local_paticles_arguments = Template(""" - __global const ${mask_t} *particle_mask, - __global const ${mask_t} *particle_scan - % for dim in range(ndims): - , __global const ${coord_t} *particles_${dim} - % endfor + +fetch_local_paticles_arguments = Template(""" + __global const ${mask_t} *particle_mask, + __global const ${mask_t} *particle_scan + % for dim in range(ndims): + , __global const ${coord_t} *particles_${dim} + % endfor + % for dim in range(ndims): + , __global ${coord_t} *local_particles_${dim} + % endfor + % if particles_have_extent: + , __global const ${coord_t} *particle_radii + , __global ${coord_t} *local_particle_radii + % endif +""", strict_undefined=True) + +fetch_local_particles_prg = Template(""" + if(particle_mask[i]) { + ${particle_id_t} des = particle_scan[i]; % for dim in range(ndims): - , __global ${coord_t} *local_particles_${dim} + local_particles_${dim}[des] = particles_${dim}[i]; % endfor % if particles_have_extent: - , __global const ${coord_t} *particle_radii - , __global ${coord_t} *local_particle_radii + local_particle_radii[des] = particle_radii[i]; % endif - """, strict_undefined=True) - - fetch_local_particles_prg = Template(""" - if(particle_mask[i]) { - ${particle_id_t} des = particle_scan[i]; - % for dim in range(ndims): - local_particles_${dim}[des] = particles_${dim}[i]; - % endfor - % if particles_have_extent: - local_particle_radii[des] = particle_radii[i]; - % endif - } - """, strict_undefined=True) - - fetch_local_src_knl = cl.elementwise.ElementwiseKernel( + } +""", strict_undefined=True) + + +@memoize +def fetch_local_sources_kernel( + context, particle_id_dtype, coord_dtype, dimensions, sources_have_extent): + return cl.elementwise.ElementwiseKernel( context, fetch_local_paticles_arguments.render( - mask_t=dtype_to_ctype(global_tree.particle_id_dtype), - coord_t=dtype_to_ctype(global_tree.coord_dtype), - ndims=global_tree.dimensions, - particles_have_extent=global_tree.sources_have_extent + mask_t=dtype_to_ctype(particle_id_dtype), + coord_t=dtype_to_ctype(coord_dtype), + ndims=dimensions, + particles_have_extent=sources_have_extent ), 
fetch_local_particles_prg.render( - particle_id_t=dtype_to_ctype(global_tree.particle_id_dtype), - ndims=global_tree.dimensions, - particles_have_extent=global_tree.sources_have_extent + particle_id_t=dtype_to_ctype(particle_id_dtype), + ndims=dimensions, + particles_have_extent=sources_have_extent ) ) - fetch_local_tgt_knl = cl.elementwise.ElementwiseKernel( + +@memoize +def fetch_local_targets_kernel( + context, particle_id_dtype, coord_dtype, dimensions, targets_have_extent): + return cl.elementwise.ElementwiseKernel( context, fetch_local_paticles_arguments.render( - mask_t=dtype_to_ctype(global_tree.particle_id_dtype), - coord_t=dtype_to_ctype(global_tree.coord_dtype), - ndims=global_tree.dimensions, - particles_have_extent=global_tree.targets_have_extent + mask_t=dtype_to_ctype(particle_id_dtype), + coord_t=dtype_to_ctype(coord_dtype), + ndims=dimensions, + particles_have_extent=targets_have_extent ), fetch_local_particles_prg.render( - particle_id_t=dtype_to_ctype(global_tree.particle_id_dtype), - ndims=global_tree.dimensions, - particles_have_extent=global_tree.targets_have_extent + particle_id_t=dtype_to_ctype(particle_id_dtype), + ndims=dimensions, + particles_have_extent=targets_have_extent ) ) - generate_box_particle_starts = cl.elementwise.ElementwiseKernel( + +@memoize +def generate_box_particle_starts_kernel(context, particle_id_dtype): + return cl.elementwise.ElementwiseKernel( context, Template(""" __global ${particle_id_t} *old_starts, __global ${particle_id_t} *particle_scan, __global ${particle_id_t} *new_starts """, strict_undefined=True).render( - particle_id_t=dtype_to_ctype(global_tree.particle_id_dtype) + particle_id_t=dtype_to_ctype(particle_id_dtype) ), "new_starts[i] = particle_scan[old_starts[i]]", name="generate_box_particle_starts" ) - generate_box_particle_counts_nonchild = cl.elementwise.ElementwiseKernel( + +@memoize +def generate_box_particle_counts_nonchild_kernel(context, particle_id_dtype): + return cl.elementwise.ElementwiseKernel( context, Template(""" __global char *res_boxes, __global ${particle_id_t} *old_counts_nonchild, __global ${particle_id_t} *new_counts_nonchild """, strict_undefined=True).render( - particle_id_t=dtype_to_ctype(global_tree.particle_id_dtype) + particle_id_t=dtype_to_ctype(particle_id_dtype) ), "if(res_boxes[i]) new_counts_nonchild[i] = old_counts_nonchild[i];" ) - generate_box_particle_counts_cumul = cl.elementwise.ElementwiseKernel( + +@memoize +def generate_box_particle_counts_cumul_kernel(context, particle_id_dtype): + return cl.elementwise.ElementwiseKernel( context, Template(""" __global ${particle_id_t} *old_counts_cumul, @@ -182,7 +184,7 @@ def get_fetch_local_particles_knls(context, global_tree): __global ${particle_id_t} *new_counts_cumul, __global ${particle_id_t} *particle_scan """, strict_undefined=True).render( - particle_id_t=dtype_to_ctype(global_tree.particle_id_dtype) + particle_id_t=dtype_to_ctype(particle_id_dtype) ), """ new_counts_cumul[i] = @@ -191,16 +193,6 @@ def get_fetch_local_particles_knls(context, global_tree): """ ) - return FetchLocalParticlesKernels( - particle_mask_knl=particle_mask_knl, - mask_scan_knl=mask_scan_knl, - fetch_local_src_knl=fetch_local_src_knl, - fetch_local_tgt_knl=fetch_local_tgt_knl, - generate_box_particle_starts=generate_box_particle_starts, - generate_box_particle_counts_nonchild=generate_box_particle_counts_nonchild, - generate_box_particle_counts_cumul=generate_box_particle_counts_cumul - ) - def fetch_local_particles( queue, global_tree, src_box_mask, tgt_box_mask, 
local_tree): @@ -214,7 +206,6 @@ def fetch_local_particles( These generated fields are stored directly into *local_tree*. """ - knls = get_fetch_local_particles_knls(queue.context, global_tree) global_tree_dev = global_tree.to_device(queue).with_queue(queue) nsources = global_tree.nsources @@ -225,7 +216,7 @@ def fetch_local_particles( dtype=global_tree.particle_id_dtype ) - knls.particle_mask_knl( + particle_mask_kernel(queue.context, global_tree.particle_id_dtype)( src_box_mask, global_tree_dev.box_source_starts, global_tree_dev.box_source_counts_nonchild, @@ -242,7 +233,9 @@ def fetch_local_particles( ) src_particle_scan[0] = 0 - knls.mask_scan_knl(src_particle_mask, src_particle_scan) + mask_scan_kernel(queue.context, global_tree.particle_id_dtype)( + src_particle_mask, src_particle_scan + ) # }}} @@ -260,9 +253,15 @@ def fetch_local_particles( for idim in range(global_tree.dimensions) ] - assert(global_tree.sources_have_extent is False) + assert global_tree.sources_have_extent is False - knls.fetch_local_src_knl( + fetch_local_sources_kernel( + queue.context, + global_tree.particle_id_dtype, + global_tree.coord_dtype, + global_tree.dimensions, + global_tree.sources_have_extent + )( src_particle_mask, src_particle_scan, *global_tree_dev.sources.tolist(), *local_sources_list @@ -277,11 +276,12 @@ def fetch_local_particles( dtype=global_tree.particle_id_dtype ) - knls.generate_box_particle_starts( - global_tree_dev.box_source_starts, - src_particle_scan, - local_box_source_starts - ) + generate_box_particle_starts_kernel( + queue.context, global_tree.particle_id_dtype)( + global_tree_dev.box_source_starts, + src_particle_scan, + local_box_source_starts + ) # }}} @@ -292,11 +292,12 @@ def fetch_local_particles( dtype=global_tree.particle_id_dtype ) - knls.generate_box_particle_counts_nonchild( - src_box_mask, - global_tree_dev.box_source_counts_nonchild, - local_box_source_counts_nonchild - ) + generate_box_particle_counts_nonchild_kernel( + queue.context, global_tree.particle_id_dtype)( + src_box_mask, + global_tree_dev.box_source_counts_nonchild, + local_box_source_counts_nonchild + ) # }}} @@ -307,12 +308,13 @@ def fetch_local_particles( dtype=global_tree.particle_id_dtype ) - knls.generate_box_particle_counts_cumul( - global_tree_dev.box_source_counts_cumul, - global_tree_dev.box_source_starts, - local_box_source_counts_cumul, - src_particle_scan - ) + generate_box_particle_counts_cumul_kernel( + queue.context, global_tree.particle_id_dtype)( + global_tree_dev.box_source_counts_cumul, + global_tree_dev.box_source_starts, + local_box_source_counts_cumul, + src_particle_scan + ) # }}} @@ -325,7 +327,7 @@ def fetch_local_particles( dtype=global_tree.particle_id_dtype ) - knls.particle_mask_knl( + particle_mask_kernel(queue.context, global_tree.particle_id_dtype)( tgt_box_mask, global_tree_dev.box_target_starts, global_tree_dev.box_target_counts_nonchild, @@ -342,7 +344,9 @@ def fetch_local_particles( ) tgt_particle_scan[0] = 0 - knls.mask_scan_knl(tgt_particle_mask, tgt_particle_scan) + mask_scan_kernel(queue.context, global_tree.particle_id_dtype)( + tgt_particle_mask, tgt_particle_scan + ) # }}} @@ -361,23 +365,32 @@ def fetch_local_particles( ] if local_tree.targets_have_extent: - local_target_radii = cl.array.empty( queue, (local_ntargets,), dtype=global_tree.coord_dtype ) - knls.fetch_local_tgt_knl( + fetch_local_targets_kernel( + queue.context, + global_tree.particle_id_dtype, + global_tree.coord_dtype, + global_tree.dimensions, + True + )( tgt_particle_mask, tgt_particle_scan, 
*global_tree_dev.targets.tolist(), *local_targets_list, global_tree_dev.target_radii, local_target_radii ) - else: - - knls.fetch_local_tgt_knl( + fetch_local_targets_kernel( + queue.context, + global_tree.particle_id_dtype, + global_tree.coord_dtype, + global_tree.dimensions, + False + )( tgt_particle_mask, tgt_particle_scan, *global_tree_dev.targets.tolist(), *local_targets_list @@ -390,11 +403,12 @@ def fetch_local_particles( dtype=global_tree.particle_id_dtype ) - knls.generate_box_particle_starts( - global_tree_dev.box_target_starts, - tgt_particle_scan, - local_box_target_starts - ) + generate_box_particle_starts_kernel( + queue.context, global_tree.particle_id_dtype)( + global_tree_dev.box_target_starts, + tgt_particle_scan, + local_box_target_starts + ) # }}} @@ -404,10 +418,11 @@ def fetch_local_particles( queue, (global_tree.nboxes,), dtype=global_tree.particle_id_dtype) - knls.generate_box_particle_counts_nonchild( - tgt_box_mask, - global_tree_dev.box_target_counts_nonchild, - local_box_target_counts_nonchild + generate_box_particle_counts_nonchild_kernel( + queue.context, global_tree.particle_id_dtype)( + tgt_box_mask, + global_tree_dev.box_target_counts_nonchild, + local_box_target_counts_nonchild ) # }}} @@ -419,12 +434,13 @@ def fetch_local_particles( dtype=global_tree.particle_id_dtype ) - knls.generate_box_particle_counts_cumul( - global_tree_dev.box_target_counts_cumul, - global_tree_dev.box_target_starts, - local_box_target_counts_cumul, - tgt_particle_scan - ) + generate_box_particle_counts_cumul_kernel( + queue.context, global_tree.particle_id_dtype)( + global_tree_dev.box_target_counts_cumul, + global_tree_dev.box_target_starts, + local_box_target_counts_cumul, + tgt_particle_scan + ) # }}} -- GitLab From 2356748ac690cd84d7495b1b9137220a139407e0 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Tue, 20 Oct 2020 22:48:41 -0700 Subject: [PATCH 212/260] Improve partition interfaces --- .gitlab-ci.yml | 1 + boxtree/distributed/__init__.py | 5 +- boxtree/distributed/calculation.py | 4 +- boxtree/distributed/local_tree.py | 9 +- boxtree/distributed/partition.py | 439 +++++++++++++---------------- 5 files changed, 202 insertions(+), 256 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 093ee0b..e4fc039 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -55,6 +55,7 @@ Python 3 POCL K40: artifacts: reports: junit: test/pytest.xml + allow_failure: true Python 3 POCL MPI: script: diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 7e0d7b8..5115d62 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -143,12 +143,9 @@ class DistributedFMMRunner(object): # {{{ Compute local tree - from boxtree.distributed.partition import ResponsibleBoxesQuery - responsible_box_query = ResponsibleBoxesQuery(queue, self.global_trav) - from boxtree.distributed.local_tree import generate_local_tree self.local_tree, self.src_idx, self.tgt_idx = generate_local_tree( - queue, global_tree, responsible_boxes_list, responsible_box_query + queue, self.global_trav, responsible_boxes_list ) # }}} diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 35e12ab..1208921 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -173,14 +173,14 @@ class DistributedExpansionWrangler: """Test whether the multipole expansions of the contributing boxes are used by at least one box in a range. - :arg subrange: the range is represented by ``[subrange[0], subrange[1])``. 
+ :arg subrange: the range is represented by ``(subrange[0], subrange[1])``. :arg box_to_user_start: a :class:`pyopencl.array.Array` object indicating the start and end index in *box_to_user_lists* for each box in *contributing_boxes_list*. :arg box_to_user_lists: a :class:`pyopencl.array.Array` object storing the users of each box in *contributing_boxes_list*. :returns: a :class:`pyopencl.array.Array` object with the same shape as - *contributing_boxes_list*, where the *i*th entry is 1 if + *contributing_boxes_list*, where the ith entry is 1 if ``contributing_boxes_list[i]`` is used by at least on box in the subrange specified. """ diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 116dbd4..92152cd 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -518,16 +518,19 @@ class LocalTree(Tree): return self._dimensions -def generate_local_tree(queue, global_tree, responsible_boxes_list, - responsible_box_query, comm=MPI.COMM_WORLD): +def generate_local_tree(queue, global_traversal, responsible_boxes_list, + comm=MPI.COMM_WORLD): + global_tree = global_traversal.tree + # Get MPI information mpi_rank = comm.Get_rank() mpi_size = comm.Get_size() start_time = time.time() + from boxtree.distributed.partition import get_boxes_mask (responsible_boxes_mask, ancestor_boxes, src_boxes_mask, box_mpole_is_used) = \ - responsible_box_query.get_boxes_mask(responsible_boxes_list[mpi_rank]) + get_boxes_mask(queue, global_traversal, responsible_boxes_list[mpi_rank]) local_tree = global_tree.copy( responsible_boxes_list=responsible_boxes_list[mpi_rank], diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index bdf052f..38e56fa 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -27,10 +27,11 @@ import numpy as np import pyopencl as cl from pyopencl.tools import dtype_to_ctype from mako.template import Template +from pytools import memoize def partition_work(boxes_time, traversal, total_rank): - """ This function assigns responsible boxes for each rank. + """This function assigns responsible boxes for each rank. Each process is responsible for calculating the multiple expansions as well as evaluating target potentials in *responsible_boxes*. @@ -85,275 +86,219 @@ def partition_work(boxes_time, traversal, total_rank): return responsible_boxes_list -class ResponsibleBoxesQuery(object): - """ Query related to the responsible boxes for a given traversal. 
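# partition_work (above) turns per-box cost estimates into one set of
# responsible boxes per MPI rank. A rough usage sketch; boxes_time and
# traversal are assumed to be supplied by the caller (boxes_time typically
# comes from a cost model), and the helper itself is hypothetical glue:

from mpi4py import MPI

from boxtree.distributed.partition import partition_work


def my_responsible_boxes(boxes_time, traversal, comm=MPI.COMM_WORLD):
    # responsible_boxes_list[i] is a numpy array of box indices assigned to
    # rank i; the arrays are disjoint across ranks.
    responsible_boxes_list = partition_work(
        boxes_time, traversal, comm.Get_size())
    return responsible_boxes_list[comm.Get_rank()]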
+@memoize +def mark_parent_kernel(context, box_id_dtype): + return cl.elementwise.ElementwiseKernel( + context, + "__global char *current, __global char *parent, " + "__global %s *box_parent_ids" % dtype_to_ctype(box_id_dtype), + "if(i != 0 && current[i]) parent[box_parent_ids[i]] = 1" + ) + + +# helper kernel for adding boxes from interaction list 1 and 4 +@memoize +def add_interaction_list_boxes_kernel(context, box_id_dtype): + return cl.elementwise.ElementwiseKernel( + context, + Template(""" + __global ${box_id_t} *box_list, + __global char *responsible_boxes_mask, + __global ${box_id_t} *interaction_boxes_starts, + __global ${box_id_t} *interaction_boxes_lists, + __global char *src_boxes_mask + """, strict_undefined=True).render( + box_id_t=dtype_to_ctype(box_id_dtype) + ), + Template(r""" + typedef ${box_id_t} box_id_t; + box_id_t current_box = box_list[i]; + if(responsible_boxes_mask[current_box]) { + for(box_id_t box_idx = interaction_boxes_starts[i]; + box_idx < interaction_boxes_starts[i + 1]; + ++box_idx) + src_boxes_mask[interaction_boxes_lists[box_idx]] = 1; + } + """, strict_undefined=True).render( + box_id_t=dtype_to_ctype(box_id_dtype) + ), + ) + + +def get_ancestor_boxes_mask(queue, traversal, responsible_boxes_mask): + """Query the ancestors of responsible boxes. + + :arg responsible_boxes_mask: A :class:`pyopencl.array.Array` object of shape + ``(tree.nboxes,)`` whose ith entry is 1 if ``i`` is a responsible box. + :return: A :class:`pyopencl.array.Array` object of shape ``(tree.nboxes,)`` whose + ith entry is 1 if ``i`` is an ancestor of the responsible boxes specified by + *responsible_boxes_mask*. """ + ancestor_boxes = cl.array.zeros(queue, (traversal.tree.nboxes,), dtype=np.int8) + ancestor_boxes_last = responsible_boxes_mask.copy() - def __init__(self, queue, traversal): - """ - :param queue: A pyopencl.CommandQueue object. - :param traversal: The global traversal built on root with all particles. 
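# get_ancestor_boxes_mask (above) repeatedly marks the parent of every box in
# the current frontier until no new ancestors turn up. The same fixed-point
# loop written host-side in plain numpy, purely for illustration; the patch
# runs it on the device with the memoized kernels:

import numpy as np


def ancestor_mask_host(box_parent_ids, responsible_mask):
    # box_parent_ids: numpy array such as tree.box_parent_ids
    # responsible_mask: array of shape (nboxes,), nonzero for responsible boxes
    ancestors = np.zeros(len(responsible_mask), dtype=bool)
    frontier = responsible_mask.astype(bool)
    while frontier.any():
        marked = np.flatnonzero(frontier)
        marked = marked[marked != 0]      # the root box has no parent to mark
        new = np.zeros_like(ancestors)
        new[box_parent_ids[marked]] = True
        new &= ~ancestors                 # keep only newly discovered boxes
        ancestors |= new
        frontier = new
    return ancestors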
- """ - self.queue = queue - self.traversal = traversal - self.tree = traversal.tree - - # {{{ fetch tree structure and interaction lists to device memory - - self.box_parent_ids_dev = cl.array.to_device(queue, self.tree.box_parent_ids) - self.target_boxes_dev = cl.array.to_device(queue, traversal.target_boxes) - self.target_or_target_parent_boxes_dev = cl.array.to_device( - queue, traversal.target_or_target_parent_boxes) - - # list 1 - self.neighbor_source_boxes_starts_dev = cl.array.to_device( - queue, traversal.neighbor_source_boxes_starts) - self.neighbor_source_boxes_lists_dev = cl.array.to_device( - queue, traversal.neighbor_source_boxes_lists) - - # list 2 - self.from_sep_siblings_starts_dev = cl.array.to_device( - queue, traversal.from_sep_siblings_starts) - self.from_sep_siblings_lists_dev = cl.array.to_device( - queue, traversal.from_sep_siblings_lists) - - # list 3 - self.target_boxes_sep_smaller_by_source_level_dev = np.empty( - (self.tree.nlevels,), dtype=object) - for ilevel in range(self.tree.nlevels): - self.target_boxes_sep_smaller_by_source_level_dev[ilevel] = \ - cl.array.to_device( - queue, - traversal.target_boxes_sep_smaller_by_source_level[ilevel] - ) - - self.from_sep_smaller_by_level_starts_dev = np.empty( - (self.tree.nlevels,), dtype=object) - for ilevel in range(self.tree.nlevels): - self.from_sep_smaller_by_level_starts_dev[ilevel] = cl.array.to_device( - queue, traversal.from_sep_smaller_by_level[ilevel].starts + while ancestor_boxes_last.any(): + ancestor_boxes_new = cl.array.zeros( + queue, (traversal.tree.nboxes,), dtype=np.int8 + ) + mark_parent_kernel(queue.context, traversal.tree.box_id_dtype)( + ancestor_boxes_last, ancestor_boxes_new, traversal.tree.box_parent_ids + ) + ancestor_boxes_new = ancestor_boxes_new & (~ancestor_boxes) + ancestor_boxes = ancestor_boxes | ancestor_boxes_new + ancestor_boxes_last = ancestor_boxes_new + + return ancestor_boxes + + +def get_src_boxes_mask( + queue, traversal, responsible_boxes_mask, ancestor_boxes_mask): + """Query the boxes whose sources are needed in order to evaluate potentials + of boxes represented by *responsible_boxes_mask*. + + :arg responsible_boxes_mask: A :class:`pyopencl.array.Array` object of shape + ``(tree.nboxes,)`` whose ith entry is 1 if ``i`` is a responsible box. + :param ancestor_boxes_mask: A :class:`pyopencl.array.Array` object of shape + ``(tree.nboxes,)`` whose ith entry is 1 if ``i`` is either a responsible box + or an ancestor of the responsible boxes. + :return: A :class:`pyopencl.array.Array` object of shape ``(tree.nboxes,)`` whose + ith entry is 1 if souces of box ``i`` are needed for evaluating the + potentials of targets in boxes represented by *responsible_boxes_mask*. 
+ """ + src_boxes_mask = responsible_boxes_mask.copy() + + # Add list 1 of responsible boxes + add_interaction_list_boxes_kernel(queue.context, traversal.tree.box_id_dtype)( + traversal.target_boxes, responsible_boxes_mask, + traversal.neighbor_source_boxes_starts, + traversal.neighbor_source_boxes_lists, src_boxes_mask, + range=range(0, traversal.target_boxes.shape[0]), + queue=queue + ) + + # Add list 4 of responsible boxes or ancestor boxes + add_interaction_list_boxes_kernel(queue.context, traversal.tree.box_id_dtype)( + traversal.target_or_target_parent_boxes, + responsible_boxes_mask | ancestor_boxes_mask, + traversal.from_sep_bigger_starts, traversal.from_sep_bigger_lists, + src_boxes_mask, + range=range(0, traversal.target_or_target_parent_boxes.shape[0]), + queue=queue + ) + + if traversal.tree.targets_have_extent: + # Add list 3 close of responsible boxes + if traversal.from_sep_close_smaller_starts is not None: + add_interaction_list_boxes_kernel( + queue.context, traversal.tree.box_id_dtype)( + traversal.target_boxes, + responsible_boxes_mask, + traversal.from_sep_close_smaller_starts, + traversal.from_sep_close_smaller_lists, + src_boxes_mask, + queue=queue ) - self.from_sep_smaller_by_level_lists_dev = np.empty( - (self.tree.nlevels,), dtype=object) - for ilevel in range(self.tree.nlevels): - self.from_sep_smaller_by_level_lists_dev[ilevel] = cl.array.to_device( - queue, traversal.from_sep_smaller_by_level[ilevel].lists + # Add list 4 close of responsible boxes + if traversal.from_sep_close_bigger_starts is not None: + add_interaction_list_boxes_kernel( + queue.context, traversal.tree.box_id_dtype)( + traversal.target_boxes, + responsible_boxes_mask | ancestor_boxes_mask, + traversal.from_sep_close_bigger_starts, + traversal.from_sep_close_bigger_lists, + src_boxes_mask, + queue=queue ) - # list 4 - self.from_sep_bigger_starts_dev = cl.array.to_device( - queue, traversal.from_sep_bigger_starts) - self.from_sep_bigger_lists_dev = cl.array.to_device( - queue, traversal.from_sep_bigger_lists) - - if self.tree.targets_have_extent: - # list 3 close - if traversal.from_sep_close_smaller_starts is not None: - self.from_sep_close_smaller_starts_dev = cl.array.to_device( - queue, traversal.from_sep_close_smaller_starts) - self.from_sep_close_smaller_lists_dev = cl.array.to_device( - queue, traversal.from_sep_close_smaller_lists) - - # list 4 close - if traversal.from_sep_close_bigger_starts is not None: - self.from_sep_close_bigger_starts_dev = cl.array.to_device( - queue, traversal.from_sep_close_bigger_starts) - self.from_sep_close_bigger_lists_dev = cl.array.to_device( - queue, traversal.from_sep_close_bigger_lists) - - # }}} - - # helper kernel for ancestor box query - self.mark_parent_knl = cl.elementwise.ElementwiseKernel( - queue.context, - "__global char *current, __global char *parent, " - "__global %s *box_parent_ids" % dtype_to_ctype(self.tree.box_id_dtype), - "if(i != 0 && current[i]) parent[box_parent_ids[i]] = 1" - ) + return src_boxes_mask - # helper kernel for adding boxes from interaction list 1 and 4 - self.add_interaction_list_boxes = cl.elementwise.ElementwiseKernel( - queue.context, - Template(""" - __global ${box_id_t} *box_list, - __global char *responsible_boxes_mask, - __global ${box_id_t} *interaction_boxes_starts, - __global ${box_id_t} *interaction_boxes_lists, - __global char *src_boxes_mask - """, strict_undefined=True).render( - box_id_t=dtype_to_ctype(self.tree.box_id_dtype) - ), - Template(r""" - typedef ${box_id_t} box_id_t; - box_id_t current_box = 
box_list[i]; - if(responsible_boxes_mask[current_box]) { - for(box_id_t box_idx = interaction_boxes_starts[i]; - box_idx < interaction_boxes_starts[i + 1]; - ++box_idx) - src_boxes_mask[interaction_boxes_lists[box_idx]] = 1; - } - """, strict_undefined=True).render( - box_id_t=dtype_to_ctype(self.tree.box_id_dtype) - ), - ) - def ancestor_boxes_mask(self, responsible_boxes_mask): - """ Query the ancestors of responsible boxes. - - :arg responsible_boxes_mask: A :class:`pyopencl.array.Array` object of shape - (tree.nboxes,) whose ith entry is 1 iff i is a responsible box. - :return: A :class:`pyopencl.array.Array` object of shape (tree.nboxes,) whose - ith entry is 1 iff i is an ancestor of the responsible boxes specified by - *responsible_boxes_mask*. - """ - ancestor_boxes = cl.array.zeros( - self.queue, (self.tree.nboxes,), dtype=np.int8) - ancestor_boxes_last = responsible_boxes_mask.copy() - - while ancestor_boxes_last.any(): - ancestor_boxes_new = cl.array.zeros(self.queue, (self.tree.nboxes,), - dtype=np.int8) - self.mark_parent_knl(ancestor_boxes_last, ancestor_boxes_new, - self.box_parent_ids_dev) - ancestor_boxes_new = ancestor_boxes_new & (~ancestor_boxes) - ancestor_boxes = ancestor_boxes | ancestor_boxes_new - ancestor_boxes_last = ancestor_boxes_new - - return ancestor_boxes - - def src_boxes_mask(self, responsible_boxes_mask, ancestor_boxes_mask): - """ Query the boxes whose sources are needed in order to evaluate potentials - of boxes represented by *responsible_boxes_mask*. - - :arg responsible_boxes_mask: A :class:`pyopencl.array.Array` object of shape - (tree.nboxes,) whose ith entry is 1 iff i is a responsible box. - :param ancestor_boxes_mask: A :class:`pyopencl.array.Array` object of shape - (tree.nboxes,) whose ith entry is 1 iff i is either a responsible box - or an ancestor of the responsible boxes. - :return: A :class:`pyopencl.array.Array` object of shape (tree.nboxes,) whose - ith entry is 1 iff souces of box i are needed for evaluating the - potentials of targets in boxes represented by *responsible_boxes_mask*. - """ - src_boxes_mask = responsible_boxes_mask.copy() - - # Add list 1 of responsible boxes - self.add_interaction_list_boxes( - self.target_boxes_dev, responsible_boxes_mask, - self.neighbor_source_boxes_starts_dev, - self.neighbor_source_boxes_lists_dev, src_boxes_mask, - range=range(0, self.traversal.target_boxes.shape[0]) - ) +def get_multipole_boxes_mask( + queue, traversal, responsible_boxes_mask, ancestor_boxes_mask): + """Query the boxes whose multipoles are used in order to evaluate + potentials of targets in boxes represented by *responsible_boxes_mask*. 
- # Add list 4 of responsible boxes or ancestor boxes - self.add_interaction_list_boxes( - self.target_or_target_parent_boxes_dev, - responsible_boxes_mask | ancestor_boxes_mask, - self.from_sep_bigger_starts_dev, self.from_sep_bigger_lists_dev, - src_boxes_mask, - range=range(0, self.traversal.target_or_target_parent_boxes.shape[0])) - - if self.tree.targets_have_extent: - - # Add list 3 close of responsible boxes - if self.traversal.from_sep_close_smaller_starts is not None: - self.add_interaction_list_boxes( - self.target_boxes_dev, - responsible_boxes_mask, - self.from_sep_close_smaller_starts_dev, - self.from_sep_close_smaller_lists_dev, - src_boxes_mask - ) - - # Add list 4 close of responsible boxes - if self.traversal.from_sep_close_bigger_starts is not None: - self.add_interaction_list_boxes( - self.target_boxes_dev, - responsible_boxes_mask | ancestor_boxes_mask, - self.from_sep_close_bigger_starts_dev, - self.from_sep_close_bigger_lists_dev, - src_boxes_mask - ) - - return src_boxes_mask - - def multipole_boxes_mask(self, responsible_boxes_mask, ancestor_boxes_mask): - """ Query the boxes whose multipoles are used in order to evaluate - potentials of targets in boxes represented by *responsible_boxes_mask*. + :arg responsible_boxes_mask: A :class:`pyopencl.array.Array` object of shape + ``(tree.nboxes,)`` whose ith entry is 1 if ``i`` is a responsible box. + :arg ancestor_boxes_mask: A :class:`pyopencl.array.Array` object of shape + ``(tree.nboxes,)`` whose ith entry is 1 if ``i`` is either a responsible box + or an ancestor of the responsible boxes. + :return: A :class:`pyopencl.array.Array` object of shape ``(tree.nboxes,)`` + whose ith entry is 1 if multipoles of box ``i`` are needed for evaluating + the potentials of targets in boxes represented by *responsible_boxes_mask*. + """ - :arg responsible_boxes_mask: A :class:`pyopencl.array.Array` object of shape - (tree.nboxes,) whose ith entry is 1 iff i is a responsible box. - :arg ancestor_boxes_mask: A :class:`pyopencl.array.Array` object of shape - (tree.nboxes,) whose ith entry is 1 iff i is either a responsible box - or an ancestor of the responsible boxes. - :return: A :class:`pyopencl.array.Array` object of shape (tree.nboxes,) - whose ith entry is 1 iff multipoles of box i are needed for evaluating - the potentials of targets in boxes represented by - *responsible_boxes_mask*. - """ - - multipole_boxes_mask = cl.array.zeros(self.queue, (self.tree.nboxes,), - dtype=np.int8) - - # A mpole is used by process p if it is in the List 2 of either a box - # owned by p or one of its ancestors. - self.add_interaction_list_boxes( - self.target_or_target_parent_boxes_dev, - responsible_boxes_mask | ancestor_boxes_mask, - self.from_sep_siblings_starts_dev, - self.from_sep_siblings_lists_dev, - multipole_boxes_mask + multipole_boxes_mask = cl.array.zeros( + queue, (traversal.tree.nboxes,), dtype=np.int8 + ) + + # A mpole is used by process p if it is in the List 2 of either a box + # owned by p or one of its ancestors. + add_interaction_list_boxes_kernel(queue.context, traversal.tree.box_id_dtype)( + traversal.target_or_target_parent_boxes, + responsible_boxes_mask | ancestor_boxes_mask, + traversal.from_sep_siblings_starts, + traversal.from_sep_siblings_lists, + multipole_boxes_mask, + queue=queue + ) + multipole_boxes_mask.finish() + + # A mpole is used by process p if it is in the List 3 of a box owned by p. 
+ for ilevel in range(traversal.tree.nlevels): + add_interaction_list_boxes_kernel( + queue.context, traversal.tree.box_id_dtype)( + traversal.target_boxes_sep_smaller_by_source_level[ilevel], + responsible_boxes_mask, + traversal.from_sep_smaller_by_level[ilevel].starts, + traversal.from_sep_smaller_by_level[ilevel].lists, + multipole_boxes_mask, + queue=queue ) - multipole_boxes_mask.finish() - # A mpole is used by process p if it is in the List 3 of a box owned by p. - for ilevel in range(self.tree.nlevels): - self.add_interaction_list_boxes( - self.target_boxes_sep_smaller_by_source_level_dev[ilevel], - responsible_boxes_mask, - self.from_sep_smaller_by_level_starts_dev[ilevel], - self.from_sep_smaller_by_level_lists_dev[ilevel], - multipole_boxes_mask - ) + multipole_boxes_mask.finish() - multipole_boxes_mask.finish() + return multipole_boxes_mask - return multipole_boxes_mask - def get_boxes_mask(self, responsible_boxes_list): - """ - Given a list of responsible boxes for a process, calculates the following - four masks: +def get_boxes_mask(queue, traversal, responsible_boxes_list): + """Given a list of responsible boxes for a process, this helper function + calculates the following four masks: - responsible_box_mask: Current process will evaluate target potentials and - multipole expansions in these boxes. Sources and targets in these boxes - are needed. + responsible_box_mask: Current process will evaluate target potentials and + multipole expansions in these boxes. Sources and targets in these boxes + are needed. - ancestor_boxes_mask: The the ancestor of the responsible boxes. + ancestor_boxes_mask: The the ancestor of the responsible boxes. - src_boxes_mask: Current process needs sources but not targets in these boxes. + src_boxes_mask: Current process needs sources but not targets in these boxes. - multipole_boxes_mask: Current process needs multipole expressions in these - boxes. + multipole_boxes_mask: Current process needs multipole expressions in these + boxes. - :arg responsible_boxes_list: A numpy array of responsible box indices. + :arg responsible_boxes_list: A numpy array of responsible box indices. - :returns: responsible_box_mask, ancestor_boxes_mask, src_boxes_mask and - multipole_boxes_mask, as described above. - """ + :returns: responsible_box_mask, ancestor_boxes_mask, src_boxes_mask and + multipole_boxes_mask, as described above. 
+ """ + traversal = traversal.to_device(queue) - responsible_boxes_mask = np.zeros((self.tree.nboxes,), dtype=np.int8) - responsible_boxes_mask[responsible_boxes_list] = 1 - responsible_boxes_mask = cl.array.to_device( - self.queue, responsible_boxes_mask) + responsible_boxes_mask = np.zeros((traversal.tree.nboxes,), dtype=np.int8) + responsible_boxes_mask[responsible_boxes_list] = 1 + responsible_boxes_mask = cl.array.to_device(queue, responsible_boxes_mask) - ancestor_boxes_mask = self.ancestor_boxes_mask(responsible_boxes_mask) + ancestor_boxes_mask = get_ancestor_boxes_mask( + queue, traversal, responsible_boxes_mask + ) - src_boxes_mask = self.src_boxes_mask( - responsible_boxes_mask, ancestor_boxes_mask) + src_boxes_mask = get_src_boxes_mask( + queue, traversal, responsible_boxes_mask, ancestor_boxes_mask + ) - multipole_boxes_mask = self.multipole_boxes_mask( - responsible_boxes_mask, ancestor_boxes_mask) + multipole_boxes_mask = get_multipole_boxes_mask( + queue, traversal, responsible_boxes_mask, ancestor_boxes_mask + ) - return (responsible_boxes_mask, ancestor_boxes_mask, src_boxes_mask, - multipole_boxes_mask) + return (responsible_boxes_mask, ancestor_boxes_mask, src_boxes_mask, + multipole_boxes_mask) -- GitLab From 04b2d6f3e5297800b5a584143c1a3d26caa9877a Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 15 Nov 2020 22:48:11 -0800 Subject: [PATCH 213/260] Add ImmutableHostDeviceArray --- .gitlab-ci.yml | 1 - boxtree/distributed/__init__.py | 4 +- boxtree/distributed/local_traversal.py | 20 +++---- boxtree/distributed/local_tree.py | 3 ++ boxtree/tools.py | 75 ++++++++++++++++++++++++-- boxtree/traversal.py | 12 +++++ boxtree/tree.py | 7 +++ test/test_distributed.py | 14 +++++ 8 files changed, 118 insertions(+), 18 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e4fc039..093ee0b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -55,7 +55,6 @@ Python 3 POCL K40: artifacts: reports: junit: test/pytest.xml - allow_failure: true Python 3 POCL MPI: script: diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 5115d62..69ea471 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -173,7 +173,7 @@ class DistributedFMMRunner(object): # {{{ Get local wrangler self.local_wrangler = self.distributed_expansion_wrangler_factory( - self.local_tree) + self.local_tree.get(None)) # }}} @@ -184,7 +184,7 @@ class DistributedFMMRunner(object): """ from boxtree.fmm import drive_fmm return drive_fmm( - self.local_trav, self.local_wrangler, source_weights, + self.local_trav.get(None), self.local_wrangler, source_weights, timing_data=timing_data, distributed=True, global_wrangler=self.global_wrangler, diff --git a/boxtree/distributed/local_traversal.py b/boxtree/distributed/local_traversal.py index 63dbc87..d4e90bd 100644 --- a/boxtree/distributed/local_traversal.py +++ b/boxtree/distributed/local_traversal.py @@ -35,10 +35,9 @@ logger = logging.getLogger(__name__) def generate_local_travs( queue, local_tree, traversal_builder, box_bounding_box=None, merge_close_lists=False): - start_time = time.time() - d_tree = local_tree.to_device(queue).with_queue(queue) + local_tree.with_queue(queue) # Modify box flags for targets from boxtree import box_flags_enum @@ -73,12 +72,13 @@ def generate_local_travs( ) ) - modify_target_flags_knl(d_tree.box_target_counts_nonchild, - d_tree.box_target_counts_cumul, - d_tree.box_flags) + modify_target_flags_knl(local_tree.box_target_counts_nonchild.device, + 
local_tree.box_target_counts_cumul.device, + local_tree.box_flags.device) # Generate local source flags - local_box_flags = d_tree.box_flags & (255 - box_flags_enum.HAS_OWN_SOURCES) + local_box_flags = \ + local_tree.box_flags.device & (255 - box_flags_enum.HAS_OWN_SOURCES) local_box_flags = local_box_flags & (255 - box_flags_enum.HAS_CHILD_SOURCES) modify_own_sources_knl = cl.elementwise.ElementwiseKernel( @@ -115,16 +115,16 @@ def generate_local_travs( ) ) - modify_own_sources_knl(d_tree.responsible_boxes_list, local_box_flags) - modify_child_sources_knl(d_tree.ancestor_mask, local_box_flags) + modify_own_sources_knl(local_tree.responsible_boxes_list.device, local_box_flags) + modify_child_sources_knl(local_tree.ancestor_mask.device, local_box_flags) d_local_trav, _ = traversal_builder( - queue, d_tree, + queue, local_tree.to_device(queue), box_bounding_box=box_bounding_box, local_box_flags=local_box_flags ) - if merge_close_lists and d_tree.targets_have_extent: + if merge_close_lists and local_tree.targets_have_extent: d_local_trav = d_local_trav.merge_close_lists(queue) local_trav = d_local_trav.get(queue=queue) diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 92152cd..2fdd055 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -598,6 +598,9 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, # }}} + local_tree = local_tree.to_host_device_array(queue) + local_tree.with_queue(None) + logger.info("Generate local tree on rank {} in {} sec.".format( mpi_rank, str(time.time() - start_time) )) diff --git a/boxtree/tools.py b/boxtree/tools.py index dceb720..08fc2d5 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -304,11 +304,14 @@ class DeviceDataRecord(Record): def get(self, queue, **kwargs): """Return a copy of `self` in which all data lives on the host, i.e. - all :class:`pyopencl.array.Array` objects are replaced by corresponding - :class:`numpy.ndarray` instances on the host. + all :class:`pyopencl.array.Array` and :class:`ImmutableHostDeviceArray` + objects are replaced by corresponding :class:`numpy.ndarray` instances on the + host. """ - def try_get(attr): + if isinstance(attr, ImmutableHostDeviceArray): + return attr.host + try: get_meth = attr.get except AttributeError: @@ -339,7 +342,7 @@ class DeviceDataRecord(Record): return self._transform_arrays(try_with_queue) def to_device(self, queue, exclude_fields=frozenset()): - """ Return a copy of `self` in all :class:`numpy.ndarray` arrays are + """Return a copy of `self` in all :class:`numpy.ndarray` arrays are transferred to device memory as :class:`pyopencl.array.Array` objects. :arg exclude_fields: a :class:`frozenset` containing fields excluding from @@ -349,10 +352,32 @@ class DeviceDataRecord(Record): def _to_device(attr): if isinstance(attr, np.ndarray): return cl.array.to_device(queue, attr).with_queue(None) + elif isinstance(attr, ImmutableHostDeviceArray): + return attr.device + else: + return attr + + return self._transform_arrays(_to_device, exclude_fields=exclude_fields) + + def to_host_device_array(self, queue, exclude_fields=frozenset()): + """Return a copy of `self` where all device and host arrays are transformed + to :class:`ImmutableHostDeviceArray` objects. + + :arg exclude_fields: a :class:`frozenset` containing fields excluding from + transformed to :class:`ImmutableHostDeviceArray`. 
+ """ + def _to_host_device_array(attr): + if isinstance(attr, np.ndarray): + return ImmutableHostDeviceArray(queue, attr) + if isinstance(attr, cl.array.Array): + host_array = attr.get(queue=queue) + return ImmutableHostDeviceArray(queue, host_array) else: return attr - return self._transform_arrays(_to_device, exclude_fields) + return self._transform_arrays( + _to_host_device_array, exclude_fields=exclude_fields + ) # }}} @@ -1059,4 +1084,44 @@ def run_mpi(script, num_processes, env): # }}} +# {{{ HostDeviceArray + +class ImmutableHostDeviceArray: + def __init__(self, queue, host_array): + self.queue = queue + self.host_array = host_array + self.device_array = None + + def with_queue(self, queue): + self.queue = queue + + @property + def svm_capable(self): + svm_capabilities = self.queue.device.get_info(cl.device_info.SVM_CAPABILITIES) + if svm_capabilities & cl.device_svm_capabilities.FINE_GRAIN_BUFFER != 0: + return True + else: + return False + + @property + def host(self): + return self.host_array + + @property + def device(self): + if self.device_array is None: + # @TODO: Make SVM works with ElementwiseKernel + # if self.svm_capable: + # self.device_array = cl.SVM(self.host_array) + # else: + # self.device_array = cl.array.to_device(self.queue, self.host_array) + self.device_array = cl.array.to_device(self.queue, self.host_array) + + # cl.Array(data=cl.SVM(...)) + + self.device_array.with_queue(self.queue) + return self.device_array + +# }}} + # vim: foldmethod=marker:filetype=pyopencl diff --git a/boxtree/traversal.py b/boxtree/traversal.py index 78035ee..5639dba 100644 --- a/boxtree/traversal.py +++ b/boxtree/traversal.py @@ -1722,6 +1722,18 @@ class FMMTraversalInfo(DeviceDataRecord): queue, frozenset(exclude_fields) ) + def to_host_device_array(self, queue, exclude_fields=frozenset()): + exclude_fields = set(exclude_fields) + exclude_fields.add("level_start_source_box_nrs") + exclude_fields.add("level_start_target_box_nrs") + exclude_fields.add("level_start_target_or_target_parent_box_nrs") + exclude_fields.add("level_start_source_parent_box_nrs") + exclude_fields.add("tree") + + return super(FMMTraversalInfo, self).to_host_device_array( + queue, frozenset(exclude_fields) + ) + # }}} diff --git a/boxtree/tree.py b/boxtree/tree.py index aa76fe8..15973fb 100644 --- a/boxtree/tree.py +++ b/boxtree/tree.py @@ -398,6 +398,13 @@ class Tree(DeviceDataRecord): return super(Tree, self).to_device(queue, frozenset(exclude_fields)) + def to_host_device_array(self, queue, exclude_fields=frozenset()): + # level_start_box_nrs should remain in host memory + exclude_fields = set(exclude_fields) + exclude_fields.add("level_start_box_nrs") + + return super(Tree, self).to_host_device_array(queue, frozenset(exclude_fields)) + # }}} diff --git a/test/test_distributed.py b/test/test_distributed.py index c6e202e..c7c5770 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -18,12 +18,25 @@ logging.basicConfig(level=os.environ.get("LOGLEVEL", "WARNING")) logging.getLogger("boxtree.distributed").setLevel(logging.INFO) +def set_cache_dir(comm): + """Make each rank use a differnt cache location to avoid conflict. 
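# ImmutableHostDeviceArray (above) exposes one read-only array on both sides:
# .host is the numpy array, .device lazily uploads and then caches a
# pyopencl array bound to the stored queue. A small usage sketch; the
# context/queue setup is the usual pyopencl boilerplate and is left commented:

from boxtree.tools import ImmutableHostDeviceArray

# import numpy as np, pyopencl as cl
# ctx = cl.create_some_context()
# queue = cl.CommandQueue(ctx)
# counts = ImmutableHostDeviceArray(queue, np.arange(10, dtype=np.int32))
# counts.host[3]    # plain numpy access, no transfer
# counts.device     # first access uploads; later accesses reuse the copy
#
# Per the class docstring, mutating either copy afterwards is undefined
# behavior; treat both views as read-only.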
+ """ + from pathlib import Path + from mpi4py import MPI + if "XDG_CACHE_HOME" in os.environ: + cache_home = Path(os.environ["XDG_CACHE_HOME"]) + else: + cache_home = Path.home() / ".cache" + os.environ["XDG_CACHE_HOME"] = str(cache_home / str(comm.Get_rank())) + + def _test_against_shared(dims, nsources, ntargets, dtype): from mpi4py import MPI # Get the current rank comm = MPI.COMM_WORLD rank = comm.Get_rank() + set_cache_dir(comm) # Initialize arguments for worker processes tree = None @@ -146,6 +159,7 @@ def _test_constantone(dims, nsources, ntargets, dtype): # Get the current rank comm = MPI.COMM_WORLD rank = comm.Get_rank() + set_cache_dir(comm) # Initialization tree = None -- GitLab From 16865d464a405cabb16f12ae06584ec32aed8999 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 18 Nov 2020 23:07:51 -0800 Subject: [PATCH 214/260] Add documentation page for the distributed implementation --- boxtree/distributed/__init__.py | 80 +++++++++++++++++++++++++- boxtree/distributed/local_traversal.py | 12 ++++ boxtree/distributed/local_tree.py | 21 ++++++- boxtree/distributed/partition.py | 27 ++++----- boxtree/tools.py | 4 +- boxtree/tree.py | 3 +- doc/distributed.rst | 19 +----- test/test_distributed.py | 1 - 8 files changed, 127 insertions(+), 40 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 69ea471..168b02f 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -23,6 +23,84 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ +__doc__ = """ +High-level Point FMM Interface +------------------------------ + +To perform point-FMM, first construct a +:class:`boxtree.distributed.DistributedFMMRunner` object. The constructor will +distribute the necessary information from the root rank to all worker ranks. Then, +the :meth:`boxtree.distributed.DistributedFMMRunner.drive_dfmm` can be used for +launching FMM. + +.. autoclass:: boxtree.distributed.DistributedFMMRunner + + .. automethod:: drive_dfmm + +Distributed Algorithm Overview +------------------------------ + +1. Construct the global tree and traversal lists on root rank and broadcast to all + worker ranks. +2. Partition boxes into disjoint sets, where the number of sets is the number of MPI + ranks. (See :ref:`partition-boxes`) +3. Each rank constructs the local tree and traversal lists independently, according + to the partition. (See :ref:`construct-local-tree-traversal`) +4. Distribute source weights from the root rank to all worker ranks. (See + :ref:`distribute-source-weights`) +5. Each rank independently forms multipole expansions from the leaf nodes of the + local tree and propagates the partial multipole expansions upwards. +6. Communicate multipole expansions so that all ranks have the complete multipole + expansions needed. +7. Each ranks indepedently forms local expansions, propagates the local expansions + downwards, and evaluate potentials of target points in its partition. The + calculated potentials are then assembled on the root rank. + +For step 5-7, see :ref:`distributed-fmm-evaluation`. + +Note that step 4-7 may be repeated multiple times with the same tree and traversal +object built from step 1-3. For example, when solving a PDE, step 4-7 is executed +for each GMRES iteration. + +The next sections will cover the interfaces of these steps. + +.. _partition-boxes: + +Partition Boxes +--------------- + +.. autofunction:: boxtree.distributed.partition.partition_work + +.. 
autofunction:: boxtree.distributed.partition.get_boxes_mask + +.. _construct-local-tree-traversal: + +Construct Local Tree and Traversal +---------------------------------- + +.. autofunction:: boxtree.distributed.local_tree.generate_local_tree + +.. autofunction:: boxtree.distributed.local_traversal.generate_local_travs + +.. _distribute-source-weights: + +Distribute source weights +------------------------- + +.. autofunction:: boxtree.distributed.calculation.DistributedExpansionWrangler\ +.distribute_source_weights + +.. _distributed-fmm-evaluation: + +Distributed FMM Evaluation +-------------------------- + +The distributed version of the FMM evaluation shares the same interface as the +shared-memory version. To evaluate FMM in distributed manner, set ``distributed`` to +``True`` in :func:`boxtree.fmm.drive_fmm`. + +""" + from mpi4py import MPI import numpy as np from boxtree.cost import FMMCostModel @@ -145,7 +223,7 @@ class DistributedFMMRunner(object): from boxtree.distributed.local_tree import generate_local_tree self.local_tree, self.src_idx, self.tgt_idx = generate_local_tree( - queue, self.global_trav, responsible_boxes_list + queue, self.global_trav, responsible_boxes_list[mpi_rank] ) # }}} diff --git a/boxtree/distributed/local_traversal.py b/boxtree/distributed/local_traversal.py index d4e90bd..78ff56f 100644 --- a/boxtree/distributed/local_traversal.py +++ b/boxtree/distributed/local_traversal.py @@ -35,10 +35,22 @@ logger = logging.getLogger(__name__) def generate_local_travs( queue, local_tree, traversal_builder, box_bounding_box=None, merge_close_lists=False): + """Generate local traversal from local tree. + + :arg queue: a :class:`pyopencl.CommandQueue` object. + :arg local_tree: the local tree of + :class:`boxtree.tools.ImmutableHostDeviceArray` on which the local traversal + object will be constructed. + :arg traversal_builder: a function, taken a :class:`pyopencl.CommandQueue` and + a tree, returns the traversal object based on the tree. + + :return: generated local traversal object in host memory + """ start_time = time.time() local_tree.with_queue(queue) + # TODO: Maybe move the logic here to local tree construction? # Modify box flags for targets from boxtree import box_flags_enum box_flag_t = dtype_to_ctype(box_flags_enum.dtype) diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 2fdd055..8b0ae9d 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -520,6 +520,21 @@ class LocalTree(Tree): def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm=MPI.COMM_WORLD): + """Generate the local tree for the current rank. + + :arg queue: a :class:`pyopencl.CommandQueue` object. + :arg global_traversal: Global :class:`boxtree.traversal.FMMTraversalInfo` object + on host memory. + :arg responsible_boxes_list: a :class:`numpy.ndarray` object containing the + responsible boxes of the current rank. + + :return: a tuple of ``(local_tree, src_idx, tgt_idx)``, where ``local_tree`` is + a :class:`boxtree.tools.ImmutableHostDeviceArray` of generated local tree, + ``src_idx`` is the indices of the local sources in the global tree, and + ``tgt_idx`` is the indices of the local targets in the global tree. + ``src_idx`` and ``tgt_idx`` are needed for distributing source weights from + root rank and assembling calculated potentials on the root rank. 
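# The high-level interface documented above boils down to one collective call
# per solve. A rough sketch, assuming fmm_runner is an already-constructed
# DistributedFMMRunner (its constructor arguments are not reproduced here) and
# source_weights is a numpy weight array living on the root rank:


def run_distributed_fmm(fmm_runner, source_weights):
    timing_data = {}
    # Every rank must enter this call; only the root rank gets the assembled
    # potentials back, while worker ranks receive None (see the drive_fmm
    # docstring in the earlier patch).
    potentials = fmm_runner.drive_dfmm(source_weights, timing_data=timing_data)
    return potentials, timing_data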
+ """ global_tree = global_traversal.tree # Get MPI information @@ -530,10 +545,10 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, from boxtree.distributed.partition import get_boxes_mask (responsible_boxes_mask, ancestor_boxes, src_boxes_mask, box_mpole_is_used) = \ - get_boxes_mask(queue, global_traversal, responsible_boxes_list[mpi_rank]) + get_boxes_mask(queue, global_traversal, responsible_boxes_list) local_tree = global_tree.copy( - responsible_boxes_list=responsible_boxes_list[mpi_rank], + responsible_boxes_list=responsible_boxes_list, ancestor_mask=ancestor_boxes.get(), box_to_user_starts=None, box_to_user_lists=None, @@ -550,7 +565,7 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, global_tree, src_boxes_mask, responsible_boxes_mask, - local_tree, + local_tree ) local_tree._dimensions = local_tree.dimensions diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index 38e56fa..f60385f 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -39,8 +39,8 @@ def partition_work(boxes_time, traversal, total_rank): :arg boxes_time: The expected running time of each box. :arg traversal: The traversal object built on root containing all particles. :arg total_rank: The total number of ranks. - :return: A numpy array of shape (total_rank,), where the ith element is an numpy - array containing the responsible boxes of process i. + :return: A numpy array of shape ``(total_rank,)``, where the ith element is an + numpy array containing the responsible boxes of process i. """ tree = traversal.tree @@ -263,19 +263,16 @@ def get_multipole_boxes_mask( def get_boxes_mask(queue, traversal, responsible_boxes_list): - """Given a list of responsible boxes for a process, this helper function - calculates the following four masks: - - responsible_box_mask: Current process will evaluate target potentials and - multipole expansions in these boxes. Sources and targets in these boxes - are needed. - - ancestor_boxes_mask: The the ancestor of the responsible boxes. - - src_boxes_mask: Current process needs sources but not targets in these boxes. - - multipole_boxes_mask: Current process needs multipole expressions in these - boxes. + """Given the responsible boxes for a rank, this helper function calculates the + following four masks: + + * responsible_box_mask: Current process will evaluate target potentials and + multipole expansions in these boxes. Sources and targets in these boxes + are needed. + * ancestor_boxes_mask: The the ancestor of the responsible boxes. + * src_boxes_mask: Current process needs sources but not targets in these boxes. + * multipole_boxes_mask: Current process needs multipole expressions in these + boxes. :arg responsible_boxes_list: A numpy array of responsible box indices. 
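# get_boxes_mask (above) bundles the four mask queries into a single call. A
# rough sketch of turning the returned device masks into box-index lists;
# queue, traversal (the global host traversal) and my_boxes are assumed to be
# supplied by the caller:

import numpy as np

from boxtree.distributed.partition import get_boxes_mask


def needed_box_indices(queue, traversal, my_boxes):
    responsible, ancestors, src_boxes, mpole_boxes = get_boxes_mask(
        queue, traversal, my_boxes)
    # Each mask is a device array of shape (nboxes,); nonzero means "needed".
    return {
        "responsible": np.flatnonzero(responsible.get()),
        "ancestor": np.flatnonzero(ancestors.get()),
        "need_sources": np.flatnonzero(src_boxes.get()),
        "need_multipoles": np.flatnonzero(mpole_boxes.get()),
    }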
diff --git a/boxtree/tools.py b/boxtree/tools.py index 08fc2d5..e1f8588 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -1084,6 +1084,7 @@ def run_mpi(script, num_processes, env): # }}} + # {{{ HostDeviceArray class ImmutableHostDeviceArray: @@ -1097,7 +1098,8 @@ class ImmutableHostDeviceArray: @property def svm_capable(self): - svm_capabilities = self.queue.device.get_info(cl.device_info.SVM_CAPABILITIES) + svm_capabilities = \ + self.queue.device.get_info(cl.device_info.SVM_CAPABILITIES) if svm_capabilities & cl.device_svm_capabilities.FINE_GRAIN_BUFFER != 0: return True else: diff --git a/boxtree/tree.py b/boxtree/tree.py index 15973fb..ea8e5d6 100644 --- a/boxtree/tree.py +++ b/boxtree/tree.py @@ -403,7 +403,8 @@ class Tree(DeviceDataRecord): exclude_fields = set(exclude_fields) exclude_fields.add("level_start_box_nrs") - return super(Tree, self).to_host_device_array(queue, frozenset(exclude_fields)) + return super(Tree, self).to_host_device_array( + queue, frozenset(exclude_fields)) # }}} diff --git a/doc/distributed.rst b/doc/distributed.rst index b85811f..a9d90cb 100644 --- a/doc/distributed.rst +++ b/doc/distributed.rst @@ -1,21 +1,4 @@ Distributed Computation ======================= -High-level interface --------------------- - -To perform stardard point-FMM, first construct a -:class:`boxtree.distributed.DistributedFMMRunner` object. The constructor will -distribute the necessary information from the root rank to all worker ranks. Then, -the :meth:`boxtree.distributed.DistributedFMMRunner.drive_dfmm` can be used for -launching FMM. - -.. autoclass:: boxtree.distributed.DistributedFMMRunner - - .. automethod:: drive_dfmm - -FMM Computation ---------------- - -.. autoclass:: boxtree.distributed.calculation.DistributedExpansionWrangler - :members: +.. automodule:: boxtree.distributed.__init__ diff --git a/test/test_distributed.py b/test/test_distributed.py index c7c5770..3b08fbc 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -22,7 +22,6 @@ def set_cache_dir(comm): """Make each rank use a differnt cache location to avoid conflict. """ from pathlib import Path - from mpi4py import MPI if "XDG_CACHE_HOME" in os.environ: cache_home = Path(os.environ["XDG_CACHE_HOME"]) else: -- GitLab From 6c7ec7cd38cb45345fed88f257871e15c164c11d Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 10 Mar 2021 00:24:52 -0800 Subject: [PATCH 215/260] Fix doc CI failure --- boxtree/distributed/local_traversal.py | 4 ++-- boxtree/distributed/local_tree.py | 6 +++--- boxtree/tools.py | 25 +++++++++++++------------ 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/boxtree/distributed/local_traversal.py b/boxtree/distributed/local_traversal.py index 78ff56f..cc7c7dc 100644 --- a/boxtree/distributed/local_traversal.py +++ b/boxtree/distributed/local_traversal.py @@ -38,8 +38,8 @@ def generate_local_travs( """Generate local traversal from local tree. :arg queue: a :class:`pyopencl.CommandQueue` object. - :arg local_tree: the local tree of - :class:`boxtree.tools.ImmutableHostDeviceArray` on which the local traversal + :arg local_tree: the local tree of class + `boxtree.tools.ImmutableHostDeviceArray` on which the local traversal object will be constructed. :arg traversal_builder: a function, taken a :class:`pyopencl.CommandQueue` and a tree, returns the traversal object based on the tree. 
diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 081152c..74ad462 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -533,9 +533,9 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, responsible boxes of the current rank. :return: a tuple of ``(local_tree, src_idx, tgt_idx)``, where ``local_tree`` is - a :class:`boxtree.tools.ImmutableHostDeviceArray` of generated local tree, - ``src_idx`` is the indices of the local sources in the global tree, and - ``tgt_idx`` is the indices of the local targets in the global tree. + an object with class `boxtree.tools.ImmutableHostDeviceArray` of generated + local tree, ``src_idx`` is the indices of the local sources in the global + tree, and ``tgt_idx`` is the indices of the local targets in the global tree. ``src_idx`` and ``tgt_idx`` are needed for distributing source weights from root rank and assembling calculated potentials on the root rank. """ diff --git a/boxtree/tools.py b/boxtree/tools.py index f8d7206..bc69463 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -304,9 +304,8 @@ class DeviceDataRecord(Record): def get(self, queue, **kwargs): """Return a copy of `self` in which all data lives on the host, i.e. - all :class:`pyopencl.array.Array` and :class:`ImmutableHostDeviceArray` - objects are replaced by corresponding :class:`numpy.ndarray` instances on the - host. + all :class:`pyopencl.array.Array` and `ImmutableHostDeviceArray` objects are + replaced by corresponding :class:`numpy.ndarray` instances on the host. """ def try_get(attr): if isinstance(attr, ImmutableHostDeviceArray): @@ -361,10 +360,10 @@ class DeviceDataRecord(Record): def to_host_device_array(self, queue, exclude_fields=frozenset()): """Return a copy of `self` where all device and host arrays are transformed - to :class:`ImmutableHostDeviceArray` objects. + to `ImmutableHostDeviceArray` objects. :arg exclude_fields: a :class:`frozenset` containing fields excluding from - transformed to :class:`ImmutableHostDeviceArray`. + transformed to `ImmutableHostDeviceArray`. """ def _to_host_device_array(attr): if isinstance(attr, np.ndarray): @@ -1092,6 +1091,14 @@ def run_mpi(script, num_processes, env): # {{{ HostDeviceArray class ImmutableHostDeviceArray: + """Interface for arrays on both host and device. + + .. note:: This interface assumes the array is immutable. The behavior of + modifying the content of either the host array or the device array is undefined. + + @TODO: Once available, replace this implementation with PyOpenCL's in-house + implementation. 
+ """ def __init__(self, queue, host_array): self.queue = queue self.host_array = host_array @@ -1116,15 +1123,9 @@ class ImmutableHostDeviceArray: @property def device(self): if self.device_array is None: - # @TODO: Make SVM works with ElementwiseKernel - # if self.svm_capable: - # self.device_array = cl.SVM(self.host_array) - # else: - # self.device_array = cl.array.to_device(self.queue, self.host_array) + # @TODO: Use SVM self.device_array = cl.array.to_device(self.queue, self.host_array) - # cl.Array(data=cl.SVM(...)) - self.device_array.with_queue(self.queue) return self.device_array -- GitLab From 500a89b73699778316eecb0c45eb7a95f3c0d2bd Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 11 Mar 2021 00:23:31 -0800 Subject: [PATCH 216/260] Address reviewer's comments --- boxtree/distributed/__init__.py | 11 ++- boxtree/distributed/calculation.py | 17 +---- boxtree/distributed/local_tree.py | 2 +- boxtree/distributed/partition.py | 18 ++--- boxtree/fmm.py | 106 +++++++++++++++++------------ boxtree/tools.py | 5 +- boxtree/traversal.py | 31 ++++----- test/test_distributed.py | 53 ++++++++++++--- test/test_tools.py | 1 - 9 files changed, 136 insertions(+), 108 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 168b02f..67ffa95 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -40,8 +40,8 @@ launching FMM. Distributed Algorithm Overview ------------------------------ -1. Construct the global tree and traversal lists on root rank and broadcast to all - worker ranks. +1. Construct the global tree and traversal lists on the root rank and broadcast to + all worker ranks. 2. Partition boxes into disjoint sets, where the number of sets is the number of MPI ranks. (See :ref:`partition-boxes`) 3. Each rank constructs the local tree and traversal lists independently, according @@ -264,10 +264,9 @@ class DistributedFMMRunner(object): return drive_fmm( self.local_trav.get(None), self.local_wrangler, source_weights, timing_data=timing_data, - distributed=True, - global_wrangler=self.global_wrangler, - src_idx_all_ranks=self.src_idx_all_ranks, - tgt_idx_all_ranks=self.tgt_idx_all_ranks, comm=self.comm, + global_wrangler=self.global_wrangler, + global_src_idx_all_ranks=self.src_idx_all_ranks, + global_tgt_idx_all_ranks=self.tgt_idx_all_ranks, _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce ) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index e07a8e1..f5aeb29 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -52,21 +52,6 @@ class DistributedExpansionWrangler: def distribute_source_weights( self, src_weight_vecs, src_idx_all_ranks, comm=MPI.COMM_WORLD): - """This method transfers needed source_weights from root rank to each worker - rank in communicator *comm*. - - This method needs to be called collectively by all ranks in communicator - *comm*. - - :arg src_weight_vecs: a sequence of :class:`numpy.ndarray` with shape - ``(nsources,)`` representing the weights of sources on the root rank. - ``None`` on worker ranks. - :arg src_idx_all_ranks: a :class:`list` with shape ``(nranks,)``, where the - ith entry is a :class:`numpy.ndarray` indexed into *source_weights* to be - sent from the root rank to rank *i*. Each entry can be generated by - *generate_local_tree*. ``None`` on worker ranks. - :return: The received source weights of the current rank. 
- """ mpi_rank = comm.Get_rank() mpi_size = comm.Get_size() @@ -182,7 +167,7 @@ class DistributedExpansionWrangler: :arg box_to_user_lists: a :class:`pyopencl.array.Array` object storing the users of each box in *contributing_boxes_list*. :returns: a :class:`pyopencl.array.Array` object with the same shape as - *contributing_boxes_list*, where the ith entry is 1 if + *contributing_boxes_list*, where the i-th entry is 1 if ``contributing_boxes_list[i]`` is used by at least on box in the subrange specified. """ diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 74ad462..cb8fa72 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -578,7 +578,7 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, local_tree.__class__ = LocalTree - # {{{ compute the users of multipole expansions of each box on root rank + # {{{ compute the users of multipole expansions of each box on the root rank box_mpole_is_used_all_ranks = None if mpi_rank == 0: diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index f60385f..6f13d9e 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -39,7 +39,7 @@ def partition_work(boxes_time, traversal, total_rank): :arg boxes_time: The expected running time of each box. :arg traversal: The traversal object built on root containing all particles. :arg total_rank: The total number of ranks. - :return: A numpy array of shape ``(total_rank,)``, where the ith element is an + :return: A numpy array of shape ``(total_rank,)``, where the i-th element is an numpy array containing the responsible boxes of process i. """ tree = traversal.tree @@ -129,9 +129,9 @@ def get_ancestor_boxes_mask(queue, traversal, responsible_boxes_mask): """Query the ancestors of responsible boxes. :arg responsible_boxes_mask: A :class:`pyopencl.array.Array` object of shape - ``(tree.nboxes,)`` whose ith entry is 1 if ``i`` is a responsible box. + ``(tree.nboxes,)`` whose i-th entry is 1 if ``i`` is a responsible box. :return: A :class:`pyopencl.array.Array` object of shape ``(tree.nboxes,)`` whose - ith entry is 1 if ``i`` is an ancestor of the responsible boxes specified by + i-th entry is 1 if ``i`` is an ancestor of the responsible boxes specified by *responsible_boxes_mask*. """ ancestor_boxes = cl.array.zeros(queue, (traversal.tree.nboxes,), dtype=np.int8) @@ -157,12 +157,12 @@ def get_src_boxes_mask( of boxes represented by *responsible_boxes_mask*. :arg responsible_boxes_mask: A :class:`pyopencl.array.Array` object of shape - ``(tree.nboxes,)`` whose ith entry is 1 if ``i`` is a responsible box. + ``(tree.nboxes,)`` whose i-th entry is 1 if ``i`` is a responsible box. :param ancestor_boxes_mask: A :class:`pyopencl.array.Array` object of shape - ``(tree.nboxes,)`` whose ith entry is 1 if ``i`` is either a responsible box + ``(tree.nboxes,)`` whose i-th entry is 1 if ``i`` is either a responsible box or an ancestor of the responsible boxes. :return: A :class:`pyopencl.array.Array` object of shape ``(tree.nboxes,)`` whose - ith entry is 1 if souces of box ``i`` are needed for evaluating the + i-th entry is 1 if souces of box ``i`` are needed for evaluating the potentials of targets in boxes represented by *responsible_boxes_mask*. """ src_boxes_mask = responsible_boxes_mask.copy() @@ -220,12 +220,12 @@ def get_multipole_boxes_mask( potentials of targets in boxes represented by *responsible_boxes_mask*. 
:arg responsible_boxes_mask: A :class:`pyopencl.array.Array` object of shape - ``(tree.nboxes,)`` whose ith entry is 1 if ``i`` is a responsible box. + ``(tree.nboxes,)`` whose i-th entry is 1 if ``i`` is a responsible box. :arg ancestor_boxes_mask: A :class:`pyopencl.array.Array` object of shape - ``(tree.nboxes,)`` whose ith entry is 1 if ``i`` is either a responsible box + ``(tree.nboxes,)`` whose i-th entry is 1 if ``i`` is either a responsible box or an ancestor of the responsible boxes. :return: A :class:`pyopencl.array.Array` object of shape ``(tree.nboxes,)`` - whose ith entry is 1 if multipoles of box ``i`` are needed for evaluating + whose i-th entry is 1 if multipoles of box ``i`` are needed for evaluating the potentials of targets in boxes represented by *responsible_boxes_mask*. """ diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 9608a31..edd3101 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -34,21 +34,13 @@ except ImportError: from collections import Mapping -try: - from mpi4py import MPI - from boxtree.distributed import dtype_to_mpi - from boxtree.distributed import MPITags -except ImportError: - pass - - import numpy as np from pytools import ProcessLogger def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None, - distributed=False, global_wrangler=None, - src_idx_all_ranks=None, tgt_idx_all_ranks=None, comm=None, + comm=None, global_wrangler=None, + global_src_idx_all_ranks=None, global_tgt_idx_all_ranks=None, _communicate_mpoles_via_allreduce=False): """Top-level driver routine for a fast multipole calculation. @@ -60,8 +52,9 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None, Nonetheless, many common applications (such as point-to-point FMMs) can be covered by supplying the right *expansion_wrangler* to this routine. - For distributed implementation, this function needs to be called collectively by - all ranks in *comm*. + To enable distributed computation, set *comm* to a valid MPI communicator, and + call this function collectively for all ranks in *comm*. The distributed + implementation requires mpi4py. :arg traversal: A :class:`boxtree.traversal.FMMTraversalInfo` instance. For distributed implementation, this argument should be the local traversal @@ -75,25 +68,20 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None, :arg timing_data: Either *None*, or a :class:`dict` that is populated with timing information for the stages of the algorithm (in the form of :class:`TimingResult`), if such information is available. - :arg distributed: Whether to run the driver in a distributed manner. - (Require mpi4py). :arg global_wrangler: An object exhibiting the :class:`ExpansionWranglerInterface`. This wrangler should reference the global tree, which is used for assembling partial results from worker ranks together. This argument is only significant for distributed implementation and on the root rank. - :arg src_idx_all_ranks: a :class:`list` with shape ``(nranks,)``, where the ith - entry is a :class:`numpy.ndarray` representing the source indices of the - local tree on rank *i*. Each entry can be returned from - *generate_local_tree*. This argument is significant only on root rank. - :arg tgt_idx_all_ranks: a :class:`list` with shape ``(nranks,)``, where the ith - entry is a :class:`numpy.ndarray` representing the target indices of the - local tree on rank *i*. Each entry can be returned from - *generate_local_tree*. This argument is significant only on root rank. 
+ :arg global_src_idx_all_ranks: a :class:`list` of length ``nranks``, where the + i-th entry is a :class:`numpy.ndarray` representing the global indices of + sources in the local tree on rank *i*. Each entry can be returned from + *generate_local_tree*. This argument is significant only on the root rank. + :arg global_tgt_idx_all_ranks: a :class:`list` of length ``nranks``, where the + i-th entry is a :class:`numpy.ndarray` representing the global indices of + targets in the local tree on rank *i*. Each entry can be returned from + *generate_local_tree*. This argument is significant only on the root rank. :arg comm: MPI communicator. Default to ``MPI_COMM_WORLD``. - :arg _communicate_mpoles_via_allreduce: whether to use MPI allreduce for - communicating multipole expressions. Using MPI allreduce is slower but might - be helpful for debugging purpose. :return: the potentials computed by *expansion_wrangler*. For the distributed implementation, the potentials are gathered and returned on the root rank; @@ -107,23 +95,18 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None, fmm_proc = ProcessLogger(logger, "fmm") recorder = TimingRecorder() - if distributed: - # Get MPI information - if comm is None: - comm = MPI.COMM_WORLD - mpi_rank = comm.Get_rank() - mpi_size = comm.Get_size() - - if not distributed: + if comm is None: src_weight_vecs = [wrangler.reorder_sources(weight) for weight in src_weight_vecs] else: + mpi_rank = comm.Get_rank() + mpi_size = comm.Get_size() if mpi_rank == 0: src_weight_vecs = [global_wrangler.reorder_sources(weight) for weight in src_weight_vecs] src_weight_vecs = wrangler.distribute_source_weights( - src_weight_vecs, src_idx_all_ranks, comm=comm + src_weight_vecs, global_src_idx_all_ranks, comm=comm ) # {{{ "Step 2.1:" Construct local multipoles @@ -150,8 +133,10 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None, # }}} - if distributed: + if comm is not None: if _communicate_mpoles_via_allreduce: + # Use MPI allreduce for communicating multipole expressions. It is slower + # but might be helpful for debugging purposes. 
mpole_exps_all = np.zeros_like(mpole_exps) comm.Allreduce(mpole_exps, mpole_exps_all) mpole_exps = mpole_exps_all @@ -272,20 +257,29 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None, # {{{ Worker ranks send calculated potentials to the root rank - if distributed: + if comm is not None: + from boxtree.distributed import dtype_to_mpi + from boxtree.distributed import MPITags + from mpi4py import MPI + potentials_mpi_type = dtype_to_mpi(potentials.dtype) if mpi_rank == 0: potentials_all_ranks = np.empty((mpi_size,), dtype=object) potentials_all_ranks[0] = potentials + recv_reqs = [] + for irank in range(1, mpi_size): potentials_all_ranks[irank] = np.empty( - tgt_idx_all_ranks[irank].shape, dtype=potentials.dtype + global_tgt_idx_all_ranks[irank].shape, dtype=potentials.dtype ) - comm.Recv([potentials_all_ranks[irank], potentials_mpi_type], - source=irank, tag=MPITags["GATHER_POTENTIALS"]) + recv_reqs.append( + comm.Irecv([potentials_all_ranks[irank], potentials_mpi_type], + source=irank, tag=MPITags["GATHER_POTENTIALS"])) + + MPI.Request.Waitall(recv_reqs) else: comm.Send([potentials, potentials_mpi_type], dest=0, tag=MPITags["GATHER_POTENTIALS"]) @@ -294,15 +288,15 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None, # {{{ Assemble potentials from worker ranks together on the root rank - if distributed and mpi_rank == 0: + if comm is not None and mpi_rank == 0: potentials = np.empty(global_wrangler.tree.ntargets, dtype=potentials.dtype) for irank in range(mpi_size): - potentials[tgt_idx_all_ranks[irank]] = potentials_all_ranks[irank] + potentials[global_tgt_idx_all_ranks[irank]] = potentials_all_ranks[irank] # }}} - if distributed: + if comm is not None: result = None if mpi_rank == 0: result = global_wrangler.reorder_potentials(potentials) @@ -464,6 +458,24 @@ class ExpansionWranglerInterface: because some derived FMMs (notably the QBX FMM) do their own reordering. """ + def distribute_source_weights(self, src_weight_vecs, src_idx_all_ranks, comm): + """Used for the distributed implementation. This method transfers needed + source weights from root rank to each worker rank in communicator *comm*. + + This method needs to be called collectively by all ranks in communicator + *comm*. + + :arg src_weight_vecs: a sequence of :class:`numpy.ndarray` of length + ``nsources``, representing the weights of sources on the root rank. + ``None`` on worker ranks. + :arg src_idx_all_ranks: a :class:`list` of length ``nranks``, where the + i-th entry is a :class:`numpy.ndarray` indexed into *source_weights* to + be sent from the root rank to rank *i*. Each entry can be generated by + *generate_local_tree*. ``None`` on worker ranks. + + :return: Received source weights of the current rank. + """ + # }}} @@ -473,7 +485,15 @@ class TimingResult(Mapping): """Interface for returned timing data. This supports accessing timing results via a mapping interface, along with - combining results via :meth:`merge`. + combining results via :meth:`merge` or via add operator. For example, + + .. code-block:: python + + t0 = TimingResult(a=1, b=2) + t1 = TimingResult(a=3, b=4) + t2 = t0 + t1 + t3 = t0.merge(t1) # t3 contains the same content as t2 + t2["a"] # return 4 .. automethod:: merge """ diff --git a/boxtree/tools.py b/boxtree/tools.py index bc69463..7767914 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -41,7 +41,6 @@ import sys # Use offsets in VectorArg by default. 
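The gather rewritten above, posting all receives with Irecv and completing them with a single Waitall instead of issuing blocking Recv calls in rank order, is a standard mpi4py pattern. Below is a self-contained sketch of just that pattern; the tag value and the fixed buffer size are made up for the example and stand in for MPITags["GATHER_POTENTIALS"] and the per-rank target counts from global_tgt_idx_all_ranks:

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    TAG = 77  # stand-in for MPITags["GATHER_POTENTIALS"]

    nlocal = 4
    local_result = np.full(nlocal, float(rank))

    if rank == 0:
        results = [local_result]
        recv_bufs = [np.empty(nlocal) for _ in range(1, size)]
        # Post every receive up front so workers may send in any order,
        # then wait for the whole batch at once.
        reqs = [comm.Irecv(buf, source=irank, tag=TAG)
                for irank, buf in zip(range(1, size), recv_bufs)]
        MPI.Request.Waitall(reqs)
        results.extend(recv_bufs)
        print(np.concatenate(results))
    else:
        comm.Send(local_result, dest=0, tag=TAG)

Run collectively, e.g. with ``mpiexec -n 4 python sketch.py``.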
VectorArg = partial(_VectorArg, with_offset=True) -ScalarArg = ScalarArg AXIS_NAMES = ("x", "y", "z", "w") @@ -711,8 +710,6 @@ class MaskCompressorKernel(object): @memoize_method def get_matrix_compressor_kernel(self, mask_dtype, list_dtype): from pyopencl.algorithm import ListOfListsBuilder - # Reimport VectorArg to use default with_offset - from pyopencl.tools import VectorArg return ListOfListsBuilder( self.context, @@ -722,7 +719,7 @@ class MaskCompressorKernel(object): ScalarArg(np.int32, "ncols"), ScalarArg(np.int32, "outer_stride"), ScalarArg(np.int32, "inner_stride"), - VectorArg(mask_dtype, "mask"), + _VectorArg(mask_dtype, "mask"), ], name_prefix="compress_matrix") diff --git a/boxtree/traversal.py b/boxtree/traversal.py index 530b404..87ffa9f 100644 --- a/boxtree/traversal.py +++ b/boxtree/traversal.py @@ -1709,30 +1709,27 @@ class FMMTraversalInfo(DeviceDataRecord): return len(self.target_or_target_parent_boxes) def to_device(self, queue, exclude_fields=frozenset()): - exclude_fields = set(exclude_fields) - exclude_fields.add("level_start_source_box_nrs") - exclude_fields.add("level_start_target_box_nrs") - exclude_fields.add("level_start_target_or_target_parent_box_nrs") - exclude_fields.add("level_start_source_parent_box_nrs") - exclude_fields.add("tree") + exclude_fields = exclude_fields | { + "level_start_source_box_nrs", + "level_start_target_box_nrs", + "level_start_target_or_target_parent_box_nrs", + "level_start_source_parent_box_nrs", + "tree"} self.tree = self.tree.to_device(queue) - return super(FMMTraversalInfo, self).to_device( - queue, frozenset(exclude_fields) - ) + return super(FMMTraversalInfo, self).to_device(queue, exclude_fields) def to_host_device_array(self, queue, exclude_fields=frozenset()): - exclude_fields = set(exclude_fields) - exclude_fields.add("level_start_source_box_nrs") - exclude_fields.add("level_start_target_box_nrs") - exclude_fields.add("level_start_target_or_target_parent_box_nrs") - exclude_fields.add("level_start_source_parent_box_nrs") - exclude_fields.add("tree") + exclude_fields = exclude_fields | { + "level_start_source_box_nrs", + "level_start_target_box_nrs", + "level_start_target_or_target_parent_box_nrs", + "level_start_source_parent_box_nrs", + "tree"} return super(FMMTraversalInfo, self).to_host_device_array( - queue, frozenset(exclude_fields) - ) + queue, exclude_fields) # }}} diff --git a/test/test_distributed.py b/test/test_distributed.py index cc0166b..8641d06 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -1,3 +1,25 @@ +__copyright__ = "Copyright (C) 2021 Hao Gao" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + import numpy as np import pyopencl as cl import numpy.linalg as la @@ -29,7 +51,8 @@ def set_cache_dir(comm): os.environ["XDG_CACHE_HOME"] = str(cache_home / str(comm.Get_rank())) -def _test_against_shared(dims, nsources, ntargets, dtype): +def _test_against_shared( + dims, nsources, ntargets, dtype, _communicate_mpoles_via_allreduce=False): from mpi4py import MPI # Get the current rank @@ -102,7 +125,8 @@ def _test_against_shared(dims, nsources, ntargets, dtype): timing_data = {} pot_dfmm = distribued_fmm_info.drive_dfmm( - [sources_weights], timing_data=timing_data + [sources_weights], timing_data=timing_data, + _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce ) assert timing_data @@ -125,12 +149,14 @@ def _test_against_shared(dims, nsources, ntargets, dtype): @pytest.mark.mpi -@pytest.mark.parametrize("num_processes, dims, nsources, ntargets", [ - (4, 3, 10000, 10000) -]) -@pytest.mark.skipif(sys.version_info < (3, 5), - reason="distributed implementation requires 3.5 or higher") -def test_against_shared(num_processes, dims, nsources, ntargets): +@pytest.mark.parametrize( + "num_processes, dims, nsources, ntargets, communicate_mpoles_via_allreduce", [ + (4, 3, 10000, 10000, True), + (4, 3, 10000, 10000, False) + ] +) +def test_against_shared( + num_processes, dims, nsources, ntargets, communicate_mpoles_via_allreduce): pytest.importorskip("mpi4py") newenv = os.environ.copy() @@ -138,6 +164,8 @@ def test_against_shared(num_processes, dims, nsources, ntargets): newenv["dims"] = str(dims) newenv["nsources"] = str(nsources) newenv["ntargets"] = str(ntargets) + newenv["communicate_mpoles_via_allreduce"] = \ + str(communicate_mpoles_via_allreduce) newenv["OMP_NUM_THREADS"] = "1" run_mpi(__file__, num_processes, newenv) @@ -209,8 +237,6 @@ def _test_constantone(dims, nsources, ntargets, dtype): @pytest.mark.parametrize("num_processes, dims, nsources, ntargets", [ (4, 3, 10000, 10000) ]) -@pytest.mark.skipif(sys.version_info < (3, 5), - reason="distributed implementation requires 3.5 or higher") def test_constantone(num_processes, dims, nsources, ntargets): pytest.importorskip("mpi4py") @@ -235,7 +261,12 @@ if __name__ == "__main__": nsources = int(os.environ["nsources"]) ntargets = int(os.environ["ntargets"]) - _test_against_shared(dims, nsources, ntargets, dtype) + from distutils.util import strtobool + _communicate_mpoles_via_allreduce = bool( + strtobool(os.environ["communicate_mpoles_via_allreduce"])) + + _test_against_shared( + dims, nsources, ntargets, dtype, _communicate_mpoles_via_allreduce) elif os.environ["PYTEST"] == "2": # Run "test_constantone" test case diff --git a/test/test_tools.py b/test/test_tools.py index f25a1d1..e0a5496 100644 --- a/test/test_tools.py +++ b/test/test_tools.py @@ -127,7 +127,6 @@ def test_masked_list_compression(ctx_getter): assert set(arr_list) == set(arr.nonzero()[0]) -import pytest from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests) -- GitLab From 8fd3f044f59eb9e15cfeb838b4f46b6b6d975868 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Thu, 25 Mar 2021 00:45:57 -0500 Subject: [PATCH 217/260] Register pytest markers --- pytest.ini | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 pytest.ini diff --git 
a/pytest.ini b/pytest.ini new file mode 100644 index 0000000..a146247 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,5 @@ +[pytest] +markers = + opencl: uses OpenCL + geo_lookup: test geometric lookups + area_query: test area queries -- GitLab From da5dac702af9d0bf5de41127daacf98101253b1b Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Thu, 25 Mar 2021 00:47:22 -0500 Subject: [PATCH 218/260] Expansion wranglers: do not store trees --- boxtree/fmm.py | 410 ++++++++++++-------- boxtree/pyfmmlib_integration.py | 655 +++++++++++++++++--------------- boxtree/tools.py | 90 ++--- boxtree/version.py | 2 +- doc/fmm.rst | 21 +- examples/cost_model.py | 11 +- test/test_cost_model.py | 19 +- test/test_fmm.py | 97 ++--- 8 files changed, 720 insertions(+), 585 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 557fec2..763b7a9 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -1,3 +1,15 @@ +""" +.. autofunction:: drive_fmm + +.. autoclass:: TraversalAndWrangler +.. autoclass:: ExpansionWranglerInterface + +.. autoclass:: TimingResult + +.. autoclass:: TimingFuture +""" + + __copyright__ = "Copyright (C) 2012 Andreas Kloeckner" __license__ = """ @@ -20,22 +32,236 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ +from abc import ABC, abstractmethod import logging logger = logging.getLogger(__name__) +from collections.abc import Mapping -try: - # Python 3 - from collections.abc import Mapping -except ImportError: - # Python 2 - from collections import Mapping +from pytools import ProcessLogger -from pytools import ProcessLogger +# {{{ expansion wrangler interface + +class TraversalAndWrangler: + """A base class for objects specific to an implementation of the + :class:`ExpansionWranglerInterface` that may hold tree-/geometry-dependent + information. Typically, these objects can be used to host caches for such + information. Via :func:`drive_fmm`, objects of this type are supplied to + every method in the :class:`ExpansionWranglerInterface`. + + .. attribute:: dim + .. attribute:: tree + .. attribute:: traversal + .. attribute:: wrangler + """ + + def __init__(self, traversal, wrangler): + self.dim = traversal.tree.dimensions + self.traversal = traversal + self.wrangler = wrangler + + @property + def tree(self): + return self.traversal.tree + + +class ExpansionWranglerInterface(ABC): + """Abstract expansion handling interface for use with :func:`drive_fmm`. + + See this + `test code `_ + for a very simple sample implementation. + + .. note:: + + Wranglers should not hold a reference (and thereby be specific to) a + :class:`boxtree.Tree` instance. Their purpose is to host caches for + generated translation code that is reusable across trees. + It is OK for expansion wranglers to be specific to a given kernel + (or set of kernels). + + Functions that support returning timing data return a value supporting the + :class:`TimingFuture` interface. + + .. versionchanged:: 2018.1 + + Changed (a subset of) functions to return timing data. + + .. automethod:: tree_dependent_info + + .. rubric:: Array creation + + .. automethod:: multipole_expansion_zeros + .. automethod:: local_expansion_zeros + .. automethod:: output_zeros + + .. rubric:: Particle ordering + + .. automethod:: reorder_sources + .. automethod:: reorder_potentials + + .. rubric:: Translations + + .. automethod:: form_multipoles + .. automethod:: coarsen_multipoles + .. automethod:: eval_direct + .. automethod:: multipole_to_local + .. automethod:: eval_multipoles + .. 
automethod:: form_locals + .. automethod:: refine_locals + .. automethod:: eval_locals + .. automethod:: finalize_potentials + """ + + @abstractmethod + def multipole_expansion_zeros(self, taw: TraversalAndWrangler): + """Return an expansions array (which must support addition) + capable of holding one multipole or local expansion for every + box in the tree. + """ + + @abstractmethod + def local_expansion_zeros(self, taw: TraversalAndWrangler): + """Return an expansions array (which must support addition) + capable of holding one multipole or local expansion for every + box in the tree. + """ + + @abstractmethod + def output_zeros(self, taw: TraversalAndWrangler): + """Return a potentials array (which must support addition) capable of + holding a potential value for each target in the tree. Note that + :func:`drive_fmm` makes no assumptions about *potential* other than + that it supports addition--it may consist of potentials, gradients of + the potential, or arbitrary other per-target output data. + """ + + @abstractmethod + def reorder_sources(self, taw: TraversalAndWrangler, source_array): + """Return a copy of *source_array* in + :ref:`tree source order `. + *source_array* is in user source order. + """ + + @abstractmethod + def reorder_potentials(self, taw: TraversalAndWrangler, potentials): + """Return a copy of *potentials* in + :ref:`user target order `. + *source_weights* is in tree target order. + """ + + @abstractmethod + def form_multipoles(self, taw: TraversalAndWrangler, + level_start_source_box_nrs, source_boxes, + src_weight_vecs): + """Return an expansions array (compatible with + :meth:`multipole_expansion_zeros`) + containing multipole expansions in *source_boxes* due to sources + with *src_weight_vecs*. + All other expansions must be zero. + + :return: A pair (*mpoles*, *timing_future*). + """ + + @abstractmethod + def coarsen_multipoles(self, taw: TraversalAndWrangler, + level_start_source_parent_box_nrs, + source_parent_boxes, mpoles): + """For each box in *source_parent_boxes*, + gather (and translate) the box's children's multipole expansions in + *mpole* and add the resulting expansion into the box's multipole + expansion in *mpole*. + + :returns: A pair (*mpoles*, *timing_future*). + """ + + @abstractmethod + def eval_direct(self, taw: TraversalAndWrangler, + target_boxes, neighbor_sources_starts, + neighbor_sources_lists, src_weight_vecs): + """For each box in *target_boxes*, evaluate the influence of the + neighbor sources due to *src_weight_vecs*, which use :ref:`csr` and are + indexed like *target_boxes*. + :returns: A pair (*pot*, *timing_future*), where *pot* is a + a new potential array, see :meth:`output_zeros`. + """ + + @abstractmethod + def multipole_to_local(self, taw: TraversalAndWrangler, + level_start_target_or_target_parent_box_nrs, + target_or_target_parent_boxes, + starts, lists, mpole_exps): + """For each box in *target_or_target_parent_boxes*, translate and add + the influence of the multipole expansion in *mpole_exps* into a new + array of local expansions. *starts* and *lists* use :ref:`csr`, and + *starts* is indexed like *target_or_target_parent_boxes*. -def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None): + :returns: A pair (*pot*, *timing_future*) where *pot* is + a new (local) expansion array, see :meth:`local_expansion_zeros`. 
+ """ + + @abstractmethod + def eval_multipoles(self, taw: TraversalAndWrangler, + target_boxes_by_source_level, from_sep_smaller_by_level, mpole_exps): + """For a level *i*, each box in *target_boxes_by_source_level[i]*, evaluate + the multipole expansion in *mpole_exps* in the nearby boxes given in + *from_sep_smaller_by_level*, and return a new potential array. + *starts* and *lists* in *from_sep_smaller_by_level[i]* use :ref:`csr` + and *starts* is indexed like *target_boxes_by_source_level[i]*. + + :returns: A pair (*pot*, *timing_future*) where *pot* is a new potential + array, see :meth:`output_zeros`. + """ + + @abstractmethod + def form_locals(self, taw: TraversalAndWrangler, + level_start_target_or_target_parent_box_nrs, + target_or_target_parent_boxes, starts, lists, src_weight_vecs): + """For each box in *target_or_target_parent_boxes*, form local + expansions due to the sources in the nearby boxes given in *starts* and + *lists*, and return a new local expansion array. *starts* and *lists* + use :ref:`csr` and *starts* is indexed like + *target_or_target_parent_boxes*. + + :returns: A pair (*pot*, *timing_future*) where *pot* is a new + local expansion array, see :meth:`local_expansion_zeros`. + """ + + @abstractmethod + def refine_locals(self, taw: TraversalAndWrangler, + level_start_target_or_target_parent_box_nrs, + target_or_target_parent_boxes, local_exps): + """For each box in *child_boxes*, + translate the box's parent's local expansion in *local_exps* and add + the resulting expansion into the box's local expansion in *local_exps*. + + :returns: A pair (*local_exps*, *timing_future*). + """ + + @abstractmethod + def eval_locals(self, taw: TraversalAndWrangler, + level_start_target_box_nrs, target_boxes, local_exps): + """For each box in *target_boxes*, evaluate the local expansion in + *local_exps* and return a new potential array. + + :returns: A pair (*pot*, *timing_future*) where *pot* is a new potential + array, see :meth:`output_zeros`. + """ + + @abstractmethod + def finalize_potentials(self, taw: TraversalAndWrangler, potentials): + """ + Postprocess the reordered potentials. This is where global scaling + factors could be applied. This is distinct from :meth:`reorder_potentials` + because some derived FMMs (notably the QBX FMM) do their own reordering. + """ + +# }}} + + +def drive_fmm(taw: TraversalAndWrangler, src_weight_vecs, timing_data=None): """Top-level driver routine for a fast multipole calculation. In part, this is intended as a template for custom FMMs, in the sense that @@ -46,7 +272,6 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None): Nonetheless, many common applications (such as point-to-point FMMs) can be covered by supplying the right *expansion_wrangler* to this routine. - :arg traversal: A :class:`boxtree.traversal.FMMTraversalInfo` instance. :arg expansion_wrangler: An object exhibiting the :class:`ExpansionWranglerInterface`. :arg src_weight_vecs: A sequence of source 'density/weights/charges'. @@ -58,7 +283,8 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None): Returns the potentials computed by *expansion_wrangler*. """ - wrangler = expansion_wrangler + wrangler = taw.wrangler + traversal = taw.traversal # Interface guidelines: Attributes of the tree are assumed to be known # to the expansion wrangler and should not be passed. 
@@ -66,12 +292,13 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None): fmm_proc = ProcessLogger(logger, "fmm") recorder = TimingRecorder() - src_weight_vecs = [wrangler.reorder_sources(weight) for + src_weight_vecs = [wrangler.reorder_sources(taw, weight) for weight in src_weight_vecs] # {{{ "Step 2.1:" Construct local multipoles mpole_exps, timing_future = wrangler.form_multipoles( + taw, traversal.level_start_source_box_nrs, traversal.source_boxes, src_weight_vecs) @@ -83,6 +310,7 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None): # {{{ "Step 2.2:" Propagate multipoles upward mpole_exps, timing_future = wrangler.coarsen_multipoles( + taw, traversal.level_start_source_parent_box_nrs, traversal.source_parent_boxes, mpole_exps) @@ -96,6 +324,7 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None): # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") potentials, timing_future = wrangler.eval_direct( + taw, traversal.target_boxes, traversal.neighbor_source_boxes_starts, traversal.neighbor_source_boxes_lists, @@ -110,6 +339,7 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None): # {{{ "Stage 4:" translate separated siblings' ("list 2") mpoles to local local_exps, timing_future = wrangler.multipole_to_local( + taw, traversal.level_start_target_or_target_parent_box_nrs, traversal.target_or_target_parent_boxes, traversal.from_sep_siblings_starts, @@ -128,6 +358,7 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None): # contribution *out* of the downward-propagating local expansions) mpole_result, timing_future = wrangler.eval_multipoles( + taw, traversal.target_boxes_sep_smaller_by_source_level, traversal.from_sep_smaller_by_level, mpole_exps) @@ -143,6 +374,7 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None): "('list 3 close')") direct_result, timing_future = wrangler.eval_direct( + taw, traversal.target_boxes, traversal.from_sep_close_smaller_starts, traversal.from_sep_close_smaller_lists, @@ -157,6 +389,7 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None): # {{{ "Stage 6:" form locals for separated bigger source boxes ("list 4") local_result, timing_future = wrangler.form_locals( + taw, traversal.level_start_target_or_target_parent_box_nrs, traversal.target_or_target_parent_boxes, traversal.from_sep_bigger_starts, @@ -169,6 +402,7 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None): if traversal.from_sep_close_bigger_starts is not None: direct_result, timing_future = wrangler.eval_direct( + taw, traversal.target_boxes, traversal.from_sep_close_bigger_starts, traversal.from_sep_close_bigger_lists, @@ -183,6 +417,7 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None): # {{{ "Stage 7:" propagate local_exps downward local_exps, timing_future = wrangler.refine_locals( + taw, traversal.level_start_target_or_target_parent_box_nrs, traversal.target_or_target_parent_boxes, local_exps) @@ -194,6 +429,7 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None): # {{{ "Stage 8:" evaluate locals local_result, timing_future = wrangler.eval_locals( + taw, traversal.level_start_target_box_nrs, traversal.target_boxes, local_exps) @@ -204,9 +440,9 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None): # }}} - result = wrangler.reorder_potentials(potentials) + result 
= wrangler.reorder_potentials(taw, potentials) - result = wrangler.finalize_potentials(result) + result = wrangler.finalize_potentials(taw, result) fmm_proc.done() @@ -216,154 +452,6 @@ def drive_fmm(traversal, expansion_wrangler, src_weight_vecs, timing_data=None): return result -# {{{ expansion wrangler interface - -class ExpansionWranglerInterface: - """Abstract expansion handling interface for use with :func:`drive_fmm`. - - See this - `test code `_ - for a very simple sample implementation. - - Will usually hold a reference (and thereby be specific to) a - :class:`boxtree.Tree` instance. - - Functions that support returning timing data return a value supporting the - :class:`TimingFuture` interface. - - .. versionchanged:: 2018.1 - - Changed (a subset of) functions to return timing data. - """ - - def multipole_expansion_zeros(self): - """Return an expansions array (which must support addition) - capable of holding one multipole or local expansion for every - box in the tree. - """ - - def local_expansion_zeros(self): - """Return an expansions array (which must support addition) - capable of holding one multipole or local expansion for every - box in the tree. - """ - - def output_zeros(self): - """Return a potentials array (which must support addition) capable of - holding a potential value for each target in the tree. Note that - :func:`drive_fmm` makes no assumptions about *potential* other than - that it supports addition--it may consist of potentials, gradients of - the potential, or arbitrary other per-target output data. - """ - - def reorder_sources(self, source_array): - """Return a copy of *source_array* in - :ref:`tree source order `. - *source_array* is in user source order. - """ - - def reorder_potentials(self, potentials): - """Return a copy of *potentials* in - :ref:`user target order `. - *source_weights* is in tree target order. - """ - - def form_multipoles(self, level_start_source_box_nrs, source_boxes, - src_weight_vecs): - """Return an expansions array (compatible with - :meth:`multipole_expansion_zeros`) - containing multipole expansions in *source_boxes* due to sources - with *src_weight_vecs*. - All other expansions must be zero. - - :return: A pair (*mpoles*, *timing_future*). - """ - - def coarsen_multipoles(self, level_start_source_parent_box_nrs, - source_parent_boxes, mpoles): - """For each box in *source_parent_boxes*, - gather (and translate) the box's children's multipole expansions in - *mpole* and add the resulting expansion into the box's multipole - expansion in *mpole*. - - :returns: A pair (*mpoles*, *timing_future*). - """ - - def eval_direct(self, target_boxes, neighbor_sources_starts, - neighbor_sources_lists, src_weight_vecs): - """For each box in *target_boxes*, evaluate the influence of the - neighbor sources due to *src_weight_vecs*, which use :ref:`csr` and are - indexed like *target_boxes*. - - :returns: A pair (*pot*, *timing_future*), where *pot* is a - a new potential array, see :meth:`output_zeros`. - """ - - def multipole_to_local(self, - level_start_target_or_target_parent_box_nrs, - target_or_target_parent_boxes, - starts, lists, mpole_exps): - """For each box in *target_or_target_parent_boxes*, translate and add - the influence of the multipole expansion in *mpole_exps* into a new - array of local expansions. *starts* and *lists* use :ref:`csr`, and - *starts* is indexed like *target_or_target_parent_boxes*. 
- - :returns: A pair (*pot*, *timing_future*) where *pot* is - a new (local) expansion array, see :meth:`local_expansion_zeros`. - """ - - def eval_multipoles(self, - target_boxes_by_source_level, from_sep_smaller_by_level, mpole_exps): - """For a level *i*, each box in *target_boxes_by_source_level[i]*, evaluate - the multipole expansion in *mpole_exps* in the nearby boxes given in - *from_sep_smaller_by_level*, and return a new potential array. - *starts* and *lists* in *from_sep_smaller_by_level[i]* use :ref:`csr` - and *starts* is indexed like *target_boxes_by_source_level[i]*. - - :returns: A pair (*pot*, *timing_future*) where *pot* is a new potential - array, see :meth:`output_zeros`. - """ - - def form_locals(self, - level_start_target_or_target_parent_box_nrs, - target_or_target_parent_boxes, starts, lists, src_weight_vecs): - """For each box in *target_or_target_parent_boxes*, form local - expansions due to the sources in the nearby boxes given in *starts* and - *lists*, and return a new local expansion array. *starts* and *lists* - use :ref:`csr` and *starts* is indexed like - *target_or_target_parent_boxes*. - - :returns: A pair (*pot*, *timing_future*) where *pot* is a new - local expansion array, see :meth:`local_expansion_zeros`. - """ - - def refine_locals(self, level_start_target_or_target_parent_box_nrs, - target_or_target_parent_boxes, local_exps): - """For each box in *child_boxes*, - translate the box's parent's local expansion in *local_exps* and add - the resulting expansion into the box's local expansion in *local_exps*. - - :returns: A pair (*local_exps*, *timing_future*). - """ - - def eval_locals(self, level_start_target_box_nrs, target_boxes, local_exps): - """For each box in *target_boxes*, evaluate the local expansion in - *local_exps* and return a new potential array. - - :returns: A pair (*pot*, *timing_future*) where *pot* is a new potential - array, see :meth:`output_zeros`. - """ - - def finalize_potentials(self, potentials): - """ - Postprocess the reordered potentials. This is where global scaling - factors could be applied. This is distinct from :meth:`reorder_potentials` - because some derived FMMs (notably the QBX FMM) do their own reordering. - """ - -# }}} - - # {{{ timing result class TimingResult(Mapping): diff --git a/boxtree/pyfmmlib_integration.py b/boxtree/pyfmmlib_integration.py index 6bdad04..54739b5 100644 --- a/boxtree/pyfmmlib_integration.py +++ b/boxtree/pyfmmlib_integration.py @@ -1,4 +1,17 @@ -"""Integration between boxtree and pyfmmlib.""" +""" +Integrates :mod:`boxtree` with +`pyfmmlib `_. + +.. autoclass:: FMMLibTraversalAndWrangler +.. autoclass:: FMMLibExpansionWrangler + +Internal bits +^^^^^^^^^^^^^ + +.. autoclass:: FMMLibRotationDataInterface +.. autoclass:: FMMLibRotationData +.. autoclass:: FMMLibRotationDataNotSuppliedWarning +""" __copyright__ = "Copyright (C) 2013 Andreas Kloeckner" @@ -23,17 +36,14 @@ THE SOFTWARE. """ -import numpy as np -from pytools import memoize_method, log_process -from boxtree.tools import return_timing_data - import logging logger = logging.getLogger(__name__) +import numpy as np -__doc__ = """Integrates :mod:`boxtree` with -`pyfmmlib `_. 
-""" +from pytools import memoize_method, log_process +from boxtree.tools import return_timing_data +from boxtree.fmm import TraversalAndWrangler # {{{ rotation data interface @@ -100,18 +110,15 @@ class FMMLibRotationDataNotSuppliedWarning(UserWarning): # }}} -class FMMLibExpansionWrangler: - """Implements the :class:`boxtree.fmm.ExpansionWranglerInterface` - by using pyfmmlib. +# {{{ tree-dependent wrangler info for fmmlib - Timing results returned by this wrangler contains the values *wall_elapsed* - and (optionally, if supported) *process_elapsed*, which measure wall time - and process time in seconds, respectively. +class FMMLibTraversalAndWrangler(TraversalAndWrangler): + """ + .. automethod:: __init__ """ - # {{{ constructor - def __init__(self, tree, helmholtz_k, fmm_level_to_nterms=None, ifgrad=False, + def __init__(self, traversal, wrangler, fmm_level_to_nterms=None, dipole_vec=None, dipoles_already_reordered=False, nterms=None, optimized_m2l_precomputation_memory_cutoff_bytes=10**8, rotation_data=None): @@ -140,31 +147,22 @@ class FMMLibExpansionWrangler: def fmm_level_to_nterms(tree, level): # noqa pylint:disable=function-redefined return nterms - self.tree = tree + super().__init__(traversal, wrangler) - if helmholtz_k == 0: - self.eqn_letter = "l" - self.kernel_kwargs = {} - self.rscale_factor = 1 - else: - self.eqn_letter = "h" - self.kernel_kwargs = {"zk": helmholtz_k} - self.rscale_factor = abs(helmholtz_k) + tree = traversal.tree + + if wrangler.dim != self.dim: + raise ValueError(f"Expansion wrangler dim ({wrangler.dim}) " + f"does not match tree dim ({self.dim})") self.level_nterms = np.array([ fmm_level_to_nterms(tree, lev) for lev in range(tree.nlevels) ], dtype=np.int32) - if helmholtz_k: + if wrangler.helmholtz_k: logger.info("expansion orders by level used in Helmholtz FMM: %s", self.level_nterms) - self.dtype = np.complex128 - - self.ifgrad = ifgrad - - self.dim = tree.dimensions - self.rotation_data = rotation_data self.rotmat_cutoff_bytes = optimized_m2l_precomputation_memory_cutoff_bytes @@ -174,7 +172,7 @@ class FMMLibExpansionWrangler: warn( "List 2 (multipole-to-local) translations will be " "unoptimized. Supply a rotation_data argument to " - "the wrangler for optimized List 2.", + "FMMLibTraversalAndWrangler for optimized List 2.", FMMLibRotationDataNotSuppliedWarning, stacklevel=2) @@ -182,25 +180,30 @@ class FMMLibExpansionWrangler: else: self.supports_optimized_m2l = False - if dipole_vec is not None: + # FIXME: dipole_vec shouldn't be stored here! Otherwise, we'll recompute + # bunches of tree-dependent stuff for every new dipole vector. + + # It's not super bad because the dipole vectors are typically geometry + # normals and thus change about at the same time as the tree... but there's + # still no reason for them to be here. 
+ self.use_dipoles = dipole_vec is not None + if self.use_dipoles: assert dipole_vec.shape == (self.dim, self.tree.nsources) if not dipoles_already_reordered: - dipole_vec = self.reorder_sources(dipole_vec) + dipole_vec = wrangler.reorder_sources(self, dipole_vec) self.dipole_vec = dipole_vec.copy(order="F") - self.dp_suffix = "_dp" else: self.dipole_vec = None - self.dp_suffix = "" # }}} def level_to_rscale(self, level): - result = self.tree.root_extent * 2 ** -level * self.rscale_factor + result = self.tree.root_extent * 2 ** -level * self.wrangler.rscale_factor if abs(result) > 1: result = 1 - if self.dim == 3 and self.eqn_letter == "l": + if self.dim == 3 and self.wrangler.eqn_letter == "l": # Laplace 3D uses the opposite convention compared to # all other cases. # https://gitlab.tiker.net/inducer/boxtree/merge_requests/81 @@ -218,7 +221,7 @@ class FMMLibExpansionWrangler: common_extra_kwargs = {} - if self.dim == 3 and self.eqn_letter == "h": + if self.dim == 3 and self.wrangler.eqn_letter == "h": nquad = max(6, int(2.5*nterms)) from pyfmmlib import legewhts xnodes, weights = legewhts(nquad, ifwhts=1) @@ -243,6 +246,205 @@ class FMMLibExpansionWrangler: # }}} + # {{{ level starts + + def _expansions_level_starts(self, order_to_size): + result = [0] + for lev in range(self.tree.nlevels): + lev_nboxes = ( + self.tree.level_start_box_nrs[lev+1] + - self.tree.level_start_box_nrs[lev]) + + expn_size = order_to_size(self.level_nterms[lev]) + result.append( + result[-1] + + expn_size * lev_nboxes) + + return result + + @memoize_method + def multipole_expansions_level_starts(self): + from pytools import product + return self._expansions_level_starts( + lambda nterms: product( + self.wrangler.expansion_shape(nterms))) + + @memoize_method + def local_expansions_level_starts(self): + from pytools import product + return self._expansions_level_starts( + lambda nterms: product( + self.wrangler.expansion_shape(nterms))) + + # }}} + + # {{{ views into arrays of expansions + + def multipole_expansions_view(self, mpole_exps, level): + box_start, box_stop = self.tree.level_start_box_nrs[level:level+2] + + expn_start, expn_stop = \ + self.multipole_expansions_level_starts()[level:level+2] + return (box_start, + mpole_exps[expn_start:expn_stop].reshape( + box_stop-box_start, + *self.wrangler.expansion_shape(self.level_nterms[level]))) + + def local_expansions_view(self, local_exps, level): + box_start, box_stop = self.tree.level_start_box_nrs[level:level+2] + + expn_start, expn_stop = \ + self.local_expansions_level_starts()[level:level+2] + return (box_start, + local_exps[expn_start:expn_stop].reshape( + box_stop-box_start, + *self.wrangler.expansion_shape(self.level_nterms[level]))) + + # }}} + + def get_source_kwargs(self, src_weights, pslice): + if self.dipole_vec is None: + return { + "charge": src_weights[pslice], + } + else: + if self.wrangler.eqn_letter == "l" and self.dim == 2: + return { + "dipstr": -src_weights[pslice] * ( + self.dipole_vec[0, pslice] + + 1j * self.dipole_vec[1, pslice]) + } + else: + return { + "dipstr": src_weights[pslice], + "dipvec": self.dipole_vec[:, pslice], + } + + # {{{ source/target particle wrangling + + def _get_source_slice(self, ibox): + pstart = self.tree.box_source_starts[ibox] + return slice( + pstart, pstart + self.tree.box_source_counts_nonchild[ibox]) + + def _get_target_slice(self, ibox): + pstart = self.box_target_starts()[ibox] + return slice( + pstart, pstart + self.box_target_counts_nonchild()[ibox]) + + @memoize_method + def 
_get_single_sources_array(self): + return np.array([ + self.tree.sources[idim] + for idim in range(self.dim) + ], order="F") + + def _get_sources(self, pslice): + return self._get_single_sources_array()[:, pslice] + + @memoize_method + def _get_single_targets_array(self): + return np.array([ + self.targets()[idim] + for idim in range(self.dim) + ], order="F") + + def _get_targets(self, pslice): + return self._get_single_targets_array()[:, pslice] + + @memoize_method + def _get_single_box_centers_array(self): + return np.array([ + self.tree.box_centers[idim] + for idim in range(self.dim) + ], order="F") + + # }}} + + # {{{ precompute rotation matrices for optimized m2l + + @memoize_method + def m2l_rotation_matrices(self): + # Returns a tuple (rotmatf, rotmatb, rotmat_order), consisting of the + # forward rotation matrices, backward rotation matrices, and the + # translation order of the matrices. rotmat_order is -1 if not + # supported. + + rotmatf = None + rotmatb = None + rotmat_order = -1 + + if not self.supports_optimized_m2l: + return (rotmatf, rotmatb, rotmat_order) + + m2l_rotation_angles = self.rotation_data.m2l_rotation_angles() + + if len(m2l_rotation_angles) == 0: + # The pyfmmlib wrapper may or may not complain if you give it a + # zero-length array. + return (rotmatf, rotmatb, rotmat_order) + + def mem_estimate(order): + # Rotation matrix memory cost estimate. + return (8 + * (order + 1)**2 + * (2*order + 1) + * len(m2l_rotation_angles)) + + # Find the largest order we can use. Because the memory cost of the + # matrices could be large, only precompute them if the cost estimate + # for the order does not exceed the cutoff. + for order in sorted(self.level_nterms, reverse=True): + if mem_estimate(order) < self.rotmat_cutoff_bytes: + rotmat_order = order + break + + if rotmat_order == -1: + return (rotmatf, rotmatb, rotmat_order) + + # Compute the rotation matrices. + from pyfmmlib import rotviarecur3p_init_vec as rotmat_builder + + ier, rotmatf = ( + rotmat_builder(rotmat_order, m2l_rotation_angles)) + assert (0 == ier).all() + + ier, rotmatb = ( + rotmat_builder(rotmat_order, -m2l_rotation_angles)) + assert (0 == ier).all() + + return (rotmatf, rotmatb, rotmat_order) + + # }}} + +# }}} + + +class FMMLibExpansionWrangler: + """Implements the :class:`boxtree.fmm.ExpansionWranglerInterface` + by using pyfmmlib. + + Timing results returned by this wrangler contains the values *wall_elapsed* + and (optionally, if supported) *process_elapsed*, which measure wall time + and process time in seconds, respectively. 
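As a quick sanity check of the memory cutoff logic above: ``mem_estimate`` charges 8 bytes per matrix entry, ``(order + 1)**2 * (2*order + 1)`` entries per rotation matrix, and one matrix per rotation angle. With, say, 1000 distinct rotation angles (an arbitrary number chosen only for this example), the default cutoff of 10**8 bytes admits order 10 but not order 20:

    def rotmat_mem_estimate(order, num_angles):
        # Same formula as mem_estimate above.
        return 8 * (order + 1)**2 * (2*order + 1) * num_angles

    print(rotmat_mem_estimate(10, 1000))  # 20_328_000  (~20 MB)  -> fits
    print(rotmat_mem_estimate(20, 1000))  # 144_648_000 (~145 MB) -> too large

Since the loop scans the expansion orders from largest to smallest and stops at the first one below the cutoff, the matrices in this scenario would be precomputed at order 10.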
+ """ + + def __init__(self, dim, helmholtz_k, ifgrad=False): + self.dim = dim + self.helmholtz_k = helmholtz_k + self.ifgrad = ifgrad + + if helmholtz_k == 0: + self.eqn_letter = "l" + self.kernel_kwargs = {} + self.rscale_factor = 1 + else: + self.eqn_letter = "h" + self.kernel_kwargs = {"zk": helmholtz_k} + self.rscale_factor = abs(helmholtz_k) + + self.dtype = np.complex128 + # {{{ routine getters def get_routine(self, name, suffix=""): @@ -255,7 +457,7 @@ class FMMLibExpansionWrangler: def get_vec_routine(self, name): return self.get_routine(name, "_vec") - def get_translation_routine(self, name, vec_suffix="_vec"): + def get_translation_routine(self, taw, name, vec_suffix="_vec"): suffix = "" if self.dim == 3: suffix = "quadu" @@ -274,7 +476,7 @@ class FMMLibExpansionWrangler: def wrapper(*args, **kwargs): kwargs.pop("level_for_projection", None) nterms2 = kwargs["nterms2"] - kwargs.update(self.projection_quad_extra_kwargs(nterms=nterms2)) + kwargs.update(taw.projection_quad_extra_kwargs(nterms=nterms2)) val, ier = rout(*args, **kwargs) if (ier != 0).any(): @@ -287,9 +489,10 @@ class FMMLibExpansionWrangler: # update_wrapper(wrapper, rout) return wrapper - def get_direct_eval_routine(self): + def get_direct_eval_routine(self, use_dipoles): if self.dim == 2: - rout = self.get_vec_routine("potgrad%ddall" + self.dp_suffix) + rout = self.get_vec_routine( + "potgrad%ddall" + ("_dp" if use_dipoles else "")) def wrapper(*args, **kwargs): kwargs["ifgrad"] = self.ifgrad @@ -307,7 +510,8 @@ class FMMLibExpansionWrangler: return wrapper elif self.dim == 3: - rout = self.get_vec_routine("potfld%ddall" + self.dp_suffix) + rout = self.get_vec_routine( + "potfld%ddall" + ("_dp" if use_dipoles else "")) def wrapper(*args, **kwargs): kwargs["iffld"] = self.ifgrad @@ -383,70 +587,24 @@ class FMMLibExpansionWrangler: else: raise ValueError("unsupported dimensionality") - def _expansions_level_starts(self, order_to_size): - result = [0] - for lev in range(self.tree.nlevels): - lev_nboxes = ( - self.tree.level_start_box_nrs[lev+1] - - self.tree.level_start_box_nrs[lev]) - - expn_size = order_to_size(self.level_nterms[lev]) - result.append( - result[-1] - + expn_size * lev_nboxes) - - return result - - @memoize_method - def multipole_expansions_level_starts(self): - from pytools import product - return self._expansions_level_starts( - lambda nterms: product(self.expansion_shape(nterms))) - - @memoize_method - def local_expansions_level_starts(self): - from pytools import product - return self._expansions_level_starts( - lambda nterms: product(self.expansion_shape(nterms))) - - def multipole_expansions_view(self, mpole_exps, level): - box_start, box_stop = self.tree.level_start_box_nrs[level:level+2] - - expn_start, expn_stop = \ - self.multipole_expansions_level_starts()[level:level+2] - return (box_start, - mpole_exps[expn_start:expn_stop].reshape( - box_stop-box_start, - *self.expansion_shape(self.level_nterms[level]))) - - def local_expansions_view(self, local_exps, level): - box_start, box_stop = self.tree.level_start_box_nrs[level:level+2] - - expn_start, expn_stop = \ - self.local_expansions_level_starts()[level:level+2] - return (box_start, - local_exps[expn_start:expn_stop].reshape( - box_stop-box_start, - *self.expansion_shape(self.level_nterms[level]))) - - def multipole_expansion_zeros(self): + def multipole_expansion_zeros(self, taw): return np.zeros( - self.multipole_expansions_level_starts()[-1], + taw.multipole_expansions_level_starts()[-1], dtype=self.dtype) - def 
local_expansion_zeros(self): + def local_expansion_zeros(self, taw): return np.zeros( - self.local_expansions_level_starts()[-1], + taw.local_expansions_level_starts()[-1], dtype=self.dtype) - def output_zeros(self): + def output_zeros(self, taw): if self.ifgrad: from pytools.obj_array import make_obj_array return make_obj_array([ - np.zeros(self.tree.ntargets, self.dtype) - for i in range(1 + self.dim)]) + np.zeros(taw.tree.ntargets, self.dtype) + for i in range(1 + taw.dim)]) else: - return np.zeros(self.tree.ntargets, self.dtype) + return np.zeros(taw.tree.ntargets, self.dtype) def add_potgrad_onto_output(self, output, output_slice, pot, grad): if self.ifgrad: @@ -457,106 +615,47 @@ class FMMLibExpansionWrangler: # }}} - # {{{ source/target particle wrangling - - def _get_source_slice(self, ibox): - pstart = self.tree.box_source_starts[ibox] - return slice( - pstart, pstart + self.tree.box_source_counts_nonchild[ibox]) - - def _get_target_slice(self, ibox): - pstart = self.box_target_starts()[ibox] - return slice( - pstart, pstart + self.box_target_counts_nonchild()[ibox]) - - @memoize_method - def _get_single_sources_array(self): - return np.array([ - self.tree.sources[idim] - for idim in range(self.dim) - ], order="F") - - def _get_sources(self, pslice): - return self._get_single_sources_array()[:, pslice] - - @memoize_method - def _get_single_targets_array(self): - return np.array([ - self.targets()[idim] - for idim in range(self.dim) - ], order="F") - - def _get_targets(self, pslice): - return self._get_single_targets_array()[:, pslice] - - @memoize_method - def _get_single_box_centers_array(self): - return np.array([ - self.tree.box_centers[idim] - for idim in range(self.dim) - ], order="F") - - # }}} - @log_process(logger) - def reorder_sources(self, source_array): - return source_array[..., self.tree.user_source_ids] + def reorder_sources(self, taw, source_array): + return source_array[..., taw.tree.user_source_ids] @log_process(logger) - def reorder_potentials(self, potentials): - return potentials[self.tree.sorted_target_ids] - - def get_source_kwargs(self, src_weights, pslice): - if self.dipole_vec is None: - return { - "charge": src_weights[pslice], - } - else: - if self.eqn_letter == "l" and self.dim == 2: - return { - "dipstr": -src_weights[pslice] * ( - self.dipole_vec[0, pslice] - + 1j * self.dipole_vec[1, pslice]) - } - else: - return { - "dipstr": src_weights[pslice], - "dipvec": self.dipole_vec[:, pslice], - } + def reorder_potentials(self, taw, potentials): + return potentials[taw.tree.sorted_target_ids] @log_process(logger) @return_timing_data - def form_multipoles(self, level_start_source_box_nrs, source_boxes, + def form_multipoles(self, taw, level_start_source_box_nrs, source_boxes, src_weight_vecs): src_weights, = src_weight_vecs - formmp = self.get_routine("%ddformmp" + self.dp_suffix) + formmp = self.get_routine("%ddformmp" + ("_dp" if taw.use_dipoles else "")) - mpoles = self.multipole_expansion_zeros() - for lev in range(self.tree.nlevels): + mpoles = self.multipole_expansion_zeros(taw) + for lev in range(taw.tree.nlevels): start, stop = level_start_source_box_nrs[lev:lev+2] if start == stop: continue - level_start_ibox, mpoles_view = self.multipole_expansions_view( + level_start_ibox, mpoles_view = taw.multipole_expansions_view( mpoles, lev) - rscale = self.level_to_rscale(lev) + rscale = taw.level_to_rscale(lev) for src_ibox in source_boxes[start:stop]: - pslice = self._get_source_slice(src_ibox) + pslice = taw._get_source_slice(src_ibox) if pslice.stop - 
pslice.start == 0: continue kwargs = {} kwargs.update(self.kernel_kwargs) - kwargs.update(self.get_source_kwargs(src_weights, pslice)) + kwargs.update(taw.get_source_kwargs(src_weights, pslice)) ier, mpole = formmp( rscale=rscale, - source=self._get_sources(pslice), - center=self.tree.box_centers[:, src_ibox], - nterms=self.level_nterms[lev], + source=taw._get_sources(pslice), + center=taw.tree.box_centers[:, src_ibox], + nterms=taw.level_nterms[lev], **kwargs) if ier: @@ -568,11 +667,11 @@ class FMMLibExpansionWrangler: @log_process(logger) @return_timing_data - def coarsen_multipoles(self, level_start_source_parent_box_nrs, + def coarsen_multipoles(self, taw, level_start_source_parent_box_nrs, source_parent_boxes, mpoles): - tree = self.tree + tree = taw.tree - mpmp = self.get_translation_routine("%ddmpmp") + mpmp = self.get_translation_routine(taw, "%ddmpmp") # nlevels-1 is the last valid level index # nlevels-2 is the last valid level that could have children @@ -586,12 +685,12 @@ class FMMLibExpansionWrangler: target_level:target_level+2] source_level_start_ibox, source_mpoles_view = \ - self.multipole_expansions_view(mpoles, source_level) + taw.multipole_expansions_view(mpoles, source_level) target_level_start_ibox, target_mpoles_view = \ - self.multipole_expansions_view(mpoles, target_level) + taw.multipole_expansions_view(mpoles, target_level) - source_rscale = self.level_to_rscale(source_level) - target_rscale = self.level_to_rscale(target_level) + source_rscale = taw.level_to_rscale(source_level) + target_rscale = taw.level_to_rscale(target_level) for ibox in source_parent_boxes[start:stop]: parent_center = tree.box_centers[:, ibox] @@ -613,7 +712,7 @@ class FMMLibExpansionWrangler: rscale2=target_rscale, center2=parent_center, - nterms2=self.level_nterms[target_level], + nterms2=taw.level_nterms[target_level], **kwargs) @@ -624,15 +723,15 @@ class FMMLibExpansionWrangler: @log_process(logger) @return_timing_data - def eval_direct(self, target_boxes, neighbor_sources_starts, + def eval_direct(self, taw, target_boxes, neighbor_sources_starts, neighbor_sources_lists, src_weight_vecs): src_weights, = src_weight_vecs - output = self.output_zeros() + output = self.output_zeros(taw) - ev = self.get_direct_eval_routine() + ev = self.get_direct_eval_routine(taw.use_dipoles) for itgt_box, tgt_ibox in enumerate(target_boxes): - tgt_pslice = self._get_target_slice(tgt_ibox) + tgt_pslice = taw._get_target_slice(tgt_ibox) if tgt_pslice.stop - tgt_pslice.start == 0: continue @@ -643,18 +742,18 @@ class FMMLibExpansionWrangler: start, end = neighbor_sources_starts[itgt_box:itgt_box+2] for src_ibox in neighbor_sources_lists[start:end]: - src_pslice = self._get_source_slice(src_ibox) + src_pslice = taw._get_source_slice(src_ibox) if src_pslice.stop - src_pslice.start == 0: continue kwargs = {} kwargs.update(self.kernel_kwargs) - kwargs.update(self.get_source_kwargs(src_weights, src_pslice)) + kwargs.update(taw.get_source_kwargs(src_weights, src_pslice)) tmp_pot, tmp_grad = ev( - sources=self._get_sources(src_pslice), - targets=self._get_targets(tgt_pslice), + sources=taw._get_sources(src_pslice), + targets=taw._get_targets(tgt_pslice), **kwargs) tgt_pot_result += tmp_pot @@ -665,98 +764,43 @@ class FMMLibExpansionWrangler: return output - # {{{ precompute rotation matrices for optimized m2l - - @memoize_method - def m2l_rotation_matrices(self): - # Returns a tuple (rotmatf, rotmatb, rotmat_order), consisting of the - # forward rotation matrices, backward rotation matrices, and the - # translation 
order of the matrices. rotmat_order is -1 if not - # supported. - - rotmatf = None - rotmatb = None - rotmat_order = -1 - - if not self.supports_optimized_m2l: - return (rotmatf, rotmatb, rotmat_order) - - m2l_rotation_angles = self.rotation_data.m2l_rotation_angles() - - if len(m2l_rotation_angles) == 0: - # The pyfmmlib wrapper may or may not complain if you give it a - # zero-length array. - return (rotmatf, rotmatb, rotmat_order) - - def mem_estimate(order): - # Rotation matrix memory cost estimate. - return (8 - * (order + 1)**2 - * (2*order + 1) - * len(m2l_rotation_angles)) - - # Find the largest order we can use. Because the memory cost of the - # matrices could be large, only precompute them if the cost estimate - # for the order does not exceed the cutoff. - for order in sorted(self.level_nterms, reverse=True): - if mem_estimate(order) < self.rotmat_cutoff_bytes: - rotmat_order = order - break - - if rotmat_order == -1: - return (rotmatf, rotmatb, rotmat_order) - - # Compute the rotation matrices. - from pyfmmlib import rotviarecur3p_init_vec as rotmat_builder - - ier, rotmatf = ( - rotmat_builder(rotmat_order, m2l_rotation_angles)) - assert (0 == ier).all() - - ier, rotmatb = ( - rotmat_builder(rotmat_order, -m2l_rotation_angles)) - assert (0 == ier).all() - - return (rotmatf, rotmatb, rotmat_order) - - # }}} - @log_process(logger) @return_timing_data def multipole_to_local(self, - level_start_target_or_target_parent_box_nrs, + taw, level_start_target_or_target_parent_box_nrs, target_or_target_parent_boxes, starts, lists, mpole_exps): - tree = self.tree - local_exps = self.local_expansion_zeros() + tree = taw.tree + local_exps = self.local_expansion_zeros(taw) # Precomputed rotation matrices (matrices of larger order can be used # for translations of smaller order) - rotmatf, rotmatb, rotmat_order = self.m2l_rotation_matrices() + rotmatf, rotmatb, rotmat_order = taw.m2l_rotation_matrices() - for lev in range(self.tree.nlevels): + for lev in range(taw.tree.nlevels): lstart, lstop = level_start_target_or_target_parent_box_nrs[lev:lev+2] if lstart == lstop: continue starts_on_lvl = starts[lstart:lstop+1] - mploc = self.get_translation_routine("%ddmploc", vec_suffix="_imany") + mploc = self.get_translation_routine( + taw, "%ddmploc", vec_suffix="_imany") kwargs = {} # {{{ set up optimized m2l, if applicable - if self.level_nterms[lev] <= rotmat_order: - m2l_rotation_lists = self.rotation_data.m2l_rotation_lists() + if taw.level_nterms[lev] <= rotmat_order: + m2l_rotation_lists = taw.rotation_data.m2l_rotation_lists() assert len(m2l_rotation_lists) == len(lists) mploc = self.get_translation_routine( - "%ddmploc", vec_suffix="2_trunc_imany") + taw, "%ddmploc", vec_suffix="2_trunc_imany") kwargs["ldm"] = rotmat_order - kwargs["nterms"] = self.level_nterms[lev] - kwargs["nterms1"] = self.level_nterms[lev] + kwargs["nterms"] = taw.level_nterms[lev] + kwargs["nterms1"] = taw.level_nterms[lev] kwargs["rotmatf"] = rotmatf kwargs["rotmatf_offsets"] = m2l_rotation_lists @@ -769,9 +813,9 @@ class FMMLibExpansionWrangler: # }}} source_level_start_ibox, source_mpoles_view = \ - self.multipole_expansions_view(mpole_exps, lev) + taw.multipole_expansions_view(mpole_exps, lev) target_level_start_ibox, target_local_exps_view = \ - self.local_expansions_view(local_exps, lev) + taw.local_expansions_view(local_exps, lev) ntgt_boxes = lstop-lstart itgt_box_vec = np.arange(ntgt_boxes) @@ -786,7 +830,7 @@ class FMMLibExpansionWrangler: src_boxes_starts[0] = 0 src_boxes_starts[1:] = 
np.cumsum(nsrc_boxes_per_tgt_box) - rscale = self.level_to_rscale(lev) + rscale = taw.level_to_rscale(lev) rscale1 = np.ones(nsrc_boxes) * rscale rscale1_offsets = np.arange(nsrc_boxes) @@ -804,7 +848,7 @@ class FMMLibExpansionWrangler: kwargs["ier"] = ier expn2 = np.zeros( - (ntgt_boxes,) + self.expansion_shape(self.level_nterms[lev]), + (ntgt_boxes,) + self.expansion_shape(taw.level_nterms[lev]), dtype=self.dtype) kwargs.update(self.kernel_kwargs) @@ -827,7 +871,7 @@ class FMMLibExpansionWrangler: center2=tree.box_centers[:, tgt_ibox_vec], expn2=expn2.T, - nterms2=self.level_nterms[lev], + nterms2=taw.level_nterms[lev], **kwargs).T @@ -838,21 +882,22 @@ class FMMLibExpansionWrangler: @log_process(logger) @return_timing_data def eval_multipoles(self, + taw, target_boxes_by_source_level, sep_smaller_nonsiblings_by_level, mpole_exps): - output = self.output_zeros() + output = self.output_zeros(taw) mpeval = self.get_expn_eval_routine("mp") for isrc_level, ssn in enumerate(sep_smaller_nonsiblings_by_level): source_level_start_ibox, source_mpoles_view = \ - self.multipole_expansions_view(mpole_exps, isrc_level) + taw.multipole_expansions_view(mpole_exps, isrc_level) - rscale = self.level_to_rscale(isrc_level) + rscale = taw.level_to_rscale(isrc_level) for itgt_box, tgt_ibox in \ enumerate(target_boxes_by_source_level[isrc_level]): - tgt_pslice = self._get_target_slice(tgt_ibox) + tgt_pslice = taw._get_target_slice(tgt_ibox) if tgt_pslice.stop - tgt_pslice.start == 0: continue @@ -864,10 +909,10 @@ class FMMLibExpansionWrangler: tmp_pot, tmp_grad = mpeval( rscale=rscale, - center=self.tree.box_centers[:, src_ibox], + center=taw.tree.box_centers[:, src_ibox], expn=source_mpoles_view[ src_ibox - source_level_start_ibox].T, - ztarg=self._get_targets(tgt_pslice), + ztarg=taw._get_targets(tgt_pslice), **self.kernel_kwargs) tgt_pot = tgt_pot + tmp_pot @@ -881,32 +926,34 @@ class FMMLibExpansionWrangler: @log_process(logger) @return_timing_data def form_locals(self, + taw, level_start_target_or_target_parent_box_nrs, target_or_target_parent_boxes, starts, lists, src_weight_vecs): src_weights, = src_weight_vecs - local_exps = self.local_expansion_zeros() + local_exps = self.local_expansion_zeros(taw) - formta = self.get_routine("%ddformta" + self.dp_suffix, suffix="_imany") + formta = self.get_routine("%ddformta" + ("_dp" if taw.use_dipoles else ""), + suffix="_imany") - sources = self._get_single_sources_array() + sources = taw._get_single_sources_array() # sources_starts / sources_lists is a CSR list mapping box centers to # lists of starting indices into the sources array. To get the starting # source indices we have to look at box_source_starts. - sources_offsets = self.tree.box_source_starts[lists] + sources_offsets = taw.tree.box_source_starts[lists] # nsources_starts / nsources_lists is a CSR list mapping box centers to # lists of indices into nsources, each of which represents a source # count. - nsources = self.tree.box_source_counts_nonchild + nsources = taw.tree.box_source_counts_nonchild nsources_offsets = lists # centers is indexed into by values of centers_offsets, which is a list # mapping box indices to box center indices. 
- centers = self._get_single_box_centers_array() + centers = taw._get_single_box_centers_array() - source_kwargs = self.get_source_kwargs(src_weights, slice(None)) + source_kwargs = taw.get_source_kwargs(src_weights, slice(None)) - for lev in range(self.tree.nlevels): + for lev in range(taw.tree.nlevels): lev_start, lev_stop = \ level_start_target_or_target_parent_box_nrs[lev:lev+2] @@ -914,11 +961,11 @@ class FMMLibExpansionWrangler: continue target_box_start, target_local_exps_view = \ - self.local_expansions_view(local_exps, lev) + taw.local_expansions_view(local_exps, lev) centers_offsets = target_or_target_parent_boxes[lev_start:lev_stop] - rscale = self.level_to_rscale(lev) + rscale = taw.level_to_rscale(lev) sources_starts = starts[lev_start:1 + lev_stop] nsources_starts = sources_starts @@ -945,7 +992,7 @@ class FMMLibExpansionWrangler: nsources_offsets=nsources_offsets, centers=centers, centers_offsets=centers_offsets, - nterms=self.level_nterms[lev], + nterms=taw.level_nterms[lev], **kwargs) if ier.any(): @@ -959,32 +1006,32 @@ class FMMLibExpansionWrangler: @log_process(logger) @return_timing_data - def refine_locals(self, level_start_target_or_target_parent_box_nrs, + def refine_locals(self, taw, level_start_target_or_target_parent_box_nrs, target_or_target_parent_boxes, local_exps): - locloc = self.get_translation_routine("%ddlocloc") + locloc = self.get_translation_routine(taw, "%ddlocloc") - for target_lev in range(1, self.tree.nlevels): + for target_lev in range(1, taw.tree.nlevels): start, stop = level_start_target_or_target_parent_box_nrs[ target_lev:target_lev+2] source_lev = target_lev - 1 source_level_start_ibox, source_local_exps_view = \ - self.local_expansions_view(local_exps, source_lev) + taw.local_expansions_view(local_exps, source_lev) target_level_start_ibox, target_local_exps_view = \ - self.local_expansions_view(local_exps, target_lev) - source_rscale = self.level_to_rscale(source_lev) - target_rscale = self.level_to_rscale(target_lev) + taw.local_expansions_view(local_exps, target_lev) + source_rscale = taw.level_to_rscale(source_lev) + target_rscale = taw.level_to_rscale(target_lev) for tgt_ibox in target_or_target_parent_boxes[start:stop]: - tgt_center = self.tree.box_centers[:, tgt_ibox] - src_ibox = self.tree.box_parent_ids[tgt_ibox] - src_center = self.tree.box_centers[:, src_ibox] + tgt_center = taw.tree.box_centers[:, tgt_ibox] + src_ibox = taw.tree.box_parent_ids[tgt_ibox] + src_center = taw.tree.box_centers[:, src_ibox] kwargs = {} if self.dim == 3 and self.eqn_letter == "h": - kwargs["radius"] = self.tree.root_extent * 2**(-target_lev) + kwargs["radius"] = taw.tree.root_extent * 2**(-target_lev) kwargs.update(self.kernel_kwargs) tmp_loc_exp = locloc( @@ -995,7 +1042,7 @@ class FMMLibExpansionWrangler: rscale2=target_rscale, center2=tgt_center, - nterms2=self.level_nterms[target_lev], + nterms2=taw.level_nterms[target_lev], **kwargs)[..., 0] @@ -1006,32 +1053,32 @@ class FMMLibExpansionWrangler: @log_process(logger) @return_timing_data - def eval_locals(self, level_start_target_box_nrs, target_boxes, local_exps): - output = self.output_zeros() + def eval_locals(self, taw, level_start_target_box_nrs, target_boxes, local_exps): + output = self.output_zeros(taw) taeval = self.get_expn_eval_routine("ta") - for lev in range(self.tree.nlevels): + for lev in range(taw.tree.nlevels): start, stop = level_start_target_box_nrs[lev:lev+2] if start == stop: continue source_level_start_ibox, source_local_exps_view = \ - self.local_expansions_view(local_exps, lev) 
+ taw.local_expansions_view(local_exps, lev) - rscale = self.level_to_rscale(lev) + rscale = taw.level_to_rscale(lev) for tgt_ibox in target_boxes[start:stop]: - tgt_pslice = self._get_target_slice(tgt_ibox) + tgt_pslice = taw._get_target_slice(tgt_ibox) if tgt_pslice.stop - tgt_pslice.start == 0: continue tmp_pot, tmp_grad = taeval( rscale=rscale, - center=self.tree.box_centers[:, tgt_ibox], + center=taw.tree.box_centers[:, tgt_ibox], expn=source_local_exps_view[ tgt_ibox - source_level_start_ibox].T, - ztarg=self._get_targets(tgt_pslice), + ztarg=taw._get_targets(tgt_pslice), **self.kernel_kwargs) @@ -1041,20 +1088,20 @@ class FMMLibExpansionWrangler: return output @log_process(logger) - def finalize_potentials(self, potential): - if self.eqn_letter == "l" and self.dim == 2: + def finalize_potentials(self, taw, potential): + if self.eqn_letter == "l" and taw.dim == 2: scale_factor = -1/(2*np.pi) - elif self.eqn_letter == "h" and self.dim == 2: + elif self.eqn_letter == "h" and taw.dim == 2: scale_factor = 1 - elif self.eqn_letter in ["l", "h"] and self.dim == 3: + elif self.eqn_letter in ["l", "h"] and taw.dim == 3: scale_factor = 1/(4*np.pi) else: raise NotImplementedError( "scale factor for pyfmmlib %s for %d dimensions" % ( self.eqn_letter, - self.dim)) + taw.dim)) - if self.eqn_letter == "l" and self.dim == 2: + if self.eqn_letter == "l" and taw.dim == 2: potential = potential.real return potential * scale_factor diff --git a/boxtree/tools.py b/boxtree/tools.py index 975b59b..1f70178 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -29,7 +29,7 @@ from pyopencl.tools import dtype_to_c_struct, VectorArg as _VectorArg from pyopencl.tools import ScalarArg # noqa from mako.template import Template from pytools.obj_array import make_obj_array -from boxtree.fmm import TimingFuture, TimingResult +from boxtree.fmm import TimingFuture, TimingResult, TraversalAndWrangler import loopy as lp from loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_2 # noqa @@ -636,6 +636,18 @@ class InlineBinarySearch: # {{{ constant one wrangler +class ConstantOneTraversalAndWrangler(TraversalAndWrangler): + def _get_source_slice(self, ibox): + pstart = self.tree.box_source_starts[ibox] + return slice( + pstart, pstart + self.tree.box_source_counts_nonchild[ibox]) + + def _get_target_slice(self, ibox): + pstart = self.tree.box_target_starts[ibox] + return slice( + pstart, pstart + self.tree.box_target_counts_nonchild[ibox]) + + class ConstantOneExpansionWrangler: """This implements the 'analytical routines' for a Green's function that is constant 1 everywhere. For 'charges' of 'ones', this should get every particle @@ -645,53 +657,40 @@ class ConstantOneExpansionWrangler: which counts approximately the number of floating-point operations required. 
""" - def __init__(self, tree): - self.tree = tree - - def multipole_expansion_zeros(self): - return np.zeros(self.tree.nboxes, dtype=np.float64) + def multipole_expansion_zeros(self, taw): + return np.zeros(taw.tree.nboxes, dtype=np.float64) local_expansion_zeros = multipole_expansion_zeros - def output_zeros(self): - return np.zeros(self.tree.ntargets, dtype=np.float64) - - def _get_source_slice(self, ibox): - pstart = self.tree.box_source_starts[ibox] - return slice( - pstart, pstart + self.tree.box_source_counts_nonchild[ibox]) - - def _get_target_slice(self, ibox): - pstart = self.tree.box_target_starts[ibox] - return slice( - pstart, pstart + self.tree.box_target_counts_nonchild[ibox]) + def output_zeros(self, taw): + return np.zeros(taw.tree.ntargets, dtype=np.float64) - def reorder_sources(self, source_array): - return source_array[self.tree.user_source_ids] + def reorder_sources(self, taw, source_array): + return source_array[taw.tree.user_source_ids] - def reorder_potentials(self, potentials): - return potentials[self.tree.sorted_target_ids] + def reorder_potentials(self, taw, potentials): + return potentials[taw.tree.sorted_target_ids] @staticmethod def timing_future(ops): return DummyTimingFuture.from_op_count(ops) - def form_multipoles(self, level_start_source_box_nrs, source_boxes, + def form_multipoles(self, taw, level_start_source_box_nrs, source_boxes, src_weight_vecs): src_weights, = src_weight_vecs - mpoles = self.multipole_expansion_zeros() + mpoles = self.multipole_expansion_zeros(taw) ops = 0 for ibox in source_boxes: - pslice = self._get_source_slice(ibox) + pslice = taw._get_source_slice(ibox) mpoles[ibox] += np.sum(src_weights[pslice]) ops += src_weights[pslice].size return mpoles, self.timing_future(ops) - def coarsen_multipoles(self, level_start_source_parent_box_nrs, + def coarsen_multipoles(self, taw, level_start_source_parent_box_nrs, source_parent_boxes, mpoles): - tree = self.tree + tree = taw.tree ops = 0 # nlevels-1 is the last valid level index @@ -712,21 +711,21 @@ class ConstantOneExpansionWrangler: return mpoles, self.timing_future(ops) - def eval_direct(self, target_boxes, neighbor_sources_starts, + def eval_direct(self, taw, target_boxes, neighbor_sources_starts, neighbor_sources_lists, src_weight_vecs): src_weights, = src_weight_vecs - pot = self.output_zeros() + pot = self.output_zeros(taw) ops = 0 for itgt_box, tgt_ibox in enumerate(target_boxes): - tgt_pslice = self._get_target_slice(tgt_ibox) + tgt_pslice = taw._get_target_slice(tgt_ibox) src_sum = 0 nsrcs = 0 start, end = neighbor_sources_starts[itgt_box:itgt_box+2] #print "DIR: %s <- %s" % (tgt_ibox, neighbor_sources_lists[start:end]) for src_ibox in neighbor_sources_lists[start:end]: - src_pslice = self._get_source_slice(src_ibox) + src_pslice = taw._get_source_slice(src_ibox) nsrcs += src_weights[src_pslice].size src_sum += np.sum(src_weights[src_pslice]) @@ -737,10 +736,11 @@ class ConstantOneExpansionWrangler: return pot, self.timing_future(ops) def multipole_to_local(self, + taw, level_start_target_or_target_parent_box_nrs, target_or_target_parent_boxes, starts, lists, mpole_exps): - local_exps = self.local_expansion_zeros() + local_exps = self.local_expansion_zeros(taw) ops = 0 for itgt_box, tgt_ibox in enumerate(target_or_target_parent_boxes): @@ -756,16 +756,16 @@ class ConstantOneExpansionWrangler: return local_exps, self.timing_future(ops) - def eval_multipoles(self, + def eval_multipoles(self, taw, target_boxes_by_source_level, from_sep_smaller_nonsiblings_by_level, mpole_exps): - 
pot = self.output_zeros() + pot = self.output_zeros(taw) ops = 0 for level, ssn in enumerate(from_sep_smaller_nonsiblings_by_level): for itgt_box, tgt_ibox in \ enumerate(target_boxes_by_source_level[level]): - tgt_pslice = self._get_target_slice(tgt_ibox) + tgt_pslice = taw._get_target_slice(tgt_ibox) contrib = 0 @@ -778,11 +778,11 @@ class ConstantOneExpansionWrangler: return pot, self.timing_future(ops) - def form_locals(self, + def form_locals(self, taw, level_start_target_or_target_parent_box_nrs, target_or_target_parent_boxes, starts, lists, src_weight_vecs): src_weights, = src_weight_vecs - local_exps = self.local_expansion_zeros() + local_exps = self.local_expansion_zeros(taw) ops = 0 for itgt_box, tgt_ibox in enumerate(target_or_target_parent_boxes): @@ -792,7 +792,7 @@ class ConstantOneExpansionWrangler: contrib = 0 nsrcs = 0 for src_ibox in lists[start:end]: - src_pslice = self._get_source_slice(src_ibox) + src_pslice = taw._get_source_slice(src_ibox) nsrcs += src_weights[src_pslice].size contrib += np.sum(src_weights[src_pslice]) @@ -802,31 +802,31 @@ class ConstantOneExpansionWrangler: return local_exps, self.timing_future(ops) - def refine_locals(self, level_start_target_or_target_parent_box_nrs, + def refine_locals(self, taw, level_start_target_or_target_parent_box_nrs, target_or_target_parent_boxes, local_exps): ops = 0 - for target_lev in range(1, self.tree.nlevels): + for target_lev in range(1, taw.tree.nlevels): start, stop = level_start_target_or_target_parent_box_nrs[ target_lev:target_lev+2] for ibox in target_or_target_parent_boxes[start:stop]: - local_exps[ibox] += local_exps[self.tree.box_parent_ids[ibox]] + local_exps[ibox] += local_exps[taw.tree.box_parent_ids[ibox]] ops += 1 return local_exps, self.timing_future(ops) - def eval_locals(self, level_start_target_box_nrs, target_boxes, local_exps): - pot = self.output_zeros() + def eval_locals(self, taw, level_start_target_box_nrs, target_boxes, local_exps): + pot = self.output_zeros(taw) ops = 0 for ibox in target_boxes: - tgt_pslice = self._get_target_slice(ibox) + tgt_pslice = taw._get_target_slice(ibox) pot[tgt_pslice] += local_exps[ibox] ops += pot[tgt_pslice].size return pot, self.timing_future(ops) - def finalize_potentials(self, potentials): + def finalize_potentials(self, taw, potentials): return potentials # }}} diff --git a/boxtree/version.py b/boxtree/version.py index 5cea885..30c9629 100644 --- a/boxtree/version.py +++ b/boxtree/version.py @@ -1,2 +1,2 @@ -VERSION = (2020, 1) +VERSION = (2021, 1) VERSION_TEXT = ".".join(str(i) for i in VERSION) diff --git a/doc/fmm.rst b/doc/fmm.rst index 9ae7034..75f1ad5 100644 --- a/doc/fmm.rst +++ b/doc/fmm.rst @@ -1,30 +1,11 @@ FMM driver ========== -.. module:: boxtree.fmm - -.. autofunction:: drive_fmm - -.. autoclass:: ExpansionWranglerInterface - :members: - :undoc-members: - :member-order: bysource - -.. autoclass:: TimingResult - -.. autoclass:: TimingFuture +.. automodule:: boxtree.fmm Integration with PyFMMLib ------------------------- .. automodule:: boxtree.pyfmmlib_integration -.. autoclass:: FMMLibRotationDataInterface - -.. autoclass:: FMMLibRotationData - -.. autoclass:: FMMLibRotationDataNotSuppliedWarning - -.. autoclass:: FMMLibExpansionWrangler - .. 
vim: sw=4 diff --git a/examples/cost_model.py b/examples/cost_model.py index 693a287..032b1d4 100644 --- a/examples/cost_model.py +++ b/examples/cost_model.py @@ -24,7 +24,8 @@ def demo_cost_model(): "Currently this script uses process time which only works on Python>=3.3" ) - from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler + from boxtree.pyfmmlib_integration import ( + FMMLibTraversalAndWrangler, FMMLibExpansionWrangler) nsources_list = [1000, 2000, 3000, 4000, 5000] ntargets_list = [1000, 2000, 3000, 4000, 5000] @@ -76,13 +77,15 @@ def demo_cost_model(): # }}} - wrangler = FMMLibExpansionWrangler(trav.tree, 0, fmm_level_to_nterms) - level_to_orders.append(wrangler.level_nterms) + wrangler = FMMLibExpansionWrangler(trav.tree.dimensions, 0) + taw = FMMLibTraversalAndWrangler(trav, wrangler, + fmm_level_to_nterms=fmm_level_to_nterms) + level_to_orders.append(taw.level_nterms) timing_data = {} from boxtree.fmm import drive_fmm src_weights = np.random.rand(tree.nsources).astype(tree.coord_dtype) - drive_fmm(trav, wrangler, (src_weights,), timing_data=timing_data) + drive_fmm(taw, (src_weights,), timing_data=timing_data) timing_results.append(timing_data) diff --git a/test/test_cost_model.py b/test/test_cost_model.py index aa515da..269c9c4 100644 --- a/test/test_cost_model.py +++ b/test/test_cost_model.py @@ -391,7 +391,8 @@ def test_compare_cl_and_py_cost_model(ctx_factory, nsources, ntargets, dims, dty @pytest.mark.opencl def test_estimate_calibration_params(ctx_factory): - from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler + from boxtree.pyfmmlib_integration import ( + FMMLibExpansionWrangler, FMMLibTraversalAndWrangler) nsources_list = [1000, 2000, 3000, 4000] ntargets_list = [1000, 2000, 3000, 4000] @@ -443,13 +444,15 @@ def test_estimate_calibration_params(ctx_factory): # }}} - wrangler = FMMLibExpansionWrangler(trav.tree, 0, fmm_level_to_nterms) - level_to_orders.append(wrangler.level_nterms) + wrangler = FMMLibExpansionWrangler(trav.tree.dimensions, 0) + taw = FMMLibTraversalAndWrangler(trav, wrangler, + fmm_level_to_nterms=fmm_level_to_nterms) + level_to_orders.append(taw.level_nterms) timing_data = {} from boxtree.fmm import drive_fmm src_weights = np.random.rand(tree.nsources).astype(tree.coord_dtype) - drive_fmm(trav, wrangler, (src_weights,), timing_data=timing_data) + drive_fmm(taw, (src_weights,), timing_data=timing_data) timing_results.append(timing_data) @@ -573,13 +576,15 @@ def test_cost_model_op_counts_agree_with_constantone_wrangler( trav_dev, _ = tg(queue, tree, debug=True) trav = trav_dev.get(queue=queue) - from boxtree.tools import ConstantOneExpansionWrangler - wrangler = ConstantOneExpansionWrangler(trav.tree) + from boxtree.tools import ( + ConstantOneExpansionWrangler, ConstantOneTraversalAndWrangler) + wrangler = ConstantOneExpansionWrangler() + taw = ConstantOneTraversalAndWrangler(trav, wrangler) timing_data = {} from boxtree.fmm import drive_fmm src_weights = np.random.rand(tree.nsources).astype(tree.coord_dtype) - drive_fmm(trav, wrangler, (src_weights,), timing_data=timing_data) + drive_fmm(taw, (src_weights,), timing_data=timing_data) cost_model = FMMCostModel( translation_cost_model_factory=OpCountingTranslationCostModel diff --git a/test/test_fmm.py b/test/test_fmm.py index a9d0039..2f57d8b 100644 --- a/test/test_fmm.py +++ b/test/test_fmm.py @@ -36,6 +36,7 @@ from boxtree.tools import ( # noqa: F401 make_surface_particle_array as p_surface, make_uniform_particle_array as p_uniform, particle_array_to_host, + 
ConstantOneTraversalAndWrangler, ConstantOneExpansionWrangler) import logging @@ -46,7 +47,7 @@ warnings.simplefilter("ignore", FMMLibRotationDataNotSuppliedWarning) # {{{ ref fmmlib pot computation -def get_fmmlib_ref_pot(wrangler, weights, sources_host, targets_host, +def get_fmmlib_ref_pot(taw, weights, sources_host, targets_host, helmholtz_k, dipole_vec=None): dims = sources_host.shape[0] eqn_letter = "h" if helmholtz_k else "l" @@ -81,7 +82,8 @@ def get_fmmlib_ref_pot(wrangler, weights, sources_host, targets_host, if helmholtz_k: kwargs["zk"] = helmholtz_k - return wrangler.finalize_potentials( + return taw.wrangler.finalize_potentials( + taw, fmmlib_routine( sources=sources_host, targets=targets_host, **kwargs)[0] @@ -92,34 +94,38 @@ def get_fmmlib_ref_pot(wrangler, weights, sources_host, targets_host, # {{{ fmm interaction completeness test -class ConstantOneExpansionWranglerWithFilteredTargetsInTreeOrder( - ConstantOneExpansionWrangler): - def __init__(self, tree, filtered_targets): - ConstantOneExpansionWrangler.__init__(self, tree) +class ConstantOneTraversalAndWranglerWithFilteredTargetsInTreeOrder( + ConstantOneTraversalAndWrangler): + def __init__(self, traversal, wrangler, filtered_targets): + super().__init__(traversal, wrangler) self.filtered_targets = filtered_targets - def output_zeros(self): - return np.zeros(self.filtered_targets.nfiltered_targets, dtype=np.float64) - def _get_target_slice(self, ibox): pstart = self.filtered_targets.box_target_starts[ibox] return slice( pstart, pstart + self.filtered_targets.box_target_counts_nonchild[ibox]) - def reorder_potentials(self, potentials): - tree_order_all_potentials = np.zeros(self.tree.ntargets, potentials.dtype) + +class ConstantOneExpansionWranglerWithFilteredTargetsInTreeOrder( + ConstantOneExpansionWrangler): + + def output_zeros(self, taw): + return np.zeros(taw.filtered_targets.nfiltered_targets, dtype=np.float64) + + def reorder_potentials(self, taw, potentials): + tree_order_all_potentials = np.zeros(taw.tree.ntargets, potentials.dtype) tree_order_all_potentials[ - self.filtered_targets.unfiltered_from_filtered_target_indices] \ + taw.filtered_targets.unfiltered_from_filtered_target_indices] \ = potentials - return tree_order_all_potentials[self.tree.sorted_target_ids] + return tree_order_all_potentials[taw.tree.sorted_target_ids] -class ConstantOneExpansionWranglerWithFilteredTargetsInUserOrder( - ConstantOneExpansionWrangler): - def __init__(self, tree, filtered_targets): - ConstantOneExpansionWrangler.__init__(self, tree) +class ConstantOneTraversalAndWranglerWithFilteredTargetsInUserOrder( + ConstantOneTraversalAndWrangler): + def __init__(self, traversal, wrangler, filtered_targets): + super().__init__(traversal, wrangler) self.filtered_targets = filtered_targets def _get_target_slice(self, ibox): @@ -240,27 +246,30 @@ def test_fmm_completeness(ctx_factory, dims, nsources_req, ntargets_req, if filter_kind == "user": filtered_targets = plfilt.filter_target_lists_in_user_order( queue, tree, flags) - wrangler = ConstantOneExpansionWranglerWithFilteredTargetsInUserOrder( - host_tree, filtered_targets.get(queue=queue)) + wrangler = ConstantOneExpansionWrangler() + taw = ConstantOneTraversalAndWranglerWithFilteredTargetsInUserOrder( + host_trav, wrangler, filtered_targets.get(queue=queue)) elif filter_kind == "tree": filtered_targets = plfilt.filter_target_lists_in_tree_order( queue, tree, flags) - wrangler = ConstantOneExpansionWranglerWithFilteredTargetsInTreeOrder( - host_tree, filtered_targets.get(queue=queue)) 
+ wrangler = ConstantOneExpansionWranglerWithFilteredTargetsInTreeOrder() + taw = ConstantOneTraversalAndWranglerWithFilteredTargetsInTreeOrder( + host_trav, wrangler, filtered_targets.get(queue=queue)) else: raise ValueError("unsupported value of 'filter_kind'") else: - wrangler = ConstantOneExpansionWrangler(host_tree) + wrangler = ConstantOneExpansionWrangler() + taw = ConstantOneTraversalAndWrangler(host_trav, wrangler) flags = cl.array.empty(queue, ntargets or nsources, dtype=np.int8) flags.fill(1) if ntargets is None and not filter_kind: # This check only works for targets == sources. assert (wrangler.reorder_potentials( - wrangler.reorder_sources(weights)) == weights).all() + taw, wrangler.reorder_sources(taw, weights)) == weights).all() from boxtree.fmm import drive_fmm - pot = drive_fmm(host_trav, wrangler, (weights,)) + pot = drive_fmm(taw, (weights,)) if filter_kind: pot = pot[flags.get() > 0] @@ -445,16 +454,17 @@ def test_pyfmmlib_fmm(ctx_factory, dims, use_dipoles, helmholtz_k): return result - from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler - wrangler = FMMLibExpansionWrangler( - trav.tree, helmholtz_k, - fmm_level_to_nterms=fmm_level_to_nterms, + from boxtree.pyfmmlib_integration import ( + FMMLibExpansionWrangler, FMMLibTraversalAndWrangler) + wrangler = FMMLibExpansionWrangler(trav.tree.dimensions, helmholtz_k) + taw = FMMLibTraversalAndWrangler( + trav, wrangler, fmm_level_to_nterms=fmm_level_to_nterms, dipole_vec=dipole_vec) from boxtree.fmm import drive_fmm timing_data = {} - pot = drive_fmm(trav, wrangler, (weights,), timing_data=timing_data) + pot = drive_fmm(taw, (weights,), timing_data=timing_data) print(timing_data) assert timing_data @@ -462,7 +472,7 @@ def test_pyfmmlib_fmm(ctx_factory, dims, use_dipoles, helmholtz_k): logger.info("computing direct (reference) result") - ref_pot = get_fmmlib_ref_pot(wrangler, weights, sources_host.T, + ref_pot = get_fmmlib_ref_pot(taw, weights, sources_host.T, targets_host.T, helmholtz_k, dipole_vec) rel_err = la.norm(pot - ref_pot, np.inf) / la.norm(ref_pot, np.inf) @@ -566,26 +576,26 @@ def test_pyfmmlib_numerical_stability(ctx_factory, dims, helmholtz_k, order): weights = np.ones_like(sources[0]) from boxtree.pyfmmlib_integration import ( - FMMLibExpansionWrangler, FMMLibRotationData) + FMMLibExpansionWrangler, FMMLibTraversalAndWrangler, FMMLibRotationData) def fmm_level_to_nterms(tree, lev): return order - wrangler = FMMLibExpansionWrangler( - trav.tree, helmholtz_k, + wrangler = FMMLibExpansionWrangler(trav.tree.dimensions, helmholtz_k) + taw = FMMLibTraversalAndWrangler( + trav, wrangler, fmm_level_to_nterms=fmm_level_to_nterms, rotation_data=FMMLibRotationData(queue, trav)) from boxtree.fmm import drive_fmm - - pot = drive_fmm(trav, wrangler, (weights,)) + pot = drive_fmm(taw, (weights,)) assert not np.isnan(pot).any() # {{{ ref fmmlib computation logger.info("computing direct (reference) result") - ref_pot = get_fmmlib_ref_pot(wrangler, weights, sources, targets, + ref_pot = get_fmmlib_ref_pot(taw, weights, sources, targets, helmholtz_k) rel_err = la.norm(pot - ref_pot, np.inf) / la.norm(ref_pot, np.inf) @@ -772,14 +782,15 @@ def test_fmm_with_optimized_3d_m2l(ctx_factory, nsrcntgts, helmholtz_k, return result from boxtree.pyfmmlib_integration import ( - FMMLibExpansionWrangler, FMMLibRotationData) + FMMLibExpansionWrangler, FMMLibTraversalAndWrangler, FMMLibRotationData) - baseline_wrangler = FMMLibExpansionWrangler( - trav.tree, helmholtz_k, + wrangler = FMMLibExpansionWrangler(trav.tree.dimensions, 
helmholtz_k) + baseline_taw = FMMLibTraversalAndWrangler( + trav, wrangler, fmm_level_to_nterms=fmm_level_to_nterms) - optimized_wrangler = FMMLibExpansionWrangler( - trav.tree, helmholtz_k, + optimized_taw = FMMLibTraversalAndWrangler( + trav, wrangler, fmm_level_to_nterms=fmm_level_to_nterms, rotation_data=FMMLibRotationData(queue, trav)) @@ -787,11 +798,11 @@ def test_fmm_with_optimized_3d_m2l(ctx_factory, nsrcntgts, helmholtz_k, baseline_timing_data = {} baseline_pot = drive_fmm( - trav, baseline_wrangler, (weights,), timing_data=baseline_timing_data) + baseline_taw, (weights,), timing_data=baseline_timing_data) optimized_timing_data = {} optimized_pot = drive_fmm( - trav, optimized_wrangler, (weights,), timing_data=optimized_timing_data) + optimized_taw, (weights,), timing_data=optimized_timing_data) baseline_time = baseline_timing_data["multipole_to_local"]["process_elapsed"] if baseline_time is not None: -- GitLab From 2139cf5d1b83574d8b68c3c4d45d868b90450cbb Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Thu, 25 Mar 2021 00:52:29 -0500 Subject: [PATCH 219/260] Remove (unrealized) Wrangler.tree_dependent_info doc --- boxtree/fmm.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 763b7a9..cabc4b0 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -88,8 +88,6 @@ class ExpansionWranglerInterface(ABC): Changed (a subset of) functions to return timing data. - .. automethod:: tree_dependent_info - .. rubric:: Array creation .. automethod:: multipole_expansion_zeros -- GitLab From fb821d4dd8670030fa9d2b94135f9f9d79500992 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Thu, 25 Mar 2021 00:54:12 -0500 Subject: [PATCH 220/260] Add sumpy downstream CI --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7b9558d..27290e4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -82,7 +82,7 @@ jobs: downstream_tests: strategy: matrix: - downstream_project: [pytential] + downstream_project: [sumpy, pytential] name: Tests for downstream project ${{ matrix.downstream_project }} runs-on: ubuntu-latest steps: -- GitLab From 3eac6579190ce738c1384b01767e619e18556db8 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 24 Mar 2021 23:53:54 -0700 Subject: [PATCH 221/260] Address reviewer's comments --- boxtree/tools.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/boxtree/tools.py b/boxtree/tools.py index 7767914..36355e7 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -695,15 +695,13 @@ class MaskCompressorKernel(object): @memoize_method def get_list_compressor_kernel(self, mask_dtype, list_dtype): from pyopencl.algorithm import ListOfListsBuilder - # Reimport VectorArg to use default with_offset - from pyopencl.tools import VectorArg return ListOfListsBuilder( self.context, [("output", list_dtype)], MASK_LIST_COMPRESSOR_BODY, [ - VectorArg(mask_dtype, "mask"), + _VectorArg(mask_dtype, "mask"), ], name_prefix="compress_list") @@ -762,11 +760,17 @@ class MaskCompressorKernel(object): # }}} -# {{{ all-reduce +# {{{ Communication pattern for partial multipole expansions class AllReduceCommPattern(object): - """Describes a tree-like communication pattern for allreduce. Supports efficient - allreduce between an arbitrary number of processes. + """Describes a tree-like communication pattern for exchanging and reducing + multipole expansions. Supports an arbitrary number of processes. 
+ + Communication of multipoles is broken down into stages. At each stage, + :meth:`sources()` and :meth:`sinks()` obtain the lists of ranks for receiving and + sending multipoles. :meth:`messages()` can be used for determining boxes whose + multipole expansions need to be sent during the current stage. Use :meth:`advance()` + to advance to the next stage. """ def __init__(self, rank, size): @@ -819,8 +823,9 @@ class AllReduceCommPattern(object): return set([partner]) def messages(self): - """Return the range of relevant messages to send to the sinks. This is returned - as a [start, end) pair. By design, it is a consecutive range. + """Return a range of ranks, such that the multipole expansions used by + responsible boxes of these ranks are sent to the sinks. This is returned as + a [start, end) pair. By design, it is a consecutive range. """ if self.rank < self.midpoint: return (self.midpoint, self.right) @@ -1080,7 +1085,7 @@ def run_mpi(script, num_processes, env): subprocess.run(command, env=env, check=True) else: - raise RuntimeError("Unrecognized MPI implementation") + raise NotImplementedError("Unrecognized MPI implementation") # }}} -- GitLab From 1b87a239933ee5cb7667d892a22a7a276e57283d Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 25 Mar 2021 00:08:00 -0700 Subject: [PATCH 222/260] Accept both host and device array in ImmutableHostDeviceArray --- boxtree/tools.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/boxtree/tools.py b/boxtree/tools.py index 36355e7..8ba29b0 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -365,11 +365,8 @@ class DeviceDataRecord(Record): transformed to `ImmutableHostDeviceArray`. """ def _to_host_device_array(attr): - if isinstance(attr, np.ndarray): + if isinstance(attr, (np.ndarray, cl.array.Array)): return ImmutableHostDeviceArray(queue, attr) - if isinstance(attr, cl.array.Array): - host_array = attr.get(queue=queue) - return ImmutableHostDeviceArray(queue, host_array) else: return attr @@ -769,8 +766,8 @@ class AllReduceCommPattern(object): Communication of multipoles is broken down into stages. At each stage, :meth:`sources()` and :meth:`sinks()` obtain the lists of ranks for receiving and sending multipoles. :meth:`messages()` can be used for determining boxes whose - multipole expansions need to be sent during the current stage. Use :meth:`advance()` - to advance to the next stage. + multipole expansions need to be sent during the current stage. Use + :meth:`advance()` to advance to the next stage. """ def __init__(self, rank, size): @@ -1101,11 +1098,16 @@ class ImmutableHostDeviceArray: @TODO: Once available, replace this implementation with PyOpenCL's in-house implementation.
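+
+    For illustration only (``queue`` and ``ary`` are placeholder names), either a host
+    or a device array may be wrapped; the missing counterpart is assumed to be
+    materialized lazily on first access::
+
+        x = ImmutableHostDeviceArray(queue, ary)  # ary: numpy.ndarray or pyopencl.array.Array
+        x.host  # numpy array; fetched from the device if not already present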
""" - def __init__(self, queue, host_array): + def __init__(self, queue, array): self.queue = queue - self.host_array = host_array + self.host_array = None self.device_array = None + if isinstance(array, np.ndarray): + self.host_array = array + elif isinstance(array, cl.array.Array): + self.device_array = array + def with_queue(self, queue): self.queue = queue @@ -1120,6 +1122,8 @@ class ImmutableHostDeviceArray: @property def host(self): + if self.host_array is None: + self.host_array = self.device_array.get(self.queue) return self.host_array @property -- GitLab From 9770b13fb51451b36aefb8561624917a04f4b927 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Thu, 25 Mar 2021 08:27:21 -0500 Subject: [PATCH 223/260] Fix test failures for tree/wrangler refactor --- test/test_fmm.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/test_fmm.py b/test/test_fmm.py index 2f57d8b..6bcd809 100644 --- a/test/test_fmm.py +++ b/test/test_fmm.py @@ -660,11 +660,11 @@ def test_interaction_list_particle_count_thresholding(ctx_factory, enable_extent weights_sum = np.sum(weights) host_trav = trav.get(queue=queue) - host_tree = host_trav.tree - wrangler = ConstantOneExpansionWrangler(host_tree) + wrangler = ConstantOneExpansionWrangler() + taw = ConstantOneTraversalAndWrangler(host_trav, wrangler) - pot = drive_fmm(host_trav, wrangler, (weights,)) + pot = drive_fmm(taw, (weights,)) assert (pot == weights_sum).all() @@ -717,11 +717,11 @@ def test_fmm_float32(ctx_factory, enable_extents): weights_sum = np.sum(weights) host_trav = trav.get(queue=queue) - host_tree = host_trav.tree - wrangler = ConstantOneExpansionWrangler(host_tree) + wrangler = ConstantOneExpansionWrangler() + taw = ConstantOneTraversalAndWrangler(host_trav, wrangler) - pot = drive_fmm(host_trav, wrangler, (weights,)) + pot = drive_fmm(taw, (weights,)) assert (pot == weights_sum).all() -- GitLab From 53b148805adc2ff26a48a636b6b42fdb4541aa90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Sat, 10 Apr 2021 16:21:20 -0500 Subject: [PATCH 224/260] Create fmm.py --- boxtree/fmm.py | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index cabc4b0..681db04 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -43,6 +43,33 @@ from pytools import ProcessLogger # {{{ expansion wrangler interface +# Design considerations: +# +# - Making the wrangler contain/permit it to depend the tree (which was previously the case) +# forces code caches (say, for translations) to be thrown away every time the FMM is run on +# a different tree. +# +# - Essentially every wrangler had grown some dependency on tree information. Separating +# out this information in a more "official" way seemed like a reasonable idea. +# +# - Since some of the tree-dependent information in the wrangler also depended on the +# traversal (while also being specific to each wrangler type), it seemed to make +# to make sense to create this class that explicitly depends on both to host this data. +# +# - Since drive_fmm previously took a wrangler and a traversal as an argument, this +# object (which contains both) became the natural new argument type to drive_fmm. +# +# - Since the expansion wrangler becomes a pure 'code container', every method in +# the wrangler is provided access to the TraversalAndWrangler. 
If the translation +# methods existed in TraversalAndWrangler, then at least another set of cooperating +# "code getter" methods would be required in the wrangler. This is the chief +# downside (to my mind) of the old 'wrangler+code container' design. +# +# - The wrangler methods obviously don't need to be told what wrangler to use +# (but are told this anyway by way of being passed a 'taw'). This is redundant +# and a bit clunky, but I found this to be an acceptable downside. +# +# -AK, as part of https://github.com/inducer/boxtree/pull/29 class TraversalAndWrangler: """A base class for objects specific to an implementation of the :class:`ExpansionWranglerInterface` that may hold tree-/geometry-dependent @@ -65,7 +92,7 @@ class TraversalAndWrangler: def tree(self): return self.traversal.tree - + class ExpansionWranglerInterface(ABC): """Abstract expansion handling interface for use with :func:`drive_fmm`. -- GitLab From 1f25e1547820e4135336796010cd136c79d6d491 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Sat, 10 Apr 2021 16:33:08 -0500 Subject: [PATCH 225/260] More justification for TraversalAndWrangler design --- boxtree/fmm.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 681db04..d6ef835 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -60,15 +60,23 @@ from pytools import ProcessLogger # object (which contains both) became the natural new argument type to drive_fmm. # # - Since the expansion wrangler becomes a pure 'code container', every method in -# the wrangler is provided access to the TraversalAndWrangler. If the translation -# methods existed in TraversalAndWrangler, then at least another set of cooperating -# "code getter" methods would be required in the wrangler. This is the chief -# downside (to my mind) of the old 'wrangler+code container' design. +# the wrangler is provided access to the TraversalAndWrangler. # # - The wrangler methods obviously don't need to be told what wrangler to use # (but are told this anyway by way of being passed a 'taw'). This is redundant # and a bit clunky, but I found this to be an acceptable downside. # +# - In many cases (look no further than the fmmlib wrangler), wranglers were +# provided tree-specific arguments by the user. As a result, +# TraversalAndWrangler (aka the only thing that's allowed to know both +# the tree and the wrangler) needs to be created by the user, to retain +# the ability to provide these parameters. +# +# - Since wranglers (which may depend on the kernel but not the tree) +# may also do (kernel-specific) pre-computation, and since the +# lifetime of the tree-dependent TraversalAndWrangler and the wrangler are +# naturally different, it follows that both must be exposed to the user. 
+# # -AK, as part of https://github.com/inducer/boxtree/pull/29 class TraversalAndWrangler: """A base class for objects specific to an implementation of the -- GitLab From d41fcdc46a76fc263b138decf2482f03aa4677b8 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sat, 10 Apr 2021 16:36:33 -0500 Subject: [PATCH 226/260] Placate flake8 --- boxtree/fmm.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index d6ef835..3718f54 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -45,16 +45,18 @@ from pytools import ProcessLogger # Design considerations: # -# - Making the wrangler contain/permit it to depend the tree (which was previously the case) -# forces code caches (say, for translations) to be thrown away every time the FMM is run on -# a different tree. +# - Making the wrangler contain/permit it to depend the tree (which was +# previously the case) forces code caches (say, for translations) to be thrown +# away every time the FMM is run on a different tree. # -# - Essentially every wrangler had grown some dependency on tree information. Separating -# out this information in a more "official" way seemed like a reasonable idea. +# - Essentially every wrangler had grown some dependency on tree information. +# Separating out this information in a more "official" way seemed like a +# reasonable idea. # # - Since some of the tree-dependent information in the wrangler also depended on the # traversal (while also being specific to each wrangler type), it seemed to make -# to make sense to create this class that explicitly depends on both to host this data. +# to make sense to create this class that explicitly depends on both to host +# this data. # # - Since drive_fmm previously took a wrangler and a traversal as an argument, this # object (which contains both) became the natural new argument type to drive_fmm. @@ -100,7 +102,7 @@ class TraversalAndWrangler: def tree(self): return self.traversal.tree - + class ExpansionWranglerInterface(ABC): """Abstract expansion handling interface for use with :func:`drive_fmm`. -- GitLab From 0383c0575164e06f2e4829a45ae2ac4f31c6604d Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sun, 25 Apr 2021 16:37:13 -0500 Subject: [PATCH 227/260] Back out ill-fated TraversalAndWrangler, introduce TreeIndependentDataForWrangler, introduce boxtree.{timing,constant_one} --- boxtree/constant_one.py | 233 ++++++++++++ boxtree/fmm.py | 243 +++---------- boxtree/pyfmmlib_integration.py | 609 ++++++++++++++++---------------- boxtree/timing.py | 170 +++++++++ boxtree/tools.py | 247 ------------- doc/index.rst | 1 + doc/tools.rst | 6 + examples/cost_model.py | 11 +- test/test_cost_model.py | 22 +- test/test_fmm.py | 119 +++---- 10 files changed, 854 insertions(+), 807 deletions(-) create mode 100644 boxtree/constant_one.py create mode 100644 boxtree/timing.py create mode 100644 doc/tools.rst diff --git a/boxtree/constant_one.py b/boxtree/constant_one.py new file mode 100644 index 0000000..5590617 --- /dev/null +++ b/boxtree/constant_one.py @@ -0,0 +1,233 @@ +""" +.. autoclass:: ConstantOneTreeIndependentDataForWrangler +.. 
autoclass:: ConstantOneExpansionWrangler +""" + +__copyright__ = "Copyright (C) 2013 Andreas Kloeckner" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import numpy as np +from boxtree.fmm import TreeIndependentDataForWrangler, ExpansionWranglerInterface +from boxtree.timing import DummyTimingFuture + + +# {{{ constant one wrangler + +class ConstantOneTreeIndependentDataForWrangler(TreeIndependentDataForWrangler): + """ + .. automethod:: __init__ + """ + + +class ConstantOneExpansionWrangler(ExpansionWranglerInterface): + """This implements the 'analytical routines' for a Green's function that is + constant 1 everywhere. For 'charges' of 'ones', this should get every particle + a copy of the particle count. + + Timing results returned by this wrangler contain the field *ops_elapsed*, + which counts approximately the number of floating-point operations required. + """ + + def _get_source_slice(self, ibox): + pstart = self.tree.box_source_starts[ibox] + return slice( + pstart, pstart + self.tree.box_source_counts_nonchild[ibox]) + + def _get_target_slice(self, ibox): + pstart = self.tree.box_target_starts[ibox] + return slice( + pstart, pstart + self.tree.box_target_counts_nonchild[ibox]) + + def multipole_expansion_zeros(self): + return np.zeros(self.tree.nboxes, dtype=np.float64) + + local_expansion_zeros = multipole_expansion_zeros + + def output_zeros(self): + return np.zeros(self.tree.ntargets, dtype=np.float64) + + def reorder_sources(self, source_array): + return source_array[self.tree.user_source_ids] + + def reorder_potentials(self, potentials): + return potentials[self.tree.sorted_target_ids] + + @staticmethod + def timing_future(ops): + return DummyTimingFuture.from_op_count(ops) + + def form_multipoles(self, level_start_source_box_nrs, source_boxes, + src_weight_vecs): + src_weights, = src_weight_vecs + mpoles = self.multipole_expansion_zeros() + ops = 0 + + for ibox in source_boxes: + pslice = self._get_source_slice(ibox) + mpoles[ibox] += np.sum(src_weights[pslice]) + ops += src_weights[pslice].size + + return mpoles, self.timing_future(ops) + + def coarsen_multipoles(self, level_start_source_parent_box_nrs, + source_parent_boxes, mpoles): + tree = self.tree + ops = 0 + + # nlevels-1 is the last valid level index + # nlevels-2 is the last valid level that could have children + # + # 3 is the last relevant source_level. + # 2 is the last relevant target_level. 
+ # (because no level 1 box will be well-separated from another) + for source_level in range(tree.nlevels-1, 2, -1): + target_level = source_level - 1 + start, stop = level_start_source_parent_box_nrs[ + target_level:target_level+2] + for ibox in source_parent_boxes[start:stop]: + for child in tree.box_child_ids[:, ibox]: + if child: + mpoles[ibox] += mpoles[child] + ops += 1 + + return mpoles, self.timing_future(ops) + + def eval_direct(self, target_boxes, neighbor_sources_starts, + neighbor_sources_lists, src_weight_vecs): + src_weights, = src_weight_vecs + pot = self.output_zeros() + ops = 0 + + for itgt_box, tgt_ibox in enumerate(target_boxes): + tgt_pslice = self._get_target_slice(tgt_ibox) + + src_sum = 0 + nsrcs = 0 + start, end = neighbor_sources_starts[itgt_box:itgt_box+2] + #print "DIR: %s <- %s" % (tgt_ibox, neighbor_sources_lists[start:end]) + for src_ibox in neighbor_sources_lists[start:end]: + src_pslice = self._get_source_slice(src_ibox) + nsrcs += src_weights[src_pslice].size + + src_sum += np.sum(src_weights[src_pslice]) + + pot[tgt_pslice] = src_sum + ops += pot[tgt_pslice].size * nsrcs + + return pot, self.timing_future(ops) + + def multipole_to_local(self, + level_start_target_or_target_parent_box_nrs, + target_or_target_parent_boxes, + starts, lists, mpole_exps): + local_exps = self.local_expansion_zeros() + ops = 0 + + for itgt_box, tgt_ibox in enumerate(target_or_target_parent_boxes): + start, end = starts[itgt_box:itgt_box+2] + + contrib = 0 + #print tgt_ibox, "<-", lists[start:end] + for src_ibox in lists[start:end]: + contrib += mpole_exps[src_ibox] + ops += 1 + + local_exps[tgt_ibox] += contrib + + return local_exps, self.timing_future(ops) + + def eval_multipoles(self, + target_boxes_by_source_level, from_sep_smaller_nonsiblings_by_level, + mpole_exps): + pot = self.output_zeros() + ops = 0 + + for level, ssn in enumerate(from_sep_smaller_nonsiblings_by_level): + for itgt_box, tgt_ibox in \ + enumerate(target_boxes_by_source_level[level]): + tgt_pslice = self._get_target_slice(tgt_ibox) + + contrib = 0 + + start, end = ssn.starts[itgt_box:itgt_box+2] + for src_ibox in ssn.lists[start:end]: + contrib += mpole_exps[src_ibox] + + pot[tgt_pslice] += contrib + ops += pot[tgt_pslice].size * (end - start) + + return pot, self.timing_future(ops) + + def form_locals(self, + level_start_target_or_target_parent_box_nrs, + target_or_target_parent_boxes, starts, lists, src_weight_vecs): + src_weights, = src_weight_vecs + local_exps = self.local_expansion_zeros() + ops = 0 + + for itgt_box, tgt_ibox in enumerate(target_or_target_parent_boxes): + start, end = starts[itgt_box:itgt_box+2] + + #print "LIST 4", tgt_ibox, "<-", lists[start:end] + contrib = 0 + nsrcs = 0 + for src_ibox in lists[start:end]: + src_pslice = self._get_source_slice(src_ibox) + nsrcs += src_weights[src_pslice].size + + contrib += np.sum(src_weights[src_pslice]) + + local_exps[tgt_ibox] += contrib + ops += nsrcs + + return local_exps, self.timing_future(ops) + + def refine_locals(self, level_start_target_or_target_parent_box_nrs, + target_or_target_parent_boxes, local_exps): + ops = 0 + + for target_lev in range(1, self.tree.nlevels): + start, stop = level_start_target_or_target_parent_box_nrs[ + target_lev:target_lev+2] + for ibox in target_or_target_parent_boxes[start:stop]: + local_exps[ibox] += local_exps[self.tree.box_parent_ids[ibox]] + ops += 1 + + return local_exps, self.timing_future(ops) + + def eval_locals(self, level_start_target_box_nrs, target_boxes, local_exps): + pot = self.output_zeros() + 
ops = 0 + + for ibox in target_boxes: + tgt_pslice = self._get_target_slice(ibox) + pot[tgt_pslice] += local_exps[ibox] + ops += pot[tgt_pslice].size + + return pot, self.timing_future(ops) + + def finalize_potentials(self, potentials): + return potentials + +# }}} + +# vim: foldmethod=marker:filetype=pyopencl diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 3718f54..677680f 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -1,12 +1,8 @@ """ .. autofunction:: drive_fmm -.. autoclass:: TraversalAndWrangler +.. autoclass:: TreeIndependentDataForWrangler .. autoclass:: ExpansionWranglerInterface - -.. autoclass:: TimingResult - -.. autoclass:: TimingFuture """ @@ -35,7 +31,8 @@ THE SOFTWARE. from abc import ABC, abstractmethod import logging logger = logging.getLogger(__name__) -from collections.abc import Mapping +from boxtree.tree import Tree +from boxtree.traversal import FMMTraversalInfo from pytools import ProcessLogger @@ -43,64 +40,14 @@ from pytools import ProcessLogger # {{{ expansion wrangler interface -# Design considerations: -# -# - Making the wrangler contain/permit it to depend the tree (which was -# previously the case) forces code caches (say, for translations) to be thrown -# away every time the FMM is run on a different tree. -# -# - Essentially every wrangler had grown some dependency on tree information. -# Separating out this information in a more "official" way seemed like a -# reasonable idea. -# -# - Since some of the tree-dependent information in the wrangler also depended on the -# traversal (while also being specific to each wrangler type), it seemed to make -# to make sense to create this class that explicitly depends on both to host -# this data. -# -# - Since drive_fmm previously took a wrangler and a traversal as an argument, this -# object (which contains both) became the natural new argument type to drive_fmm. -# -# - Since the expansion wrangler becomes a pure 'code container', every method in -# the wrangler is provided access to the TraversalAndWrangler. -# -# - The wrangler methods obviously don't need to be told what wrangler to use -# (but are told this anyway by way of being passed a 'taw'). This is redundant -# and a bit clunky, but I found this to be an acceptable downside. -# -# - In many cases (look no further than the fmmlib wrangler), wranglers were -# provided tree-specific arguments by the user. As a result, -# TraversalAndWrangler (aka the only thing that's allowed to know both -# the tree and the wrangler) needs to be created by the user, to retain -# the ability to provide these parameters. -# -# - Since wranglers (which may depend on the kernel but not the tree) -# may also do (kernel-specific) pre-computation, and since the -# lifetime of the tree-dependent TraversalAndWrangler and the wrangler are -# naturally different, it follows that both must be exposed to the user. -# -# -AK, as part of https://github.com/inducer/boxtree/pull/29 -class TraversalAndWrangler: - """A base class for objects specific to an implementation of the - :class:`ExpansionWranglerInterface` that may hold tree-/geometry-dependent - information. Typically, these objects can be used to host caches for such - information. Via :func:`drive_fmm`, objects of this type are supplied to - every method in the :class:`ExpansionWranglerInterface`. - - .. attribute:: dim - .. attribute:: tree - .. attribute:: traversal - .. 
attribute:: wrangler - """ - - def __init__(self, traversal, wrangler): - self.dim = traversal.tree.dimensions - self.traversal = traversal - self.wrangler = wrangler +class TreeIndependentDataForWrangler: + """An object that can be used to store information for efficient + wrangler execution that depends on the kernel but not the tree and/or + the traversal. - @property - def tree(self): - return self.traversal.tree + Examples of such data include generated code for carrying out + translations. + """ class ExpansionWranglerInterface(ABC): @@ -119,12 +66,23 @@ class ExpansionWranglerInterface(ABC): (or set of kernels). Functions that support returning timing data return a value supporting the - :class:`TimingFuture` interface. + :class:`~boxtree.timing.TimingFuture` interface. .. versionchanged:: 2018.1 Changed (a subset of) functions to return timing data. + .. attribute:: tree_indep + + An instance of (a typically wrangler-dependent subclass of) + :class:`TreeIndependentDataForWrangler`. + + .. attribute:: traversal + + An instance of :class:`~boxtree.traversal.FMMTraversalInfo`. + + .. autoattribute:: tree + .. rubric:: Array creation .. automethod:: multipole_expansion_zeros @@ -149,22 +107,31 @@ class ExpansionWranglerInterface(ABC): .. automethod:: finalize_potentials """ + def __init__(self, tree_indep: TreeIndependentDataForWrangler, + traversal: FMMTraversalInfo): + self.tree_indep = tree_indep + self.traversal = traversal + + @property + def tree(self) -> Tree: + return self.traversal.tree + @abstractmethod - def multipole_expansion_zeros(self, taw: TraversalAndWrangler): + def multipole_expansion_zeros(self): """Return an expansions array (which must support addition) capable of holding one multipole or local expansion for every box in the tree. """ @abstractmethod - def local_expansion_zeros(self, taw: TraversalAndWrangler): + def local_expansion_zeros(self): """Return an expansions array (which must support addition) capable of holding one multipole or local expansion for every box in the tree. """ @abstractmethod - def output_zeros(self, taw: TraversalAndWrangler): + def output_zeros(self): """Return a potentials array (which must support addition) capable of holding a potential value for each target in the tree. Note that :func:`drive_fmm` makes no assumptions about *potential* other than @@ -173,21 +140,23 @@ class ExpansionWranglerInterface(ABC): """ @abstractmethod - def reorder_sources(self, taw: TraversalAndWrangler, source_array): + def reorder_sources(self, source_array): """Return a copy of *source_array* in :ref:`tree source order `. *source_array* is in user source order. """ @abstractmethod - def reorder_potentials(self, taw: TraversalAndWrangler, potentials): + def reorder_potentials(self, potentials): """Return a copy of *potentials* in :ref:`user target order `. *source_weights* is in tree target order. 
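        (A typical implementation indexes with the tree's sorted target ids,
        e.g. ``potentials[self.tree.sorted_target_ids]``.)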
""" + # {{{ translations + @abstractmethod - def form_multipoles(self, taw: TraversalAndWrangler, + def form_multipoles(self, level_start_source_box_nrs, source_boxes, src_weight_vecs): """Return an expansions array (compatible with @@ -200,7 +169,7 @@ class ExpansionWranglerInterface(ABC): """ @abstractmethod - def coarsen_multipoles(self, taw: TraversalAndWrangler, + def coarsen_multipoles(self, level_start_source_parent_box_nrs, source_parent_boxes, mpoles): """For each box in *source_parent_boxes*, @@ -212,7 +181,7 @@ class ExpansionWranglerInterface(ABC): """ @abstractmethod - def eval_direct(self, taw: TraversalAndWrangler, + def eval_direct(self, target_boxes, neighbor_sources_starts, neighbor_sources_lists, src_weight_vecs): """For each box in *target_boxes*, evaluate the influence of the @@ -224,7 +193,7 @@ class ExpansionWranglerInterface(ABC): """ @abstractmethod - def multipole_to_local(self, taw: TraversalAndWrangler, + def multipole_to_local(self, level_start_target_or_target_parent_box_nrs, target_or_target_parent_boxes, starts, lists, mpole_exps): @@ -238,7 +207,7 @@ class ExpansionWranglerInterface(ABC): """ @abstractmethod - def eval_multipoles(self, taw: TraversalAndWrangler, + def eval_multipoles(self, target_boxes_by_source_level, from_sep_smaller_by_level, mpole_exps): """For a level *i*, each box in *target_boxes_by_source_level[i]*, evaluate the multipole expansion in *mpole_exps* in the nearby boxes given in @@ -251,7 +220,7 @@ class ExpansionWranglerInterface(ABC): """ @abstractmethod - def form_locals(self, taw: TraversalAndWrangler, + def form_locals(self, level_start_target_or_target_parent_box_nrs, target_or_target_parent_boxes, starts, lists, src_weight_vecs): """For each box in *target_or_target_parent_boxes*, form local @@ -265,7 +234,7 @@ class ExpansionWranglerInterface(ABC): """ @abstractmethod - def refine_locals(self, taw: TraversalAndWrangler, + def refine_locals(self, level_start_target_or_target_parent_box_nrs, target_or_target_parent_boxes, local_exps): """For each box in *child_boxes*, @@ -276,7 +245,7 @@ class ExpansionWranglerInterface(ABC): """ @abstractmethod - def eval_locals(self, taw: TraversalAndWrangler, + def eval_locals(self, level_start_target_box_nrs, target_boxes, local_exps): """For each box in *target_boxes*, evaluate the local expansion in *local_exps* and return a new potential array. @@ -285,8 +254,10 @@ class ExpansionWranglerInterface(ABC): array, see :meth:`output_zeros`. """ + # }}} + @abstractmethod - def finalize_potentials(self, taw: TraversalAndWrangler, potentials): + def finalize_potentials(self, potentials): """ Postprocess the reordered potentials. This is where global scaling factors could be applied. This is distinct from :meth:`reorder_potentials` @@ -296,7 +267,8 @@ class ExpansionWranglerInterface(ABC): # }}} -def drive_fmm(taw: TraversalAndWrangler, src_weight_vecs, timing_data=None): +def drive_fmm(wrangler: ExpansionWranglerInterface, src_weight_vecs, + timing_data=None): """Top-level driver routine for a fast multipole calculation. In part, this is intended as a template for custom FMMs, in the sense that @@ -313,27 +285,27 @@ def drive_fmm(taw: TraversalAndWrangler, src_weight_vecs, timing_data=None): Passed unmodified to *expansion_wrangler*. :arg timing_data: Either *None*, or a :class:`dict` that is populated with timing information for the stages of the algorithm (in the form of - :class:`TimingResult`), if such information is available. 
+ :class:`~boxtree.timing.TimingResult`), if such information is available. Returns the potentials computed by *expansion_wrangler*. """ - wrangler = taw.wrangler - traversal = taw.traversal + + traversal = wrangler.traversal # Interface guidelines: Attributes of the tree are assumed to be known # to the expansion wrangler and should not be passed. fmm_proc = ProcessLogger(logger, "fmm") + from boxtree.timing import TimingRecorder recorder = TimingRecorder() - src_weight_vecs = [wrangler.reorder_sources(taw, weight) for + src_weight_vecs = [wrangler.reorder_sources(weight) for weight in src_weight_vecs] # {{{ "Step 2.1:" Construct local multipoles mpole_exps, timing_future = wrangler.form_multipoles( - taw, traversal.level_start_source_box_nrs, traversal.source_boxes, src_weight_vecs) @@ -345,7 +317,6 @@ def drive_fmm(taw: TraversalAndWrangler, src_weight_vecs, timing_data=None): # {{{ "Step 2.2:" Propagate multipoles upward mpole_exps, timing_future = wrangler.coarsen_multipoles( - taw, traversal.level_start_source_parent_box_nrs, traversal.source_parent_boxes, mpole_exps) @@ -359,7 +330,6 @@ def drive_fmm(taw: TraversalAndWrangler, src_weight_vecs, timing_data=None): # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") potentials, timing_future = wrangler.eval_direct( - taw, traversal.target_boxes, traversal.neighbor_source_boxes_starts, traversal.neighbor_source_boxes_lists, @@ -374,7 +344,6 @@ def drive_fmm(taw: TraversalAndWrangler, src_weight_vecs, timing_data=None): # {{{ "Stage 4:" translate separated siblings' ("list 2") mpoles to local local_exps, timing_future = wrangler.multipole_to_local( - taw, traversal.level_start_target_or_target_parent_box_nrs, traversal.target_or_target_parent_boxes, traversal.from_sep_siblings_starts, @@ -393,7 +362,6 @@ def drive_fmm(taw: TraversalAndWrangler, src_weight_vecs, timing_data=None): # contribution *out* of the downward-propagating local expansions) mpole_result, timing_future = wrangler.eval_multipoles( - taw, traversal.target_boxes_sep_smaller_by_source_level, traversal.from_sep_smaller_by_level, mpole_exps) @@ -409,7 +377,6 @@ def drive_fmm(taw: TraversalAndWrangler, src_weight_vecs, timing_data=None): "('list 3 close')") direct_result, timing_future = wrangler.eval_direct( - taw, traversal.target_boxes, traversal.from_sep_close_smaller_starts, traversal.from_sep_close_smaller_lists, @@ -424,7 +391,6 @@ def drive_fmm(taw: TraversalAndWrangler, src_weight_vecs, timing_data=None): # {{{ "Stage 6:" form locals for separated bigger source boxes ("list 4") local_result, timing_future = wrangler.form_locals( - taw, traversal.level_start_target_or_target_parent_box_nrs, traversal.target_or_target_parent_boxes, traversal.from_sep_bigger_starts, @@ -437,7 +403,6 @@ def drive_fmm(taw: TraversalAndWrangler, src_weight_vecs, timing_data=None): if traversal.from_sep_close_bigger_starts is not None: direct_result, timing_future = wrangler.eval_direct( - taw, traversal.target_boxes, traversal.from_sep_close_bigger_starts, traversal.from_sep_close_bigger_lists, @@ -452,7 +417,6 @@ def drive_fmm(taw: TraversalAndWrangler, src_weight_vecs, timing_data=None): # {{{ "Stage 7:" propagate local_exps downward local_exps, timing_future = wrangler.refine_locals( - taw, traversal.level_start_target_or_target_parent_box_nrs, traversal.target_or_target_parent_boxes, local_exps) @@ -464,7 +428,6 @@ def drive_fmm(taw: TraversalAndWrangler, src_weight_vecs, timing_data=None): # {{{ "Stage 8:" evaluate locals local_result, timing_future = 
wrangler.eval_locals( - taw, traversal.level_start_target_box_nrs, traversal.target_boxes, local_exps) @@ -475,9 +438,9 @@ def drive_fmm(taw: TraversalAndWrangler, src_weight_vecs, timing_data=None): # }}} - result = wrangler.reorder_potentials(taw, potentials) + result = wrangler.reorder_potentials(potentials) - result = wrangler.finalize_potentials(taw, result) + result = wrangler.finalize_potentials(result) fmm_proc.done() @@ -487,94 +450,4 @@ def drive_fmm(taw: TraversalAndWrangler, src_weight_vecs, timing_data=None): return result -# {{{ timing result - -class TimingResult(Mapping): - """Interface for returned timing data. - - This supports accessing timing results via a mapping interface, along with - combining results via :meth:`merge`. - - .. automethod:: merge - """ - - def __init__(self, *args, **kwargs): - """See constructor for :class:`dict`.""" - self._mapping = dict(*args, **kwargs) - - def __getitem__(self, key): - return self._mapping[key] - - def __iter__(self): - return iter(self._mapping) - - def __len__(self): - return len(self._mapping) - - def merge(self, other): - """Merge this result with another by adding together common fields.""" - result = {} - - for key in self: - val = self.get(key) - other_val = other.get(key) - - if val is None or other_val is None: - continue - - result[key] = val + other_val - - return type(self)(result) - -# }}} - - -# {{{ timing future - -class TimingFuture: - """Returns timing data for a potentially asynchronous operation. - - .. automethod:: result - .. automethod:: done - """ - - def result(self): - """Return a :class:`TimingResult`. May block.""" - raise NotImplementedError - - def done(self): - """Return *True* if the operation is complete.""" - raise NotImplementedError - -# }}} - - -# {{{ timing recorder - -class TimingRecorder: - - def __init__(self): - from collections import defaultdict - self.futures = defaultdict(list) - - def add(self, description, future): - self.futures[description].append(future) - - def summarize(self): - result = {} - - for description, futures_list in self.futures.items(): - futures = iter(futures_list) - - timing_result = next(futures).result() - for future in futures: - timing_result = timing_result.merge(future.result()) - - result[description] = timing_result - - return result - -# }}} - - # vim: filetype=pyopencl:fdm=marker diff --git a/boxtree/pyfmmlib_integration.py b/boxtree/pyfmmlib_integration.py index 54739b5..6ef855f 100644 --- a/boxtree/pyfmmlib_integration.py +++ b/boxtree/pyfmmlib_integration.py @@ -2,7 +2,7 @@ Integrates :mod:`boxtree` with `pyfmmlib `_. -.. autoclass:: FMMLibTraversalAndWrangler +.. autoclass:: FMMLibTreeIndependentDataForWrangler .. autoclass:: FMMLibExpansionWrangler Internal bits @@ -42,8 +42,8 @@ logger = logging.getLogger(__name__) import numpy as np from pytools import memoize_method, log_process -from boxtree.tools import return_timing_data -from boxtree.fmm import TraversalAndWrangler +from boxtree.timing import return_timing_data +from boxtree.fmm import TreeIndependentDataForWrangler, ExpansionWranglerInterface # {{{ rotation data interface @@ -110,15 +110,174 @@ class FMMLibRotationDataNotSuppliedWarning(UserWarning): # }}} -# {{{ tree-dependent wrangler info for fmmlib +# {{{ tree-independent data for wrangler -class FMMLibTraversalAndWrangler(TraversalAndWrangler): +class FMMLibTreeIndependentDataForWrangler(TreeIndependentDataForWrangler): """ .. 
automethod:: __init__ """ + + def __init__(self, dim, helmholtz_k, ifgrad=False): + self.dim = dim + self.helmholtz_k = helmholtz_k + self.ifgrad = ifgrad + + if helmholtz_k == 0: + self.eqn_letter = "l" + self.kernel_kwargs = {} + self.rscale_factor = 1 + else: + self.eqn_letter = "h" + self.kernel_kwargs = {"zk": helmholtz_k} + self.rscale_factor = abs(helmholtz_k) + + self.dtype = np.complex128 + + # {{{ routine getters + + def get_routine(self, name, suffix=""): + import pyfmmlib + return getattr(pyfmmlib, "{}{}{}".format( + self.eqn_letter, + name % self.dim, + suffix)) + + def get_vec_routine(self, name): + return self.get_routine(name, "_vec") + + def get_translation_routine(self, wrangler, name, vec_suffix="_vec"): + suffix = "" + if self.dim == 3: + suffix = "quadu" + suffix += vec_suffix + + rout = self.get_routine(name, suffix) + + if self.dim == 2: + def wrapper(*args, **kwargs): + # not used + kwargs.pop("level_for_projection", None) + + return rout(*args, **kwargs) + else: + + def wrapper(*args, **kwargs): + kwargs.pop("level_for_projection", None) + nterms2 = kwargs["nterms2"] + kwargs.update(wrangler.projection_quad_extra_kwargs(nterms=nterms2)) + + val, ier = rout(*args, **kwargs) + if (ier != 0).any(): + raise RuntimeError("%s failed with nonzero ier" % name) + + return val + + # Doesn't work in in Py2 + # from functools import update_wrapper + # update_wrapper(wrapper, rout) + return wrapper + + def get_direct_eval_routine(self, use_dipoles): + if self.dim == 2: + rout = self.get_vec_routine( + "potgrad%ddall" + ("_dp" if use_dipoles else "")) + + def wrapper(*args, **kwargs): + kwargs["ifgrad"] = self.ifgrad + kwargs["ifhess"] = False + pot, grad, hess = rout(*args, **kwargs) + + if not self.ifgrad: + grad = 0 + + return pot, grad + + # Doesn't work in in Py2 + # from functools import update_wrapper + # update_wrapper(wrapper, rout) + return wrapper + + elif self.dim == 3: + rout = self.get_vec_routine( + "potfld%ddall" + ("_dp" if use_dipoles else "")) + + def wrapper(*args, **kwargs): + kwargs["iffld"] = self.ifgrad + pot, fld = rout(*args, **kwargs) + if self.ifgrad: + grad = -fld + else: + grad = 0 + return pot, grad + + # Doesn't work in in Py2 + # from functools import update_wrapper + # update_wrapper(wrapper, rout) + return wrapper + else: + raise ValueError("unsupported dimensionality") + + def get_expn_eval_routine(self, expn_kind): + name = "%%dd%seval" % expn_kind + rout = self.get_routine(name, "_vec") + + if self.dim == 2: + def wrapper(*args, **kwargs): + kwargs["ifgrad"] = self.ifgrad + kwargs["ifhess"] = False + + pot, grad, hess = rout(*args, **kwargs) + if not self.ifgrad: + grad = 0 + + return pot, grad + + # Doesn't work in in Py2 + # from functools import update_wrapper + # update_wrapper(wrapper, rout) + return wrapper + + elif self.dim == 3: + def wrapper(*args, **kwargs): + kwargs["iffld"] = self.ifgrad + pot, fld, ier = rout(*args, **kwargs) + + if (ier != 0).any(): + raise RuntimeError("%s failed with nonzero ier" % name) + + if self.ifgrad: + grad = -fld + else: + grad = 0 + + return pot, grad + + # Doesn't work in in Py2 + # from functools import update_wrapper + # update_wrapper(wrapper, rout) + return wrapper + else: + raise ValueError("unsupported dimensionality") + + # }}} + +# }}} + + +# {{{ wrangler + +class FMMLibExpansionWrangler(ExpansionWranglerInterface): + """Implements the :class:`boxtree.fmm.ExpansionWranglerInterface` + by using pyfmmlib. 
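+
+    A minimal usage sketch (*trav* is an existing
+    :class:`~boxtree.traversal.FMMTraversalInfo` and *weights* a
+    source-weight array; cf. ``test_pyfmmlib_fmm``)::
+
+        from boxtree.fmm import drive_fmm
+
+        def fmm_level_to_nterms(tree, level):
+            return 10
+
+        tree_indep = FMMLibTreeIndependentDataForWrangler(
+            trav.tree.dimensions, helmholtz_k=0)
+        wrangler = FMMLibExpansionWrangler(
+            tree_indep, trav, fmm_level_to_nterms=fmm_level_to_nterms)
+        pot = drive_fmm(wrangler, (weights,))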
+ + Timing results returned by this wrangler contains the values *wall_elapsed* + and (optionally, if supported) *process_elapsed*, which measure wall time + and process time in seconds, respectively. + """ + # {{{ constructor - def __init__(self, traversal, wrangler, fmm_level_to_nterms=None, + def __init__(self, tree_indep, traversal, fmm_level_to_nterms=None, dipole_vec=None, dipoles_already_reordered=False, nterms=None, optimized_m2l_precomputation_memory_cutoff_bytes=10**8, rotation_data=None): @@ -147,19 +306,19 @@ class FMMLibTraversalAndWrangler(TraversalAndWrangler): def fmm_level_to_nterms(tree, level): # noqa pylint:disable=function-redefined return nterms - super().__init__(traversal, wrangler) + super().__init__(tree_indep, traversal) tree = traversal.tree - if wrangler.dim != self.dim: - raise ValueError(f"Expansion wrangler dim ({wrangler.dim}) " - f"does not match tree dim ({self.dim})") + if tree_indep.dim != tree.dimensions: + raise ValueError(f"Kernel dim ({tree_indep.dim}) " + f"does not match tree dim ({tree.dimensions})") self.level_nterms = np.array([ fmm_level_to_nterms(tree, lev) for lev in range(tree.nlevels) ], dtype=np.int32) - if wrangler.helmholtz_k: + if tree_indep.helmholtz_k: logger.info("expansion orders by level used in Helmholtz FMM: %s", self.level_nterms) @@ -172,7 +331,7 @@ class FMMLibTraversalAndWrangler(TraversalAndWrangler): warn( "List 2 (multipole-to-local) translations will be " "unoptimized. Supply a rotation_data argument to " - "FMMLibTraversalAndWrangler for optimized List 2.", + "FMMLibExpansionWrangler for optimized List 2.", FMMLibRotationDataNotSuppliedWarning, stacklevel=2) @@ -191,7 +350,7 @@ class FMMLibTraversalAndWrangler(TraversalAndWrangler): assert dipole_vec.shape == (self.dim, self.tree.nsources) if not dipoles_already_reordered: - dipole_vec = wrangler.reorder_sources(self, dipole_vec) + dipole_vec = self.reorder_sources(dipole_vec) self.dipole_vec = dipole_vec.copy(order="F") else: @@ -199,11 +358,15 @@ class FMMLibTraversalAndWrangler(TraversalAndWrangler): # }}} + @property + def dim(self): + return self.tree.dimensions + def level_to_rscale(self, level): - result = self.tree.root_extent * 2 ** -level * self.wrangler.rscale_factor + result = self.tree.root_extent * 2 ** -level * self.tree_indep.rscale_factor if abs(result) > 1: result = 1 - if self.dim == 3 and self.wrangler.eqn_letter == "l": + if self.dim == 3 and self.tree_indep.eqn_letter == "l": # Laplace 3D uses the opposite convention compared to # all other cases. 
# https://gitlab.tiker.net/inducer/boxtree/merge_requests/81 @@ -221,7 +384,7 @@ class FMMLibTraversalAndWrangler(TraversalAndWrangler): common_extra_kwargs = {} - if self.dim == 3 and self.wrangler.eqn_letter == "h": + if self.dim == 3 and self.tree_indep.eqn_letter == "h": nquad = max(6, int(2.5*nterms)) from pyfmmlib import legewhts xnodes, weights = legewhts(nquad, ifwhts=1) @@ -267,14 +430,14 @@ class FMMLibTraversalAndWrangler(TraversalAndWrangler): from pytools import product return self._expansions_level_starts( lambda nterms: product( - self.wrangler.expansion_shape(nterms))) + self.expansion_shape(nterms))) @memoize_method def local_expansions_level_starts(self): from pytools import product return self._expansions_level_starts( lambda nterms: product( - self.wrangler.expansion_shape(nterms))) + self.expansion_shape(nterms))) # }}} @@ -288,7 +451,7 @@ class FMMLibTraversalAndWrangler(TraversalAndWrangler): return (box_start, mpole_exps[expn_start:expn_stop].reshape( box_stop-box_start, - *self.wrangler.expansion_shape(self.level_nterms[level]))) + *self.expansion_shape(self.level_nterms[level]))) def local_expansions_view(self, local_exps, level): box_start, box_stop = self.tree.level_start_box_nrs[level:level+2] @@ -298,7 +461,7 @@ class FMMLibTraversalAndWrangler(TraversalAndWrangler): return (box_start, local_exps[expn_start:expn_stop].reshape( box_stop-box_start, - *self.wrangler.expansion_shape(self.level_nterms[level]))) + *self.expansion_shape(self.level_nterms[level]))) # }}} @@ -308,7 +471,7 @@ class FMMLibTraversalAndWrangler(TraversalAndWrangler): "charge": src_weights[pslice], } else: - if self.wrangler.eqn_letter == "l" and self.dim == 2: + if self.tree_indep.eqn_letter == "l" and self.dim == 2: return { "dipstr": -src_weights[pslice] * ( self.dipole_vec[0, pslice] @@ -417,168 +580,12 @@ class FMMLibTraversalAndWrangler(TraversalAndWrangler): # }}} -# }}} - - -class FMMLibExpansionWrangler: - """Implements the :class:`boxtree.fmm.ExpansionWranglerInterface` - by using pyfmmlib. - - Timing results returned by this wrangler contains the values *wall_elapsed* - and (optionally, if supported) *process_elapsed*, which measure wall time - and process time in seconds, respectively. 
- """ - - def __init__(self, dim, helmholtz_k, ifgrad=False): - self.dim = dim - self.helmholtz_k = helmholtz_k - self.ifgrad = ifgrad - - if helmholtz_k == 0: - self.eqn_letter = "l" - self.kernel_kwargs = {} - self.rscale_factor = 1 - else: - self.eqn_letter = "h" - self.kernel_kwargs = {"zk": helmholtz_k} - self.rscale_factor = abs(helmholtz_k) - - self.dtype = np.complex128 - - # {{{ routine getters - - def get_routine(self, name, suffix=""): - import pyfmmlib - return getattr(pyfmmlib, "{}{}{}".format( - self.eqn_letter, - name % self.dim, - suffix)) - - def get_vec_routine(self, name): - return self.get_routine(name, "_vec") - - def get_translation_routine(self, taw, name, vec_suffix="_vec"): - suffix = "" - if self.dim == 3: - suffix = "quadu" - suffix += vec_suffix - - rout = self.get_routine(name, suffix) - - if self.dim == 2: - def wrapper(*args, **kwargs): - # not used - kwargs.pop("level_for_projection", None) - - return rout(*args, **kwargs) - else: - - def wrapper(*args, **kwargs): - kwargs.pop("level_for_projection", None) - nterms2 = kwargs["nterms2"] - kwargs.update(taw.projection_quad_extra_kwargs(nterms=nterms2)) - - val, ier = rout(*args, **kwargs) - if (ier != 0).any(): - raise RuntimeError("%s failed with nonzero ier" % name) - - return val - - # Doesn't work in in Py2 - # from functools import update_wrapper - # update_wrapper(wrapper, rout) - return wrapper - - def get_direct_eval_routine(self, use_dipoles): - if self.dim == 2: - rout = self.get_vec_routine( - "potgrad%ddall" + ("_dp" if use_dipoles else "")) - - def wrapper(*args, **kwargs): - kwargs["ifgrad"] = self.ifgrad - kwargs["ifhess"] = False - pot, grad, hess = rout(*args, **kwargs) - - if not self.ifgrad: - grad = 0 - - return pot, grad - - # Doesn't work in in Py2 - # from functools import update_wrapper - # update_wrapper(wrapper, rout) - return wrapper - - elif self.dim == 3: - rout = self.get_vec_routine( - "potfld%ddall" + ("_dp" if use_dipoles else "")) - - def wrapper(*args, **kwargs): - kwargs["iffld"] = self.ifgrad - pot, fld = rout(*args, **kwargs) - if self.ifgrad: - grad = -fld - else: - grad = 0 - return pot, grad - - # Doesn't work in in Py2 - # from functools import update_wrapper - # update_wrapper(wrapper, rout) - return wrapper - else: - raise ValueError("unsupported dimensionality") - - def get_expn_eval_routine(self, expn_kind): - name = "%%dd%seval" % expn_kind - rout = self.get_routine(name, "_vec") - - if self.dim == 2: - def wrapper(*args, **kwargs): - kwargs["ifgrad"] = self.ifgrad - kwargs["ifhess"] = False - - pot, grad, hess = rout(*args, **kwargs) - if not self.ifgrad: - grad = 0 - - return pot, grad - - # Doesn't work in in Py2 - # from functools import update_wrapper - # update_wrapper(wrapper, rout) - return wrapper - - elif self.dim == 3: - def wrapper(*args, **kwargs): - kwargs["iffld"] = self.ifgrad - pot, fld, ier = rout(*args, **kwargs) - - if (ier != 0).any(): - raise RuntimeError("%s failed with nonzero ier" % name) - - if self.ifgrad: - grad = -fld - else: - grad = 0 - - return pot, grad - - # Doesn't work in in Py2 - # from functools import update_wrapper - # update_wrapper(wrapper, rout) - return wrapper - else: - raise ValueError("unsupported dimensionality") - - # }}} - # {{{ data vector utilities def expansion_shape(self, nterms): - if self.dim == 2 and self.eqn_letter == "l": + if self.dim == 2 and self.tree_indep.eqn_letter == "l": return (nterms+1,) - elif self.dim == 2 and self.eqn_letter == "h": + elif self.dim == 2 and self.tree_indep.eqn_letter == "h": 
return (2*nterms+1,) elif self.dim == 3: # This is the transpose of the Fortran format, to @@ -587,27 +594,27 @@ class FMMLibExpansionWrangler: else: raise ValueError("unsupported dimensionality") - def multipole_expansion_zeros(self, taw): + def multipole_expansion_zeros(self): return np.zeros( - taw.multipole_expansions_level_starts()[-1], - dtype=self.dtype) + self.multipole_expansions_level_starts()[-1], + dtype=self.tree_indep.dtype) - def local_expansion_zeros(self, taw): + def local_expansion_zeros(self): return np.zeros( - taw.local_expansions_level_starts()[-1], - dtype=self.dtype) + self.local_expansions_level_starts()[-1], + dtype=self.tree_indep.dtype) - def output_zeros(self, taw): - if self.ifgrad: + def output_zeros(self): + if self.tree_indep.ifgrad: from pytools.obj_array import make_obj_array return make_obj_array([ - np.zeros(taw.tree.ntargets, self.dtype) - for i in range(1 + taw.dim)]) + np.zeros(self.tree.ntargets, self.tree_indep.dtype) + for i in range(1 + self.dim)]) else: - return np.zeros(taw.tree.ntargets, self.dtype) + return np.zeros(self.tree.ntargets, self.tree_indep.dtype) def add_potgrad_onto_output(self, output, output_slice, pot, grad): - if self.ifgrad: + if self.tree_indep.ifgrad: output[0, output_slice] += pot output[1:, output_slice] += grad else: @@ -616,46 +623,47 @@ class FMMLibExpansionWrangler: # }}} @log_process(logger) - def reorder_sources(self, taw, source_array): - return source_array[..., taw.tree.user_source_ids] + def reorder_sources(self, source_array): + return source_array[..., self.tree.user_source_ids] @log_process(logger) - def reorder_potentials(self, taw, potentials): - return potentials[taw.tree.sorted_target_ids] + def reorder_potentials(self, potentials): + return potentials[self.tree.sorted_target_ids] @log_process(logger) @return_timing_data - def form_multipoles(self, taw, level_start_source_box_nrs, source_boxes, + def form_multipoles(self, level_start_source_box_nrs, source_boxes, src_weight_vecs): src_weights, = src_weight_vecs - formmp = self.get_routine("%ddformmp" + ("_dp" if taw.use_dipoles else "")) + formmp = self.tree_indep.get_routine( + "%ddformmp" + ("_dp" if self.use_dipoles else "")) - mpoles = self.multipole_expansion_zeros(taw) - for lev in range(taw.tree.nlevels): + mpoles = self.multipole_expansion_zeros() + for lev in range(self.tree.nlevels): start, stop = level_start_source_box_nrs[lev:lev+2] if start == stop: continue - level_start_ibox, mpoles_view = taw.multipole_expansions_view( + level_start_ibox, mpoles_view = self.multipole_expansions_view( mpoles, lev) - rscale = taw.level_to_rscale(lev) + rscale = self.level_to_rscale(lev) for src_ibox in source_boxes[start:stop]: - pslice = taw._get_source_slice(src_ibox) + pslice = self._get_source_slice(src_ibox) if pslice.stop - pslice.start == 0: continue kwargs = {} - kwargs.update(self.kernel_kwargs) - kwargs.update(taw.get_source_kwargs(src_weights, pslice)) + kwargs.update(self.tree_indep.kernel_kwargs) + kwargs.update(self.get_source_kwargs(src_weights, pslice)) ier, mpole = formmp( rscale=rscale, - source=taw._get_sources(pslice), - center=taw.tree.box_centers[:, src_ibox], - nterms=taw.level_nterms[lev], + source=self._get_sources(pslice), + center=self.tree.box_centers[:, src_ibox], + nterms=self.level_nterms[lev], **kwargs) if ier: @@ -667,11 +675,11 @@ class FMMLibExpansionWrangler: @log_process(logger) @return_timing_data - def coarsen_multipoles(self, taw, level_start_source_parent_box_nrs, + def coarsen_multipoles(self, 
level_start_source_parent_box_nrs, source_parent_boxes, mpoles): - tree = taw.tree + tree = self.tree - mpmp = self.get_translation_routine(taw, "%ddmpmp") + mpmp = self.tree_indep.get_translation_routine(self, "%ddmpmp") # nlevels-1 is the last valid level index # nlevels-2 is the last valid level that could have children @@ -685,12 +693,12 @@ class FMMLibExpansionWrangler: target_level:target_level+2] source_level_start_ibox, source_mpoles_view = \ - taw.multipole_expansions_view(mpoles, source_level) + self.multipole_expansions_view(mpoles, source_level) target_level_start_ibox, target_mpoles_view = \ - taw.multipole_expansions_view(mpoles, target_level) + self.multipole_expansions_view(mpoles, target_level) - source_rscale = taw.level_to_rscale(source_level) - target_rscale = taw.level_to_rscale(target_level) + source_rscale = self.level_to_rscale(source_level) + target_rscale = self.level_to_rscale(target_level) for ibox in source_parent_boxes[start:stop]: parent_center = tree.box_centers[:, ibox] @@ -699,10 +707,10 @@ class FMMLibExpansionWrangler: child_center = tree.box_centers[:, child] kwargs = {} - if self.dim == 3 and self.eqn_letter == "h": + if self.dim == 3 and self.tree_indep.eqn_letter == "h": kwargs["radius"] = tree.root_extent * 2**(-target_level) - kwargs.update(self.kernel_kwargs) + kwargs.update(self.tree_indep.kernel_kwargs) new_mp = mpmp( rscale1=source_rscale, @@ -712,7 +720,7 @@ class FMMLibExpansionWrangler: rscale2=target_rscale, center2=parent_center, - nterms2=taw.level_nterms[target_level], + nterms2=self.level_nterms[target_level], **kwargs) @@ -723,37 +731,38 @@ class FMMLibExpansionWrangler: @log_process(logger) @return_timing_data - def eval_direct(self, taw, target_boxes, neighbor_sources_starts, + def eval_direct(self, target_boxes, neighbor_sources_starts, neighbor_sources_lists, src_weight_vecs): src_weights, = src_weight_vecs - output = self.output_zeros(taw) + output = self.output_zeros() - ev = self.get_direct_eval_routine(taw.use_dipoles) + ev = self.tree_indep.get_direct_eval_routine(self.use_dipoles) for itgt_box, tgt_ibox in enumerate(target_boxes): - tgt_pslice = taw._get_target_slice(tgt_ibox) + tgt_pslice = self._get_target_slice(tgt_ibox) if tgt_pslice.stop - tgt_pslice.start == 0: continue - #tgt_result = np.zeros(tgt_pslice.stop - tgt_pslice.start, self.dtype) + # tgt_result = np.zeros( + # tgt_pslice.stop - tgt_pslice.start, self.tree_indep.dtype) tgt_pot_result = 0 tgt_grad_result = 0 start, end = neighbor_sources_starts[itgt_box:itgt_box+2] for src_ibox in neighbor_sources_lists[start:end]: - src_pslice = taw._get_source_slice(src_ibox) + src_pslice = self._get_source_slice(src_ibox) if src_pslice.stop - src_pslice.start == 0: continue kwargs = {} - kwargs.update(self.kernel_kwargs) - kwargs.update(taw.get_source_kwargs(src_weights, src_pslice)) + kwargs.update(self.tree_indep.kernel_kwargs) + kwargs.update(self.get_source_kwargs(src_weights, src_pslice)) tmp_pot, tmp_grad = ev( - sources=taw._get_sources(src_pslice), - targets=taw._get_targets(tgt_pslice), + sources=self._get_sources(src_pslice), + targets=self._get_targets(tgt_pslice), **kwargs) tgt_pot_result += tmp_pot @@ -767,40 +776,40 @@ class FMMLibExpansionWrangler: @log_process(logger) @return_timing_data def multipole_to_local(self, - taw, level_start_target_or_target_parent_box_nrs, + level_start_target_or_target_parent_box_nrs, target_or_target_parent_boxes, starts, lists, mpole_exps): - tree = taw.tree - local_exps = self.local_expansion_zeros(taw) + tree = self.tree + 
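+        # List 2 ("multipole-to-local"): translate the multipole expansions
+        # of well-separated sibling boxes into local expansions about the
+        # target boxes, level by level.  When precomputed rotation matrices
+        # of sufficient order are available, the optimized "2_trunc_imany"
+        # variant of the translation routine is used below.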
local_exps = self.local_expansion_zeros() # Precomputed rotation matrices (matrices of larger order can be used # for translations of smaller order) - rotmatf, rotmatb, rotmat_order = taw.m2l_rotation_matrices() + rotmatf, rotmatb, rotmat_order = self.m2l_rotation_matrices() - for lev in range(taw.tree.nlevels): + for lev in range(self.tree.nlevels): lstart, lstop = level_start_target_or_target_parent_box_nrs[lev:lev+2] if lstart == lstop: continue starts_on_lvl = starts[lstart:lstop+1] - mploc = self.get_translation_routine( - taw, "%ddmploc", vec_suffix="_imany") + mploc = self.tree_indep.get_translation_routine( + self, "%ddmploc", vec_suffix="_imany") kwargs = {} # {{{ set up optimized m2l, if applicable - if taw.level_nterms[lev] <= rotmat_order: - m2l_rotation_lists = taw.rotation_data.m2l_rotation_lists() + if self.level_nterms[lev] <= rotmat_order: + m2l_rotation_lists = self.rotation_data.m2l_rotation_lists() assert len(m2l_rotation_lists) == len(lists) - mploc = self.get_translation_routine( - taw, "%ddmploc", vec_suffix="2_trunc_imany") + mploc = self.tree_indep.get_translation_routine( + self, "%ddmploc", vec_suffix="2_trunc_imany") kwargs["ldm"] = rotmat_order - kwargs["nterms"] = taw.level_nterms[lev] - kwargs["nterms1"] = taw.level_nterms[lev] + kwargs["nterms"] = self.level_nterms[lev] + kwargs["nterms1"] = self.level_nterms[lev] kwargs["rotmatf"] = rotmatf kwargs["rotmatf_offsets"] = m2l_rotation_lists @@ -813,9 +822,9 @@ class FMMLibExpansionWrangler: # }}} source_level_start_ibox, source_mpoles_view = \ - taw.multipole_expansions_view(mpole_exps, lev) + self.multipole_expansions_view(mpole_exps, lev) target_level_start_ibox, target_local_exps_view = \ - taw.local_expansions_view(local_exps, lev) + self.local_expansions_view(local_exps, lev) ntgt_boxes = lstop-lstart itgt_box_vec = np.arange(ntgt_boxes) @@ -830,12 +839,12 @@ class FMMLibExpansionWrangler: src_boxes_starts[0] = 0 src_boxes_starts[1:] = np.cumsum(nsrc_boxes_per_tgt_box) - rscale = taw.level_to_rscale(lev) + rscale = self.level_to_rscale(lev) rscale1 = np.ones(nsrc_boxes) * rscale rscale1_offsets = np.arange(nsrc_boxes) - if self.dim == 3 and self.eqn_letter == "h": + if self.dim == 3 and self.tree_indep.eqn_letter == "h": kwargs["radius"] = ( tree.root_extent * 2**(-lev) * np.ones(ntgt_boxes)) @@ -848,10 +857,10 @@ class FMMLibExpansionWrangler: kwargs["ier"] = ier expn2 = np.zeros( - (ntgt_boxes,) + self.expansion_shape(taw.level_nterms[lev]), - dtype=self.dtype) + (ntgt_boxes,) + self.expansion_shape(self.level_nterms[lev]), + dtype=self.tree_indep.dtype) - kwargs.update(self.kernel_kwargs) + kwargs.update(self.tree_indep.kernel_kwargs) expn2 = mploc( rscale1=rscale1, @@ -871,7 +880,7 @@ class FMMLibExpansionWrangler: center2=tree.box_centers[:, tgt_ibox_vec], expn2=expn2.T, - nterms2=taw.level_nterms[lev], + nterms2=self.level_nterms[lev], **kwargs).T @@ -882,22 +891,21 @@ class FMMLibExpansionWrangler: @log_process(logger) @return_timing_data def eval_multipoles(self, - taw, target_boxes_by_source_level, sep_smaller_nonsiblings_by_level, mpole_exps): - output = self.output_zeros(taw) + output = self.output_zeros() - mpeval = self.get_expn_eval_routine("mp") + mpeval = self.tree_indep.get_expn_eval_routine("mp") for isrc_level, ssn in enumerate(sep_smaller_nonsiblings_by_level): source_level_start_ibox, source_mpoles_view = \ - taw.multipole_expansions_view(mpole_exps, isrc_level) + self.multipole_expansions_view(mpole_exps, isrc_level) - rscale = taw.level_to_rscale(isrc_level) + rscale = 
self.level_to_rscale(isrc_level) for itgt_box, tgt_ibox in \ enumerate(target_boxes_by_source_level[isrc_level]): - tgt_pslice = taw._get_target_slice(tgt_ibox) + tgt_pslice = self._get_target_slice(tgt_ibox) if tgt_pslice.stop - tgt_pslice.start == 0: continue @@ -909,11 +917,11 @@ class FMMLibExpansionWrangler: tmp_pot, tmp_grad = mpeval( rscale=rscale, - center=taw.tree.box_centers[:, src_ibox], + center=self.tree.box_centers[:, src_ibox], expn=source_mpoles_view[ src_ibox - source_level_start_ibox].T, - ztarg=taw._get_targets(tgt_pslice), - **self.kernel_kwargs) + ztarg=self._get_targets(tgt_pslice), + **self.tree_indep.kernel_kwargs) tgt_pot = tgt_pot + tmp_pot tgt_grad = tgt_grad + tmp_grad @@ -926,34 +934,33 @@ class FMMLibExpansionWrangler: @log_process(logger) @return_timing_data def form_locals(self, - taw, level_start_target_or_target_parent_box_nrs, target_or_target_parent_boxes, starts, lists, src_weight_vecs): src_weights, = src_weight_vecs - local_exps = self.local_expansion_zeros(taw) + local_exps = self.local_expansion_zeros() - formta = self.get_routine("%ddformta" + ("_dp" if taw.use_dipoles else ""), - suffix="_imany") + formta = self.tree_indep.get_routine( + "%ddformta" + ("_dp" if self.use_dipoles else ""), suffix="_imany") - sources = taw._get_single_sources_array() + sources = self._get_single_sources_array() # sources_starts / sources_lists is a CSR list mapping box centers to # lists of starting indices into the sources array. To get the starting # source indices we have to look at box_source_starts. - sources_offsets = taw.tree.box_source_starts[lists] + sources_offsets = self.tree.box_source_starts[lists] # nsources_starts / nsources_lists is a CSR list mapping box centers to # lists of indices into nsources, each of which represents a source # count. - nsources = taw.tree.box_source_counts_nonchild + nsources = self.tree.box_source_counts_nonchild nsources_offsets = lists # centers is indexed into by values of centers_offsets, which is a list # mapping box indices to box center indices. 
- centers = taw._get_single_box_centers_array() + centers = self._get_single_box_centers_array() - source_kwargs = taw.get_source_kwargs(src_weights, slice(None)) + source_kwargs = self.get_source_kwargs(src_weights, slice(None)) - for lev in range(taw.tree.nlevels): + for lev in range(self.tree.nlevels): lev_start, lev_stop = \ level_start_target_or_target_parent_box_nrs[lev:lev+2] @@ -961,17 +968,17 @@ class FMMLibExpansionWrangler: continue target_box_start, target_local_exps_view = \ - taw.local_expansions_view(local_exps, lev) + self.local_expansions_view(local_exps, lev) centers_offsets = target_or_target_parent_boxes[lev_start:lev_stop] - rscale = taw.level_to_rscale(lev) + rscale = self.level_to_rscale(lev) sources_starts = starts[lev_start:1 + lev_stop] nsources_starts = sources_starts kwargs = {} - kwargs.update(self.kernel_kwargs) + kwargs.update(self.tree_indep.kernel_kwargs) for key, val in source_kwargs.items(): kwargs[key] = val # Add CSR lists mapping box centers to lists of starting positions @@ -992,7 +999,7 @@ class FMMLibExpansionWrangler: nsources_offsets=nsources_offsets, centers=centers, centers_offsets=centers_offsets, - nterms=taw.level_nterms[lev], + nterms=self.level_nterms[lev], **kwargs) if ier.any(): @@ -1006,34 +1013,34 @@ class FMMLibExpansionWrangler: @log_process(logger) @return_timing_data - def refine_locals(self, taw, level_start_target_or_target_parent_box_nrs, + def refine_locals(self, level_start_target_or_target_parent_box_nrs, target_or_target_parent_boxes, local_exps): - locloc = self.get_translation_routine(taw, "%ddlocloc") + locloc = self.tree_indep.get_translation_routine(self, "%ddlocloc") - for target_lev in range(1, taw.tree.nlevels): + for target_lev in range(1, self.tree.nlevels): start, stop = level_start_target_or_target_parent_box_nrs[ target_lev:target_lev+2] source_lev = target_lev - 1 source_level_start_ibox, source_local_exps_view = \ - taw.local_expansions_view(local_exps, source_lev) + self.local_expansions_view(local_exps, source_lev) target_level_start_ibox, target_local_exps_view = \ - taw.local_expansions_view(local_exps, target_lev) - source_rscale = taw.level_to_rscale(source_lev) - target_rscale = taw.level_to_rscale(target_lev) + self.local_expansions_view(local_exps, target_lev) + source_rscale = self.level_to_rscale(source_lev) + target_rscale = self.level_to_rscale(target_lev) for tgt_ibox in target_or_target_parent_boxes[start:stop]: - tgt_center = taw.tree.box_centers[:, tgt_ibox] - src_ibox = taw.tree.box_parent_ids[tgt_ibox] - src_center = taw.tree.box_centers[:, src_ibox] + tgt_center = self.tree.box_centers[:, tgt_ibox] + src_ibox = self.tree.box_parent_ids[tgt_ibox] + src_center = self.tree.box_centers[:, src_ibox] kwargs = {} - if self.dim == 3 and self.eqn_letter == "h": - kwargs["radius"] = taw.tree.root_extent * 2**(-target_lev) + if self.dim == 3 and self.tree_indep.eqn_letter == "h": + kwargs["radius"] = self.tree.root_extent * 2**(-target_lev) - kwargs.update(self.kernel_kwargs) + kwargs.update(self.tree_indep.kernel_kwargs) tmp_loc_exp = locloc( rscale1=source_rscale, center1=src_center, @@ -1042,7 +1049,7 @@ class FMMLibExpansionWrangler: rscale2=target_rscale, center2=tgt_center, - nterms2=taw.level_nterms[target_lev], + nterms2=self.level_nterms[target_lev], **kwargs)[..., 0] @@ -1053,34 +1060,34 @@ class FMMLibExpansionWrangler: @log_process(logger) @return_timing_data - def eval_locals(self, taw, level_start_target_box_nrs, target_boxes, local_exps): - output = self.output_zeros(taw) - taeval = 
self.get_expn_eval_routine("ta") + def eval_locals(self, level_start_target_box_nrs, target_boxes, local_exps): + output = self.output_zeros() + taeval = self.tree_indep.get_expn_eval_routine("ta") - for lev in range(taw.tree.nlevels): + for lev in range(self.tree.nlevels): start, stop = level_start_target_box_nrs[lev:lev+2] if start == stop: continue source_level_start_ibox, source_local_exps_view = \ - taw.local_expansions_view(local_exps, lev) + self.local_expansions_view(local_exps, lev) - rscale = taw.level_to_rscale(lev) + rscale = self.level_to_rscale(lev) for tgt_ibox in target_boxes[start:stop]: - tgt_pslice = taw._get_target_slice(tgt_ibox) + tgt_pslice = self._get_target_slice(tgt_ibox) if tgt_pslice.stop - tgt_pslice.start == 0: continue tmp_pot, tmp_grad = taeval( rscale=rscale, - center=taw.tree.box_centers[:, tgt_ibox], + center=self.tree.box_centers[:, tgt_ibox], expn=source_local_exps_view[ tgt_ibox - source_level_start_ibox].T, - ztarg=taw._get_targets(tgt_pslice), + ztarg=self._get_targets(tgt_pslice), - **self.kernel_kwargs) + **self.tree_indep.kernel_kwargs) self.add_potgrad_onto_output( output, tgt_pslice, tmp_pot, tmp_grad) @@ -1088,20 +1095,20 @@ class FMMLibExpansionWrangler: return output @log_process(logger) - def finalize_potentials(self, taw, potential): - if self.eqn_letter == "l" and taw.dim == 2: + def finalize_potentials(self, potential): + if self.tree_indep.eqn_letter == "l" and self.dim == 2: scale_factor = -1/(2*np.pi) - elif self.eqn_letter == "h" and taw.dim == 2: + elif self.tree_indep.eqn_letter == "h" and self.dim == 2: scale_factor = 1 - elif self.eqn_letter in ["l", "h"] and taw.dim == 3: + elif self.tree_indep.eqn_letter in ["l", "h"] and self.dim == 3: scale_factor = 1/(4*np.pi) else: raise NotImplementedError( "scale factor for pyfmmlib %s for %d dimensions" % ( self.eqn_letter, - taw.dim)) + self.dim)) - if self.eqn_letter == "l" and taw.dim == 2: + if self.tree_indep.eqn_letter == "l" and self.dim == 2: potential = potential.real return potential * scale_factor diff --git a/boxtree/timing.py b/boxtree/timing.py new file mode 100644 index 0000000..be49d0f --- /dev/null +++ b/boxtree/timing.py @@ -0,0 +1,170 @@ +""" +.. autoclass:: TimingResult + +.. autoclass:: TimingFuture +""" + +__copyright__ = "Copyright (C) 2012 Andreas Kloeckner" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + + +from collections.abc import Mapping + + +# {{{ timing result + +class TimingResult(Mapping): + """Interface for returned timing data. 
+ + This supports accessing timing results via a mapping interface, along with + combining results via :meth:`merge`. + + .. automethod:: merge + """ + + def __init__(self, *args, **kwargs): + """See constructor for :class:`dict`.""" + self._mapping = dict(*args, **kwargs) + + def __getitem__(self, key): + return self._mapping[key] + + def __iter__(self): + return iter(self._mapping) + + def __len__(self): + return len(self._mapping) + + def merge(self, other): + """Merge this result with another by adding together common fields.""" + result = {} + + for key in self: + val = self.get(key) + other_val = other.get(key) + + if val is None or other_val is None: + continue + + result[key] = val + other_val + + return type(self)(result) + +# }}} + + +# {{{ timing future + +class TimingFuture: + """Returns timing data for a potentially asynchronous operation. + + .. automethod:: result + .. automethod:: done + """ + + def result(self): + """Return a :class:`TimingResult`. May block.""" + raise NotImplementedError + + def done(self): + """Return *True* if the operation is complete.""" + raise NotImplementedError + +# }}} + + +# {{{ timing recorder + +class TimingRecorder: + + def __init__(self): + from collections import defaultdict + self.futures = defaultdict(list) + + def add(self, description, future): + self.futures[description].append(future) + + def summarize(self): + result = {} + + for description, futures_list in self.futures.items(): + futures = iter(futures_list) + + timing_result = next(futures).result() + for future in futures: + timing_result = timing_result.merge(future.result()) + + result[description] = timing_result + + return result + +# }}} + + +# {{{ time recording tool + +class DummyTimingFuture(TimingFuture): + @classmethod + def from_timer(cls, timer): + return cls(wall_elapsed=timer.wall_elapsed, + process_elapsed=timer.process_elapsed) + + @classmethod + def from_op_count(cls, op_count): + return cls(ops_elapsed=op_count) + + def __init__(self, *args, **kwargs): + self._result = TimingResult(*args, **kwargs) + + def result(self): + return self._result + + def done(self): + return True + + +def return_timing_data(wrapped): + """A decorator for recording timing data for a function call. + + The decorated function returns a tuple (*retval*, *timing_future*) + where *retval* is the original return value and *timing_future* + supports the timing data future interface in :mod:`boxtree.fmm`. 
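+
+    A sketch of the intended use, decorating a wrangler method (as the
+    pyfmmlib wrangler in this series does)::
+
+        @return_timing_data
+        def form_multipoles(self, *args, **kwargs):
+            ...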
+ """ + + from pytools import ProcessTimer + + def wrapper(*args, **kwargs): + timer = ProcessTimer() + retval = wrapped(*args, **kwargs) + timer.done() + + future = DummyTimingFuture.from_timer(timer) + return (retval, future) + + from functools import update_wrapper + new_wrapper = update_wrapper(wrapper, wrapped) + + return new_wrapper + +# }}} + + diff --git a/boxtree/tools.py b/boxtree/tools.py index 1f70178..93c2906 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -29,7 +29,6 @@ from pyopencl.tools import dtype_to_c_struct, VectorArg as _VectorArg from pyopencl.tools import ScalarArg # noqa from mako.template import Template from pytools.obj_array import make_obj_array -from boxtree.fmm import TimingFuture, TimingResult, TraversalAndWrangler import loopy as lp from loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_2 # noqa @@ -529,55 +528,6 @@ class MapValuesKernel: # }}} -# {{{ time recording tool - -class DummyTimingFuture(TimingFuture): - - @classmethod - def from_timer(cls, timer): - return cls(wall_elapsed=timer.wall_elapsed, - process_elapsed=timer.process_elapsed) - - @classmethod - def from_op_count(cls, op_count): - return cls(ops_elapsed=op_count) - - def __init__(self, *args, **kwargs): - self._result = TimingResult(*args, **kwargs) - - def result(self): - return self._result - - def done(self): - return True - - -def return_timing_data(wrapped): - """A decorator for recording timing data for a function call. - - The decorated function returns a tuple (*retval*, *timing_future*) - where *retval* is the original return value and *timing_future* - supports the timing data future interface in :mod:`boxtree.fmm`. - """ - - from pytools import ProcessTimer - - def wrapper(*args, **kwargs): - timer = ProcessTimer() - retval = wrapped(*args, **kwargs) - timer.done() - - future = DummyTimingFuture.from_timer(timer) - return (retval, future) - - from functools import update_wrapper - new_wrapper = update_wrapper(wrapper, wrapped) - - return new_wrapper - -# }}} - - # {{{ binary search from mako.template import Template @@ -634,201 +584,4 @@ class InlineBinarySearch: # }}} -# {{{ constant one wrangler - -class ConstantOneTraversalAndWrangler(TraversalAndWrangler): - def _get_source_slice(self, ibox): - pstart = self.tree.box_source_starts[ibox] - return slice( - pstart, pstart + self.tree.box_source_counts_nonchild[ibox]) - - def _get_target_slice(self, ibox): - pstart = self.tree.box_target_starts[ibox] - return slice( - pstart, pstart + self.tree.box_target_counts_nonchild[ibox]) - - -class ConstantOneExpansionWrangler: - """This implements the 'analytical routines' for a Green's function that is - constant 1 everywhere. For 'charges' of 'ones', this should get every particle - a copy of the particle count. - - Timing results returned by this wrangler contain the field *ops_elapsed*, - which counts approximately the number of floating-point operations required. 
- """ - - def multipole_expansion_zeros(self, taw): - return np.zeros(taw.tree.nboxes, dtype=np.float64) - - local_expansion_zeros = multipole_expansion_zeros - - def output_zeros(self, taw): - return np.zeros(taw.tree.ntargets, dtype=np.float64) - - def reorder_sources(self, taw, source_array): - return source_array[taw.tree.user_source_ids] - - def reorder_potentials(self, taw, potentials): - return potentials[taw.tree.sorted_target_ids] - - @staticmethod - def timing_future(ops): - return DummyTimingFuture.from_op_count(ops) - - def form_multipoles(self, taw, level_start_source_box_nrs, source_boxes, - src_weight_vecs): - src_weights, = src_weight_vecs - mpoles = self.multipole_expansion_zeros(taw) - ops = 0 - - for ibox in source_boxes: - pslice = taw._get_source_slice(ibox) - mpoles[ibox] += np.sum(src_weights[pslice]) - ops += src_weights[pslice].size - - return mpoles, self.timing_future(ops) - - def coarsen_multipoles(self, taw, level_start_source_parent_box_nrs, - source_parent_boxes, mpoles): - tree = taw.tree - ops = 0 - - # nlevels-1 is the last valid level index - # nlevels-2 is the last valid level that could have children - # - # 3 is the last relevant source_level. - # 2 is the last relevant target_level. - # (because no level 1 box will be well-separated from another) - for source_level in range(tree.nlevels-1, 2, -1): - target_level = source_level - 1 - start, stop = level_start_source_parent_box_nrs[ - target_level:target_level+2] - for ibox in source_parent_boxes[start:stop]: - for child in tree.box_child_ids[:, ibox]: - if child: - mpoles[ibox] += mpoles[child] - ops += 1 - - return mpoles, self.timing_future(ops) - - def eval_direct(self, taw, target_boxes, neighbor_sources_starts, - neighbor_sources_lists, src_weight_vecs): - src_weights, = src_weight_vecs - pot = self.output_zeros(taw) - ops = 0 - - for itgt_box, tgt_ibox in enumerate(target_boxes): - tgt_pslice = taw._get_target_slice(tgt_ibox) - - src_sum = 0 - nsrcs = 0 - start, end = neighbor_sources_starts[itgt_box:itgt_box+2] - #print "DIR: %s <- %s" % (tgt_ibox, neighbor_sources_lists[start:end]) - for src_ibox in neighbor_sources_lists[start:end]: - src_pslice = taw._get_source_slice(src_ibox) - nsrcs += src_weights[src_pslice].size - - src_sum += np.sum(src_weights[src_pslice]) - - pot[tgt_pslice] = src_sum - ops += pot[tgt_pslice].size * nsrcs - - return pot, self.timing_future(ops) - - def multipole_to_local(self, - taw, - level_start_target_or_target_parent_box_nrs, - target_or_target_parent_boxes, - starts, lists, mpole_exps): - local_exps = self.local_expansion_zeros(taw) - ops = 0 - - for itgt_box, tgt_ibox in enumerate(target_or_target_parent_boxes): - start, end = starts[itgt_box:itgt_box+2] - - contrib = 0 - #print tgt_ibox, "<-", lists[start:end] - for src_ibox in lists[start:end]: - contrib += mpole_exps[src_ibox] - ops += 1 - - local_exps[tgt_ibox] += contrib - - return local_exps, self.timing_future(ops) - - def eval_multipoles(self, taw, - target_boxes_by_source_level, from_sep_smaller_nonsiblings_by_level, - mpole_exps): - pot = self.output_zeros(taw) - ops = 0 - - for level, ssn in enumerate(from_sep_smaller_nonsiblings_by_level): - for itgt_box, tgt_ibox in \ - enumerate(target_boxes_by_source_level[level]): - tgt_pslice = taw._get_target_slice(tgt_ibox) - - contrib = 0 - - start, end = ssn.starts[itgt_box:itgt_box+2] - for src_ibox in ssn.lists[start:end]: - contrib += mpole_exps[src_ibox] - - pot[tgt_pslice] += contrib - ops += pot[tgt_pslice].size * (end - start) - - return pot, 
self.timing_future(ops) - - def form_locals(self, taw, - level_start_target_or_target_parent_box_nrs, - target_or_target_parent_boxes, starts, lists, src_weight_vecs): - src_weights, = src_weight_vecs - local_exps = self.local_expansion_zeros(taw) - ops = 0 - - for itgt_box, tgt_ibox in enumerate(target_or_target_parent_boxes): - start, end = starts[itgt_box:itgt_box+2] - - #print "LIST 4", tgt_ibox, "<-", lists[start:end] - contrib = 0 - nsrcs = 0 - for src_ibox in lists[start:end]: - src_pslice = taw._get_source_slice(src_ibox) - nsrcs += src_weights[src_pslice].size - - contrib += np.sum(src_weights[src_pslice]) - - local_exps[tgt_ibox] += contrib - ops += nsrcs - - return local_exps, self.timing_future(ops) - - def refine_locals(self, taw, level_start_target_or_target_parent_box_nrs, - target_or_target_parent_boxes, local_exps): - ops = 0 - - for target_lev in range(1, taw.tree.nlevels): - start, stop = level_start_target_or_target_parent_box_nrs[ - target_lev:target_lev+2] - for ibox in target_or_target_parent_boxes[start:stop]: - local_exps[ibox] += local_exps[taw.tree.box_parent_ids[ibox]] - ops += 1 - - return local_exps, self.timing_future(ops) - - def eval_locals(self, taw, level_start_target_box_nrs, target_boxes, local_exps): - pot = self.output_zeros(taw) - ops = 0 - - for ibox in target_boxes: - tgt_pslice = taw._get_target_slice(ibox) - pot[tgt_pslice] += local_exps[ibox] - ops += pot[tgt_pslice].size - - return pot, self.timing_future(ops) - - def finalize_potentials(self, taw, potentials): - return potentials - -# }}} - # vim: foldmethod=marker:filetype=pyopencl diff --git a/doc/index.rst b/doc/index.rst index ac452ba..87ece52 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -41,6 +41,7 @@ Overview fmm lookup cost + tools misc 🚀 Github 💾 Download Releases diff --git a/doc/tools.rst b/doc/tools.rst new file mode 100644 index 0000000..6db9bc7 --- /dev/null +++ b/doc/tools.rst @@ -0,0 +1,6 @@ +Utility Functionality +===================== + +.. automodule:: boxtree.timing + +.. 
automodule:: boxtree.constant_one diff --git a/examples/cost_model.py b/examples/cost_model.py index 032b1d4..e7a686f 100644 --- a/examples/cost_model.py +++ b/examples/cost_model.py @@ -25,7 +25,8 @@ def demo_cost_model(): ) from boxtree.pyfmmlib_integration import ( - FMMLibTraversalAndWrangler, FMMLibExpansionWrangler) + FMMLibTreeIndependentDataForWrangler, + FMMLibExpansionWrangler) nsources_list = [1000, 2000, 3000, 4000, 5000] ntargets_list = [1000, 2000, 3000, 4000, 5000] @@ -77,15 +78,15 @@ def demo_cost_model(): # }}} - wrangler = FMMLibExpansionWrangler(trav.tree.dimensions, 0) - taw = FMMLibTraversalAndWrangler(trav, wrangler, + tree_indep = FMMLibTreeIndependentDataForWrangler(trav.tree.dimensions, 0) + wrangler = FMMLibExpansionWrangler(tree_indep, trav, fmm_level_to_nterms=fmm_level_to_nterms) - level_to_orders.append(taw.level_nterms) + level_to_orders.append(wrangler.level_nterms) timing_data = {} from boxtree.fmm import drive_fmm src_weights = np.random.rand(tree.nsources).astype(tree.coord_dtype) - drive_fmm(taw, (src_weights,), timing_data=timing_data) + drive_fmm(wrangler, (src_weights,), timing_data=timing_data) timing_results.append(timing_data) diff --git a/test/test_cost_model.py b/test/test_cost_model.py index 269c9c4..cabb9cc 100644 --- a/test/test_cost_model.py +++ b/test/test_cost_model.py @@ -392,7 +392,8 @@ def test_compare_cl_and_py_cost_model(ctx_factory, nsources, ntargets, dims, dty @pytest.mark.opencl def test_estimate_calibration_params(ctx_factory): from boxtree.pyfmmlib_integration import ( - FMMLibExpansionWrangler, FMMLibTraversalAndWrangler) + FMMLibTreeIndependentDataForWrangler, + FMMLibExpansionWrangler) nsources_list = [1000, 2000, 3000, 4000] ntargets_list = [1000, 2000, 3000, 4000] @@ -444,15 +445,15 @@ def test_estimate_calibration_params(ctx_factory): # }}} - wrangler = FMMLibExpansionWrangler(trav.tree.dimensions, 0) - taw = FMMLibTraversalAndWrangler(trav, wrangler, + tree_indep = FMMLibTreeIndependentDataForWrangler(trav.tree.dimensions, 0) + wrangler = FMMLibExpansionWrangler(tree_indep, trav, fmm_level_to_nterms=fmm_level_to_nterms) - level_to_orders.append(taw.level_nterms) + level_to_orders.append(wrangler.level_nterms) timing_data = {} from boxtree.fmm import drive_fmm src_weights = np.random.rand(tree.nsources).astype(tree.coord_dtype) - drive_fmm(taw, (src_weights,), timing_data=timing_data) + drive_fmm(wrangler, (src_weights,), timing_data=timing_data) timing_results.append(timing_data) @@ -576,15 +577,16 @@ def test_cost_model_op_counts_agree_with_constantone_wrangler( trav_dev, _ = tg(queue, tree, debug=True) trav = trav_dev.get(queue=queue) - from boxtree.tools import ( - ConstantOneExpansionWrangler, ConstantOneTraversalAndWrangler) - wrangler = ConstantOneExpansionWrangler() - taw = ConstantOneTraversalAndWrangler(trav, wrangler) + from boxtree.constant_one import ( + ConstantOneTreeIndependentDataForWrangler, + ConstantOneExpansionWrangler) + tree_indep = ConstantOneTreeIndependentDataForWrangler() + wrangler = ConstantOneExpansionWrangler(tree_indep, trav) timing_data = {} from boxtree.fmm import drive_fmm src_weights = np.random.rand(tree.nsources).astype(tree.coord_dtype) - drive_fmm(taw, (src_weights,), timing_data=timing_data) + drive_fmm(wrangler, (src_weights,), timing_data=timing_data) cost_model = FMMCostModel( translation_cost_model_factory=OpCountingTranslationCostModel diff --git a/test/test_fmm.py b/test/test_fmm.py index 6bcd809..1dacb4c 100644 --- a/test/test_fmm.py +++ b/test/test_fmm.py @@ -35,8 +35,9 @@ 
from boxtree.tools import ( # noqa: F401 make_normal_particle_array as p_normal, make_surface_particle_array as p_surface, make_uniform_particle_array as p_uniform, - particle_array_to_host, - ConstantOneTraversalAndWrangler, + particle_array_to_host) +from boxtree.constant_one import ( + ConstantOneTreeIndependentDataForWrangler, ConstantOneExpansionWrangler) import logging @@ -47,7 +48,7 @@ warnings.simplefilter("ignore", FMMLibRotationDataNotSuppliedWarning) # {{{ ref fmmlib pot computation -def get_fmmlib_ref_pot(taw, weights, sources_host, targets_host, +def get_fmmlib_ref_pot(wrangler, weights, sources_host, targets_host, helmholtz_k, dipole_vec=None): dims = sources_host.shape[0] eqn_letter = "h" if helmholtz_k else "l" @@ -82,8 +83,7 @@ def get_fmmlib_ref_pot(taw, weights, sources_host, targets_host, if helmholtz_k: kwargs["zk"] = helmholtz_k - return taw.wrangler.finalize_potentials( - taw, + return wrangler.finalize_potentials( fmmlib_routine( sources=sources_host, targets=targets_host, **kwargs)[0] @@ -94,10 +94,11 @@ def get_fmmlib_ref_pot(taw, weights, sources_host, targets_host, # {{{ fmm interaction completeness test -class ConstantOneTraversalAndWranglerWithFilteredTargetsInTreeOrder( - ConstantOneTraversalAndWrangler): - def __init__(self, traversal, wrangler, filtered_targets): - super().__init__(traversal, wrangler) +class ConstantOneExpansionWranglerWithFilteredTargetsInTreeOrder( + ConstantOneExpansionWrangler): + + def __init__(self, tree_indep, traversal, filtered_targets): + super().__init__(tree_indep, traversal) self.filtered_targets = filtered_targets def _get_target_slice(self, ibox): @@ -106,26 +107,22 @@ class ConstantOneTraversalAndWranglerWithFilteredTargetsInTreeOrder( pstart, pstart + self.filtered_targets.box_target_counts_nonchild[ibox]) + def output_zeros(self): + return np.zeros(self.filtered_targets.nfiltered_targets, dtype=np.float64) -class ConstantOneExpansionWranglerWithFilteredTargetsInTreeOrder( - ConstantOneExpansionWrangler): - - def output_zeros(self, taw): - return np.zeros(taw.filtered_targets.nfiltered_targets, dtype=np.float64) - - def reorder_potentials(self, taw, potentials): - tree_order_all_potentials = np.zeros(taw.tree.ntargets, potentials.dtype) + def reorder_potentials(self, potentials): + tree_order_all_potentials = np.zeros(self.tree.ntargets, potentials.dtype) tree_order_all_potentials[ - taw.filtered_targets.unfiltered_from_filtered_target_indices] \ + self.filtered_targets.unfiltered_from_filtered_target_indices] \ = potentials - return tree_order_all_potentials[taw.tree.sorted_target_ids] + return tree_order_all_potentials[self.tree.sorted_target_ids] -class ConstantOneTraversalAndWranglerWithFilteredTargetsInUserOrder( - ConstantOneTraversalAndWrangler): - def __init__(self, traversal, wrangler, filtered_targets): - super().__init__(traversal, wrangler) +class ConstantOneExpansionWranglerWithFilteredTargetsInUserOrder( + ConstantOneExpansionWrangler): + def __init__(self, tree_indep, traversal, filtered_targets): + super().__init__(tree_indep, traversal) self.filtered_targets = filtered_targets def _get_target_slice(self, ibox): @@ -240,36 +237,35 @@ def test_fmm_completeness(ctx_factory, dims, nsources_req, ntargets_req, from boxtree.tree import ParticleListFilter plfilt = ParticleListFilter(ctx) + tree_indep = ConstantOneTreeIndependentDataForWrangler() + if filter_kind: flags = rng.uniform(queue, ntargets or nsources, np.int32, a=0, b=2) \ .astype(np.int8) if filter_kind == "user": filtered_targets = 
plfilt.filter_target_lists_in_user_order( queue, tree, flags) - wrangler = ConstantOneExpansionWrangler() - taw = ConstantOneTraversalAndWranglerWithFilteredTargetsInUserOrder( - host_trav, wrangler, filtered_targets.get(queue=queue)) + wrangler = ConstantOneExpansionWranglerWithFilteredTargetsInUserOrder( + tree_indep, host_trav, filtered_targets.get(queue=queue)) elif filter_kind == "tree": filtered_targets = plfilt.filter_target_lists_in_tree_order( queue, tree, flags) - wrangler = ConstantOneExpansionWranglerWithFilteredTargetsInTreeOrder() - taw = ConstantOneTraversalAndWranglerWithFilteredTargetsInTreeOrder( - host_trav, wrangler, filtered_targets.get(queue=queue)) + wrangler = ConstantOneExpansionWranglerWithFilteredTargetsInTreeOrder( + tree_indep, host_trav, filtered_targets.get(queue=queue)) else: raise ValueError("unsupported value of 'filter_kind'") else: - wrangler = ConstantOneExpansionWrangler() - taw = ConstantOneTraversalAndWrangler(host_trav, wrangler) + wrangler = ConstantOneExpansionWrangler(tree_indep, host_trav) flags = cl.array.empty(queue, ntargets or nsources, dtype=np.int8) flags.fill(1) if ntargets is None and not filter_kind: # This check only works for targets == sources. assert (wrangler.reorder_potentials( - taw, wrangler.reorder_sources(taw, weights)) == weights).all() + wrangler.reorder_sources(weights)) == weights).all() from boxtree.fmm import drive_fmm - pot = drive_fmm(taw, (weights,)) + pot = drive_fmm(wrangler, (weights,)) if filter_kind: pot = pot[flags.get() > 0] @@ -455,16 +451,17 @@ def test_pyfmmlib_fmm(ctx_factory, dims, use_dipoles, helmholtz_k): return result from boxtree.pyfmmlib_integration import ( - FMMLibExpansionWrangler, FMMLibTraversalAndWrangler) - wrangler = FMMLibExpansionWrangler(trav.tree.dimensions, helmholtz_k) - taw = FMMLibTraversalAndWrangler( - trav, wrangler, fmm_level_to_nterms=fmm_level_to_nterms, + FMMLibTreeIndependentDataForWrangler, FMMLibExpansionWrangler) + tree_indep = FMMLibTreeIndependentDataForWrangler( + trav.tree.dimensions, helmholtz_k) + wrangler = FMMLibExpansionWrangler( + tree_indep, trav, fmm_level_to_nterms=fmm_level_to_nterms, dipole_vec=dipole_vec) from boxtree.fmm import drive_fmm timing_data = {} - pot = drive_fmm(taw, (weights,), timing_data=timing_data) + pot = drive_fmm(wrangler, (weights,), timing_data=timing_data) print(timing_data) assert timing_data @@ -472,7 +469,7 @@ def test_pyfmmlib_fmm(ctx_factory, dims, use_dipoles, helmholtz_k): logger.info("computing direct (reference) result") - ref_pot = get_fmmlib_ref_pot(taw, weights, sources_host.T, + ref_pot = get_fmmlib_ref_pot(wrangler, weights, sources_host.T, targets_host.T, helmholtz_k, dipole_vec) rel_err = la.norm(pot - ref_pot, np.inf) / la.norm(ref_pot, np.inf) @@ -576,26 +573,28 @@ def test_pyfmmlib_numerical_stability(ctx_factory, dims, helmholtz_k, order): weights = np.ones_like(sources[0]) from boxtree.pyfmmlib_integration import ( - FMMLibExpansionWrangler, FMMLibTraversalAndWrangler, FMMLibRotationData) + FMMLibTreeIndependentDataForWrangler, + FMMLibExpansionWrangler, FMMLibRotationData) def fmm_level_to_nterms(tree, lev): return order - wrangler = FMMLibExpansionWrangler(trav.tree.dimensions, helmholtz_k) - taw = FMMLibTraversalAndWrangler( - trav, wrangler, + tree_indep = FMMLibTreeIndependentDataForWrangler( + trav.tree.dimensions, helmholtz_k) + wrangler = FMMLibExpansionWrangler( + tree_indep, trav, fmm_level_to_nterms=fmm_level_to_nterms, rotation_data=FMMLibRotationData(queue, trav)) from boxtree.fmm import drive_fmm - pot 
= drive_fmm(taw, (weights,)) + pot = drive_fmm(wrangler, (weights,)) assert not np.isnan(pot).any() # {{{ ref fmmlib computation logger.info("computing direct (reference) result") - ref_pot = get_fmmlib_ref_pot(taw, weights, sources, targets, + ref_pot = get_fmmlib_ref_pot(wrangler, weights, sources, targets, helmholtz_k) rel_err = la.norm(pot - ref_pot, np.inf) / la.norm(ref_pot, np.inf) @@ -661,10 +660,10 @@ def test_interaction_list_particle_count_thresholding(ctx_factory, enable_extent host_trav = trav.get(queue=queue) - wrangler = ConstantOneExpansionWrangler() - taw = ConstantOneTraversalAndWrangler(host_trav, wrangler) + tree_indep = ConstantOneTreeIndependentDataForWrangler() + wrangler = ConstantOneExpansionWrangler(tree_indep, host_trav) - pot = drive_fmm(taw, (weights,)) + pot = drive_fmm(wrangler, (weights,)) assert (pot == weights_sum).all() @@ -718,10 +717,10 @@ def test_fmm_float32(ctx_factory, enable_extents): host_trav = trav.get(queue=queue) - wrangler = ConstantOneExpansionWrangler() - taw = ConstantOneTraversalAndWrangler(host_trav, wrangler) + tree_indep = ConstantOneTreeIndependentDataForWrangler() + wrangler = ConstantOneExpansionWrangler(tree_indep, host_trav) - pot = drive_fmm(taw, (weights,)) + pot = drive_fmm(wrangler, (weights,)) assert (pot == weights_sum).all() @@ -782,15 +781,17 @@ def test_fmm_with_optimized_3d_m2l(ctx_factory, nsrcntgts, helmholtz_k, return result from boxtree.pyfmmlib_integration import ( - FMMLibExpansionWrangler, FMMLibTraversalAndWrangler, FMMLibRotationData) + FMMLibTreeIndependentDataForWrangler, + FMMLibExpansionWrangler, FMMLibRotationData) - wrangler = FMMLibExpansionWrangler(trav.tree.dimensions, helmholtz_k) - baseline_taw = FMMLibTraversalAndWrangler( - trav, wrangler, + tree_indep = FMMLibTreeIndependentDataForWrangler( + trav.tree.dimensions, helmholtz_k) + baseline_wrangler = FMMLibExpansionWrangler( + tree_indep, trav, fmm_level_to_nterms=fmm_level_to_nterms) - optimized_taw = FMMLibTraversalAndWrangler( - trav, wrangler, + optimized_wrangler = FMMLibExpansionWrangler( + tree_indep, trav, fmm_level_to_nterms=fmm_level_to_nterms, rotation_data=FMMLibRotationData(queue, trav)) @@ -798,11 +799,11 @@ def test_fmm_with_optimized_3d_m2l(ctx_factory, nsrcntgts, helmholtz_k, baseline_timing_data = {} baseline_pot = drive_fmm( - baseline_taw, (weights,), timing_data=baseline_timing_data) + baseline_wrangler, (weights,), timing_data=baseline_timing_data) optimized_timing_data = {} optimized_pot = drive_fmm( - optimized_taw, (weights,), timing_data=optimized_timing_data) + optimized_wrangler, (weights,), timing_data=optimized_timing_data) baseline_time = baseline_timing_data["multipole_to_local"]["process_elapsed"] if baseline_time is not None: -- GitLab From c3635eab3ac9dbd262f1a360586f1a99b14db0c2 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sun, 25 Apr 2021 16:48:55 -0500 Subject: [PATCH 228/260] Fix pylint/flake8 for tree-indep data for wrangler --- boxtree/pyfmmlib_integration.py | 4 +++- boxtree/timing.py | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/boxtree/pyfmmlib_integration.py b/boxtree/pyfmmlib_integration.py index 6ef855f..a5cd787 100644 --- a/boxtree/pyfmmlib_integration.py +++ b/boxtree/pyfmmlib_integration.py @@ -1105,7 +1105,7 @@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): else: raise NotImplementedError( "scale factor for pyfmmlib %s for %d dimensions" % ( - self.eqn_letter, + self.tree_indep.eqn_letter, self.dim)) if self.tree_indep.eqn_letter == "l" and self.dim == 2: @@ 
-1113,5 +1113,7 @@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): return potential * scale_factor +# }}} + # vim: foldmethod=marker diff --git a/boxtree/timing.py b/boxtree/timing.py index be49d0f..e3bad59 100644 --- a/boxtree/timing.py +++ b/boxtree/timing.py @@ -168,3 +168,4 @@ def return_timing_data(wrapped): # }}} +# vim: foldmethod=marker -- GitLab From 7604d83162c94cf0d340a42ef711e8d6e8b4bfb0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Wed, 5 May 2021 11:33:19 -0500 Subject: [PATCH 229/260] Update the may-hold-tree comment in the wrangler docstring --- boxtree/fmm.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 677680f..6c24c4d 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -47,6 +47,14 @@ class TreeIndependentDataForWrangler: Examples of such data include generated code for carrying out translations. + + .. note:: + + Instances of this type should not hold a reference (and thereby be + specific to) a :class:`boxtree.Tree` instance. Their purpose is to + host caches for generated translation code that is reusable across + trees. It is OK for these instances to be specific to a given kernel + (or set of kernels). """ @@ -59,11 +67,10 @@ class ExpansionWranglerInterface(ABC): .. note:: - Wranglers should not hold a reference (and thereby be specific to) a - :class:`boxtree.Tree` instance. Their purpose is to host caches for - generated translation code that is reusable across trees. - It is OK for expansion wranglers to be specific to a given kernel - (or set of kernels). + Wranglers may hold a reference (and thereby be specific to) a + :class:`boxtree.Tree` instance. + :class:`TreeIndependentDataForWrangler` exists to hold data that + is more broadly reusable. Functions that support returning timing data return a value supporting the :class:`~boxtree.timing.TimingFuture` interface. -- GitLab From 6e40c7f58ab46a2a2de8dcb3bfaee54cc4d75ad5 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 16 Jun 2021 19:38:56 -0500 Subject: [PATCH 230/260] Fix incorrect merge of drive_fmm docstring --- boxtree/fmm.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index f07cafb..d362906 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -294,9 +294,6 @@ def drive_fmm(wrangler: ExpansionWranglerInterface, src_weight_vecs, :class:`~boxtree.timing.TimingResult`), if such information is available. Returns the potentials computed by *expansion_wrangler*. - - .. automethod:: __init__ - .. automethod:: merge """ traversal = wrangler.traversal -- GitLab From 486ce78f84d9518c810e61b6d554b211dd27b696 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 28 Jun 2021 15:58:59 -0500 Subject: [PATCH 231/260] Remove *zeros methods from wrangler interface --- boxtree/fmm.py | 40 +++++---------------------------- boxtree/pyfmmlib_integration.py | 16 +++++++++++++ 2 files changed, 22 insertions(+), 34 deletions(-) diff --git a/boxtree/fmm.py b/boxtree/fmm.py index d362906..d49f79f 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -91,10 +91,6 @@ class ExpansionWranglerInterface(ABC): .. rubric:: Array creation - .. automethod:: multipole_expansion_zeros - .. automethod:: local_expansion_zeros - .. automethod:: output_zeros - .. rubric:: Particle ordering .. 
automethod:: reorder_sources @@ -122,29 +118,6 @@ class ExpansionWranglerInterface(ABC): def tree(self) -> Tree: return self.traversal.tree - @abstractmethod - def multipole_expansion_zeros(self): - """Return an expansions array (which must support addition) - capable of holding one multipole or local expansion for every - box in the tree. - """ - - @abstractmethod - def local_expansion_zeros(self): - """Return an expansions array (which must support addition) - capable of holding one multipole or local expansion for every - box in the tree. - """ - - @abstractmethod - def output_zeros(self): - """Return a potentials array (which must support addition) capable of - holding a potential value for each target in the tree. Note that - :func:`drive_fmm` makes no assumptions about *potential* other than - that it supports addition--it may consist of potentials, gradients of - the potential, or arbitrary other per-target output data. - """ - @abstractmethod def reorder_sources(self, source_array): """Return a copy of *source_array* in @@ -165,8 +138,7 @@ class ExpansionWranglerInterface(ABC): def form_multipoles(self, level_start_source_box_nrs, source_boxes, src_weight_vecs): - """Return an expansions array (compatible with - :meth:`multipole_expansion_zeros`) + """Return an expansions array containing multipole expansions in *source_boxes* due to sources with *src_weight_vecs*. All other expansions must be zero. @@ -195,7 +167,7 @@ class ExpansionWranglerInterface(ABC): indexed like *target_boxes*. :returns: A pair (*pot*, *timing_future*), where *pot* is a - a new potential array, see :meth:`output_zeros`. + a new potential array. """ @abstractmethod @@ -209,7 +181,7 @@ class ExpansionWranglerInterface(ABC): *starts* is indexed like *target_or_target_parent_boxes*. :returns: A pair (*pot*, *timing_future*) where *pot* is - a new (local) expansion array, see :meth:`local_expansion_zeros`. + a new (local) expansion array. """ @abstractmethod @@ -222,7 +194,7 @@ class ExpansionWranglerInterface(ABC): and *starts* is indexed like *target_boxes_by_source_level[i]*. :returns: A pair (*pot*, *timing_future*) where *pot* is a new potential - array, see :meth:`output_zeros`. + array. """ @abstractmethod @@ -236,7 +208,7 @@ class ExpansionWranglerInterface(ABC): *target_or_target_parent_boxes*. :returns: A pair (*pot*, *timing_future*) where *pot* is a new - local expansion array, see :meth:`local_expansion_zeros`. + local expansion array. """ @abstractmethod @@ -257,7 +229,7 @@ class ExpansionWranglerInterface(ABC): *local_exps* and return a new potential array. :returns: A pair (*pot*, *timing_future*) where *pot* is a new potential - array, see :meth:`output_zeros`. + array. """ # }}} diff --git a/boxtree/pyfmmlib_integration.py b/boxtree/pyfmmlib_integration.py index da9086f..009ac40 100644 --- a/boxtree/pyfmmlib_integration.py +++ b/boxtree/pyfmmlib_integration.py @@ -598,16 +598,32 @@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): raise ValueError("unsupported dimensionality") def multipole_expansion_zeros(self): + """Return an expansions array (which must support addition) + capable of holding one multipole or local expansion for every + box in the tree. + """ + return np.zeros( self.multipole_expansions_level_starts()[-1], dtype=self.tree_indep.dtype) def local_expansion_zeros(self): + """Return an expansions array (which must support addition) + capable of holding one multipole or local expansion for every + box in the tree. 
+ """ return np.zeros( self.local_expansions_level_starts()[-1], dtype=self.tree_indep.dtype) def output_zeros(self): + """Return a potentials array (which must support addition) capable of + holding a potential value for each target in the tree. Note that + :func:`drive_fmm` makes no assumptions about *potential* other than + that it supports addition--it may consist of potentials, gradients of + the potential, or arbitrary other per-target output data. + """ + if self.tree_indep.ifgrad: from pytools.obj_array import make_obj_array return make_obj_array([ -- GitLab From b7c0a229a1e59cbd3f0b25ccc637fc66e03f87dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Mon, 28 Jun 2021 16:20:55 -0500 Subject: [PATCH 232/260] Tweak sumpy downstream to use appropriate branch --- .github/workflows/ci.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bbedefb..bc1b39f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -90,7 +90,12 @@ jobs: env: DOWNSTREAM_PROJECT: ${{ matrix.downstream_project }} run: | - git clone "https://github.com/inducer/$DOWNSTREAM_PROJECT.git" + if [[ "$DOWNSTREAM_PROJECT" = "sumpy" ]] && [[ "$GITHUB_HEAD_REF" = "no-tree-in-wrangler"]]; then + with_echo git clone "https://github.com/inducer/$DOWNSTREAM_PROJECT.git" -b wrangler-refactor + else + with_echo git clone "https://github.com/inducer/$DOWNSTREAM_PROJECT.git" + fi + cd "$DOWNSTREAM_PROJECT" echo "*** $DOWNSTREAM_PROJECT version: $(git rev-parse --short HEAD)" sed -i "/egg=boxtree/ c git+file://$(readlink -f ..)#egg=boxtree" requirements.txt -- GitLab From 885e29b421c4611d052f88efc111c0423e42097f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Mon, 28 Jun 2021 16:38:46 -0500 Subject: [PATCH 233/260] Fix downstream Github CI script --- .github/workflows/ci.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bc1b39f..5594519 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -90,6 +90,9 @@ jobs: env: DOWNSTREAM_PROJECT: ${{ matrix.downstream_project }} run: | + curl -L -O https://tiker.net/ci-support-v0 + . ./ci-support-v0 + if [[ "$DOWNSTREAM_PROJECT" = "sumpy" ]] && [[ "$GITHUB_HEAD_REF" = "no-tree-in-wrangler"]]; then with_echo git clone "https://github.com/inducer/$DOWNSTREAM_PROJECT.git" -b wrangler-refactor else @@ -102,8 +105,6 @@ jobs: export CONDA_ENVIRONMENT=.test-conda-env-py3.yml # Avoid slow or complicated tests in downstream projects export PYTEST_ADDOPTS="-k 'not (slowtest or octave or mpi)'" - curl -L -O https://gitlab.tiker.net/inducer/ci-support/raw/main/ci-support.sh - . ./ci-support.sh build_py_project_in_conda_env test_py_project -- GitLab From e56d24a74ed67894b30a7b395d76ca5324fe2aa9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Mon, 28 Jun 2021 16:52:06 -0500 Subject: [PATCH 234/260] Fix downstream CI script syntax --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5594519..b72a2da 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -93,7 +93,7 @@ jobs: curl -L -O https://tiker.net/ci-support-v0 . 
./ci-support-v0 - if [[ "$DOWNSTREAM_PROJECT" = "sumpy" ]] && [[ "$GITHUB_HEAD_REF" = "no-tree-in-wrangler"]]; then + if [[ "$DOWNSTREAM_PROJECT" = "sumpy" ]] && [[ "$GITHUB_HEAD_REF" = "no-tree-in-wrangler" ]]; then with_echo git clone "https://github.com/inducer/$DOWNSTREAM_PROJECT.git" -b wrangler-refactor else with_echo git clone "https://github.com/inducer/$DOWNSTREAM_PROJECT.git" -- GitLab From 02a635dcb23fae524bb6e7417c93e41d66a5a91a Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 30 Jun 2021 19:00:37 -0500 Subject: [PATCH 235/260] Refactor so that FMMLibTreeIndependentDataForWrangler knows kernel but not Helmholtz k --- boxtree/pyfmmlib_integration.py | 68 ++++++++++++++++++++++++--------- examples/cost_model.py | 4 +- test/test_cost_model.py | 4 +- test/test_fmm.py | 22 +++++++---- 4 files changed, 70 insertions(+), 28 deletions(-) diff --git a/boxtree/pyfmmlib_integration.py b/boxtree/pyfmmlib_integration.py index 009ac40..ffc6c52 100644 --- a/boxtree/pyfmmlib_integration.py +++ b/boxtree/pyfmmlib_integration.py @@ -38,6 +38,7 @@ THE SOFTWARE. import logging logger = logging.getLogger(__name__) +import enum import numpy as np @@ -113,6 +114,12 @@ class FMMLibRotationDataNotSuppliedWarning(UserWarning): # }}} +@enum.unique +class Kernel(enum.Enum): + LAPLACE = enum.auto() + HELMHOLTZ = enum.auto() + + # {{{ tree-independent data for wrangler class FMMLibTreeIndependentDataForWrangler(TreeIndependentDataForWrangler): @@ -120,19 +127,17 @@ class FMMLibTreeIndependentDataForWrangler(TreeIndependentDataForWrangler): .. automethod:: __init__ """ - def __init__(self, dim, helmholtz_k, ifgrad=False): + def __init__(self, dim, kernel, ifgrad=False): self.dim = dim - self.helmholtz_k = helmholtz_k self.ifgrad = ifgrad + self.kernel = kernel - if helmholtz_k == 0: + if kernel == Kernel.LAPLACE: self.eqn_letter = "l" - self.kernel_kwargs = {} - self.rscale_factor = 1 - else: + elif kernel == Kernel.HELMHOLTZ: self.eqn_letter = "h" - self.kernel_kwargs = {"zk": helmholtz_k} - self.rscale_factor = abs(helmholtz_k) + else: + raise ValueError(kernel) self.dtype = np.complex128 @@ -280,7 +285,8 @@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): # {{{ constructor - def __init__(self, tree_indep, traversal, fmm_level_to_nterms=None, + def __init__(self, tree_indep, traversal, *, + helmholtz_k=None, fmm_level_to_nterms=None, dipole_vec=None, dipoles_already_reordered=False, nterms=None, optimized_m2l_precomputation_memory_cutoff_bytes=10**8, rotation_data=None): @@ -311,6 +317,30 @@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): super().__init__(tree_indep, traversal) + if tree_indep.kernel == Kernel.LAPLACE: + self.kernel_kwargs = {} + self.rscale_factor = 1 + + if helmholtz_k: + raise ValueError( + "helmholtz_k must be zero or unspecified for Laplace") + + helmholtz_k = 0 + + elif tree_indep.kernel == Kernel.HELMHOLTZ: + self.kernel_kwargs = {"zk": helmholtz_k} + + if not helmholtz_k: + raise ValueError( + "helmholtz_k must be specified and nonzero") + + self.rscale_factor = abs(helmholtz_k) + + else: + raise ValueError(tree_indep.kernel) + + self.helmholtz_k = helmholtz_k + tree = traversal.tree if tree_indep.dim != tree.dimensions: @@ -321,7 +351,7 @@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): fmm_level_to_nterms(tree, lev) for lev in range(tree.nlevels) ], dtype=np.int32) - if tree_indep.helmholtz_k: + if tree_indep.kernel == Kernel.HELMHOLTZ: logger.info("expansion orders by level used in Helmholtz FMM: %s", self.level_nterms) @@ -366,7 +396,7 
@@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): return self.tree.dimensions def level_to_rscale(self, level): - result = self.tree.root_extent * 2 ** -level * self.tree_indep.rscale_factor + result = self.tree.root_extent * 2 ** -level * self.rscale_factor if abs(result) > 1: result = 1 if self.dim == 3 and self.tree_indep.eqn_letter == "l": @@ -675,7 +705,7 @@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): continue kwargs = {} - kwargs.update(self.tree_indep.kernel_kwargs) + kwargs.update(self.kernel_kwargs) kwargs.update(self.get_source_kwargs(src_weights, pslice)) ier, mpole = formmp( @@ -729,7 +759,7 @@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): if self.dim == 3 and self.tree_indep.eqn_letter == "h": kwargs["radius"] = tree.root_extent * 2**(-target_level) - kwargs.update(self.tree_indep.kernel_kwargs) + kwargs.update(self.kernel_kwargs) new_mp = mpmp( rscale1=source_rscale, @@ -776,7 +806,7 @@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): continue kwargs = {} - kwargs.update(self.tree_indep.kernel_kwargs) + kwargs.update(self.kernel_kwargs) kwargs.update(self.get_source_kwargs(src_weights, src_pslice)) tmp_pot, tmp_grad = ev( @@ -879,7 +909,7 @@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): (ntgt_boxes,) + self.expansion_shape(self.level_nterms[lev]), dtype=self.tree_indep.dtype) - kwargs.update(self.tree_indep.kernel_kwargs) + kwargs.update(self.kernel_kwargs) expn2 = mploc( rscale1=rscale1, @@ -940,7 +970,7 @@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): expn=source_mpoles_view[ src_ibox - source_level_start_ibox].T, ztarg=self._get_targets(tgt_pslice), - **self.tree_indep.kernel_kwargs) + **self.kernel_kwargs) tgt_pot = tgt_pot + tmp_pot tgt_grad = tgt_grad + tmp_grad @@ -997,7 +1027,7 @@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): nsources_starts = sources_starts kwargs = {} - kwargs.update(self.tree_indep.kernel_kwargs) + kwargs.update(self.kernel_kwargs) for key, val in source_kwargs.items(): kwargs[key] = val # Add CSR lists mapping box centers to lists of starting positions @@ -1059,7 +1089,7 @@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): if self.dim == 3 and self.tree_indep.eqn_letter == "h": kwargs["radius"] = self.tree.root_extent * 2**(-target_lev) - kwargs.update(self.tree_indep.kernel_kwargs) + kwargs.update(self.kernel_kwargs) tmp_loc_exp = locloc( rscale1=source_rscale, center1=src_center, @@ -1106,7 +1136,7 @@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): tgt_ibox - source_level_start_ibox].T, ztarg=self._get_targets(tgt_pslice), - **self.tree_indep.kernel_kwargs) + **self.kernel_kwargs) self.add_potgrad_onto_output( output, tgt_pslice, tmp_pot, tmp_grad) diff --git a/examples/cost_model.py b/examples/cost_model.py index e7a686f..35398a8 100644 --- a/examples/cost_model.py +++ b/examples/cost_model.py @@ -25,6 +25,7 @@ def demo_cost_model(): ) from boxtree.pyfmmlib_integration import ( + Kernel, FMMLibTreeIndependentDataForWrangler, FMMLibExpansionWrangler) @@ -78,7 +79,8 @@ def demo_cost_model(): # }}} - tree_indep = FMMLibTreeIndependentDataForWrangler(trav.tree.dimensions, 0) + tree_indep = FMMLibTreeIndependentDataForWrangler( + trav.tree.dimensions, Kernel.LAPLACE) wrangler = FMMLibExpansionWrangler(tree_indep, trav, fmm_level_to_nterms=fmm_level_to_nterms) level_to_orders.append(wrangler.level_nterms) diff --git a/test/test_cost_model.py b/test/test_cost_model.py index cabb9cc..7be4d09 100644 --- a/test/test_cost_model.py +++ 
b/test/test_cost_model.py @@ -392,6 +392,7 @@ def test_compare_cl_and_py_cost_model(ctx_factory, nsources, ntargets, dims, dty @pytest.mark.opencl def test_estimate_calibration_params(ctx_factory): from boxtree.pyfmmlib_integration import ( + Kernel, FMMLibTreeIndependentDataForWrangler, FMMLibExpansionWrangler) @@ -445,7 +446,8 @@ def test_estimate_calibration_params(ctx_factory): # }}} - tree_indep = FMMLibTreeIndependentDataForWrangler(trav.tree.dimensions, 0) + tree_indep = FMMLibTreeIndependentDataForWrangler( + trav.tree.dimensions, Kernel.LAPLACE) wrangler = FMMLibExpansionWrangler(tree_indep, trav, fmm_level_to_nterms=fmm_level_to_nterms) level_to_orders.append(wrangler.level_nterms) diff --git a/test/test_fmm.py b/test/test_fmm.py index 1dacb4c..e86c451 100644 --- a/test/test_fmm.py +++ b/test/test_fmm.py @@ -451,11 +451,14 @@ def test_pyfmmlib_fmm(ctx_factory, dims, use_dipoles, helmholtz_k): return result from boxtree.pyfmmlib_integration import ( - FMMLibTreeIndependentDataForWrangler, FMMLibExpansionWrangler) + Kernel, FMMLibTreeIndependentDataForWrangler, FMMLibExpansionWrangler) tree_indep = FMMLibTreeIndependentDataForWrangler( - trav.tree.dimensions, helmholtz_k) + trav.tree.dimensions, + Kernel.HELMHOLTZ if helmholtz_k else Kernel.LAPLACE) wrangler = FMMLibExpansionWrangler( - tree_indep, trav, fmm_level_to_nterms=fmm_level_to_nterms, + tree_indep, trav, + helmholtz_k=helmholtz_k, + fmm_level_to_nterms=fmm_level_to_nterms, dipole_vec=dipole_vec) from boxtree.fmm import drive_fmm @@ -573,16 +576,18 @@ def test_pyfmmlib_numerical_stability(ctx_factory, dims, helmholtz_k, order): weights = np.ones_like(sources[0]) from boxtree.pyfmmlib_integration import ( - FMMLibTreeIndependentDataForWrangler, + Kernel, FMMLibTreeIndependentDataForWrangler, FMMLibExpansionWrangler, FMMLibRotationData) def fmm_level_to_nterms(tree, lev): return order tree_indep = FMMLibTreeIndependentDataForWrangler( - trav.tree.dimensions, helmholtz_k) + trav.tree.dimensions, + Kernel.HELMHOLTZ if helmholtz_k else Kernel.LAPLACE) wrangler = FMMLibExpansionWrangler( tree_indep, trav, + helmholtz_k=helmholtz_k, fmm_level_to_nterms=fmm_level_to_nterms, rotation_data=FMMLibRotationData(queue, trav)) @@ -781,17 +786,20 @@ def test_fmm_with_optimized_3d_m2l(ctx_factory, nsrcntgts, helmholtz_k, return result from boxtree.pyfmmlib_integration import ( - FMMLibTreeIndependentDataForWrangler, + Kernel, FMMLibTreeIndependentDataForWrangler, FMMLibExpansionWrangler, FMMLibRotationData) tree_indep = FMMLibTreeIndependentDataForWrangler( - trav.tree.dimensions, helmholtz_k) + trav.tree.dimensions, + Kernel.HELMHOLTZ if helmholtz_k else Kernel.LAPLACE) baseline_wrangler = FMMLibExpansionWrangler( tree_indep, trav, + helmholtz_k=helmholtz_k, fmm_level_to_nterms=fmm_level_to_nterms) optimized_wrangler = FMMLibExpansionWrangler( tree_indep, trav, + helmholtz_k=helmholtz_k, fmm_level_to_nterms=fmm_level_to_nterms, rotation_data=FMMLibRotationData(queue, trav)) -- GitLab From e6dd8a17e3e7ff8f4f59050b604622c18bc8db6d Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 30 Jun 2021 19:11:11 -0500 Subject: [PATCH 236/260] Adjust downstream pytential CI for wrangler-refactor branch --- .github/workflows/ci.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b72a2da..3995df7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -93,11 +93,12 @@ jobs: curl -L -O https://tiker.net/ci-support-v0 . 
./ci-support-v0 - if [[ "$DOWNSTREAM_PROJECT" = "sumpy" ]] && [[ "$GITHUB_HEAD_REF" = "no-tree-in-wrangler" ]]; then + #if [[ "$DOWNSTREAM_PROJECT" = "sumpy" ]] && [[ "$GITHUB_HEAD_REF" = "no-tree-in-wrangler" ]]; then + if [[ "$GITHUB_HEAD_REF" = "no-tree-in-wrangler" ]]; then with_echo git clone "https://github.com/inducer/$DOWNSTREAM_PROJECT.git" -b wrangler-refactor else with_echo git clone "https://github.com/inducer/$DOWNSTREAM_PROJECT.git" - fi + fi cd "$DOWNSTREAM_PROJECT" echo "*** $DOWNSTREAM_PROJECT version: $(git rev-parse --short HEAD)" -- GitLab From 5d13e04b510d6cf430f5c7779513f864512263fe Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 30 Jun 2021 19:46:44 -0500 Subject: [PATCH 237/260] Add template_ary arg to finalize_potentials --- boxtree/constant_one.py | 2 +- boxtree/fmm.py | 11 +++++++++-- boxtree/pyfmmlib_integration.py | 2 +- test/test_fmm.py | 4 ++-- 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/boxtree/constant_one.py b/boxtree/constant_one.py index 5590617..83b527f 100644 --- a/boxtree/constant_one.py +++ b/boxtree/constant_one.py @@ -225,7 +225,7 @@ class ConstantOneExpansionWrangler(ExpansionWranglerInterface): return pot, self.timing_future(ops) - def finalize_potentials(self, potentials): + def finalize_potentials(self, potentials, template_ary): return potentials # }}} diff --git a/boxtree/fmm.py b/boxtree/fmm.py index d49f79f..5c606b8 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -235,11 +235,18 @@ class ExpansionWranglerInterface(ABC): # }}} @abstractmethod - def finalize_potentials(self, potentials): + def finalize_potentials(self, potentials, template_ary): """ Postprocess the reordered potentials. This is where global scaling factors could be applied. This is distinct from :meth:`reorder_potentials` because some derived FMMs (notably the QBX FMM) do their own reordering. + + :arg template_ary: If the array type used inside of the FMM + is different from the array type used by the user (e.g. + :class:`boxtree.pyfmmlib_integration.FMMLibExpansionWrangler` + uses :class:`numpy.ndarray` internally, this array can be used + to help convert the output back to the user's array + type (typically :class:`pyopencl.array.Array`). 
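+
+            For instance, a hypothetical :mod:`pyopencl`-based subclass might
+            use *template_ary* roughly as follows (an illustrative sketch, not
+            part of the interface contract)::
+
+                def finalize_potentials(self, potentials, template_ary):
+                    # The FMM-internal result is assumed to be a host numpy
+                    # array here; hand it back in the caller's (device)
+                    # array type.
+                    import pyopencl.array as cl_array
+                    return cl_array.to_device(template_ary.queue, potentials)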
""" # }}} @@ -417,7 +424,7 @@ def drive_fmm(wrangler: ExpansionWranglerInterface, src_weight_vecs, result = wrangler.reorder_potentials(potentials) - result = wrangler.finalize_potentials(result) + result = wrangler.finalize_potentials(result, template_ary=src_weight_vecs[0]) fmm_proc.done() diff --git a/boxtree/pyfmmlib_integration.py b/boxtree/pyfmmlib_integration.py index ffc6c52..f7a59c6 100644 --- a/boxtree/pyfmmlib_integration.py +++ b/boxtree/pyfmmlib_integration.py @@ -1144,7 +1144,7 @@ class FMMLibExpansionWrangler(ExpansionWranglerInterface): return output @log_process(logger) - def finalize_potentials(self, potential): + def finalize_potentials(self, potential, template_ary): if self.tree_indep.eqn_letter == "l" and self.dim == 2: scale_factor = -1/(2*np.pi) elif self.tree_indep.eqn_letter == "h" and self.dim == 2: diff --git a/test/test_fmm.py b/test/test_fmm.py index e86c451..902d471 100644 --- a/test/test_fmm.py +++ b/test/test_fmm.py @@ -86,8 +86,8 @@ def get_fmmlib_ref_pot(wrangler, weights, sources_host, targets_host, return wrangler.finalize_potentials( fmmlib_routine( sources=sources_host, targets=targets_host, - **kwargs)[0] - ) + **kwargs)[0], + template_ary=weights) # }}} -- GitLab From 838da18446853e794285627ee86159492cdbedf6 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 11 Aug 2021 00:07:39 -0700 Subject: [PATCH 238/260] Remove global_wrangler from drive_fmm interface --- boxtree/distributed/__init__.py | 53 +++++++++--------------------- boxtree/distributed/calculation.py | 18 +++++++--- boxtree/fmm.py | 22 +++++-------- test/test_distributed.py | 40 +++++++++++++--------- 4 files changed, 62 insertions(+), 71 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 4504e1b..698dedd 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -130,21 +130,6 @@ def dtype_to_mpi(dtype): class DistributedFMMRunner(object): - """ - .. attribute:: global_wrangler - - An :class:`boxtree.fmm.ExpansionWranglerInterface` object containing - reference to the global tree object on host memory and is used for - distributing and collecting density/potential between the root and worker - ranks. - - .. attribute:: local_wrangler - - An :class:`boxtree.fmm.ExpansionWranglerInterface` object containing - reference to the local tree object on host memory and is used for local FMM - operations. - """ - def __init__(self, queue, global_tree_dev, traversal_builder, wrangler_factory, @@ -159,8 +144,8 @@ class DistributedFMMRunner(object): :class:`pyopencl.CommandQueue` object and a :class:`boxtree.Tree` object, and generates a :class:`boxtree.traversal.FMMTraversalInfo` object from the tree using the command queue. - :arg wrangler_factory: an object which, when called, takes a - :class:`boxtree.traversal.FMMTraversalInfo` object and returns an + :arg wrangler_factory: an object which, when called, takes the local + traversal and the global traversal objects and returns an :class:`boxtree.fmm.ExpansionWranglerInterface` object. :arg calibration_params: Calibration parameters for the cost model, if supplied. 
The cost model is used for estimating the execution time of @@ -180,13 +165,7 @@ class DistributedFMMRunner(object): global_tree_dev = global_tree.to_device(queue).with_queue(queue) global_trav_dev, _ = traversal_builder(queue, global_tree_dev) - self.global_trav = global_trav_dev.get(queue) - - # }}} - - # {{{ Get global wrangler - - self.global_wrangler = wrangler_factory(self.global_trav) + global_trav = global_trav_dev.get(queue) # }}} @@ -201,15 +180,19 @@ class DistributedFMMRunner(object): calibration_params = \ FMMCostModel.get_unit_calibration_params() + # We need to construct a wrangler in order to access `level_nterms` + global_wrangler = wrangler_factory(global_trav, global_trav) + cost_per_box = cost_model.cost_per_box( - queue, global_trav_dev, self.global_wrangler.level_nterms, + # Currently only pyfmmlib has `level_nterms` field. + # See https://gitlab.tiker.net/inducer/boxtree/-/issues/25. + queue, global_trav_dev, global_wrangler.level_nterms, calibration_params ).get() from boxtree.distributed.partition import partition_work responsible_boxes_list = partition_work( - cost_per_box, self.global_trav, comm.Get_size() - ) + cost_per_box, global_trav, comm.Get_size()) # It is assumed that, even if each rank computes `responsible_boxes_list` # independently, it should be the same across ranks. @@ -220,8 +203,7 @@ class DistributedFMMRunner(object): from boxtree.distributed.local_tree import generate_local_tree self.local_tree, self.src_idx, self.tgt_idx = generate_local_tree( - queue, self.global_trav, responsible_boxes_list[mpi_rank] - ) + queue, global_trav, responsible_boxes_list[mpi_rank]) # }}} @@ -238,18 +220,14 @@ class DistributedFMMRunner(object): local_trav = generate_local_travs( queue, self.local_tree, traversal_builder, box_bounding_box={ - "min": self.global_trav.box_target_bounding_box_min, - "max": self.global_trav.box_target_bounding_box_max + "min": global_trav.box_target_bounding_box_min, + "max": global_trav.box_target_bounding_box_max } ) # }}} - # {{{ Get local wrangler - - self.local_wrangler = wrangler_factory(local_trav.get(None)) - - # }}} + self.wrangler = wrangler_factory(local_trav.get(None), global_trav) def drive_dfmm( self, source_weights, _communicate_mpoles_via_allreduce=False, @@ -258,10 +236,9 @@ class DistributedFMMRunner(object): """ from boxtree.fmm import drive_fmm return drive_fmm( - self.local_wrangler, source_weights, + self.wrangler, source_weights, timing_data=timing_data, comm=self.comm, - global_wrangler=self.global_wrangler, global_src_idx_all_ranks=self.src_idx_all_ranks, global_tgt_idx_all_ranks=self.tgt_idx_all_ranks, _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index b602d5d..69e324c 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -39,9 +39,11 @@ logger = logging.getLogger(__name__) # {{{ Distributed FMM wrangler +# TODO: mark this as abstract, and add distributed specific interfaces class DistributedExpansionWrangler: - def __init__(self, queue): + def __init__(self, queue, global_traversal): self.queue = queue + self.global_traversal = global_traversal def distribute_source_weights( self, src_weight_vecs, src_idx_all_ranks, comm=MPI.COMM_WORLD): @@ -340,10 +342,18 @@ class DistributedExpansionWrangler: class DistributedFMMLibExpansionWrangler( FMMLibExpansionWrangler, DistributedExpansionWrangler): def __init__( - self, queue, tree_indep, traversal, fmm_level_to_nterms=None, 
**kwargs): - DistributedExpansionWrangler.__init__(self, queue) + self, queue, tree_indep, local_traversal, global_traversal, + fmm_level_to_nterms=None, **kwargs): + DistributedExpansionWrangler.__init__(self, queue, global_traversal) FMMLibExpansionWrangler.__init__( - self, tree_indep, traversal, + self, tree_indep, local_traversal, fmm_level_to_nterms=fmm_level_to_nterms, **kwargs) + #TODO: use log_process like FMMLibExpansionWrangler? + def reorder_global_sources(self, source_array): + return source_array[..., self.global_traversal.tree.user_source_ids] + + def reorder_global_potentials(self, potentials): + return potentials[self.global_traversal.tree.sorted_target_ids] + # }}} diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 802910a..af31671 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -256,7 +256,6 @@ class ExpansionWranglerInterface(ABC): def drive_fmm(wrangler: ExpansionWranglerInterface, src_weight_vecs, timing_data=None, comm=None, - global_wrangler: ExpansionWranglerInterface = None, global_src_idx_all_ranks=None, global_tgt_idx_all_ranks=None, _communicate_mpoles_via_allreduce=False): """Top-level driver routine for a fast multipole calculation. @@ -275,18 +274,14 @@ def drive_fmm(wrangler: ExpansionWranglerInterface, src_weight_vecs, :arg expansion_wrangler: An object exhibiting the :class:`ExpansionWranglerInterface`. For distributed implementation, this - wrangler should reference the local tree on each rank. + wrangler should be a subclass of + :class:`boxtree.distributed.calculation.DistributedExpansionWrangler`. :arg src_weight_vecs: A sequence of source 'density/weights/charges'. Passed unmodified to *expansion_wrangler*. For distributed implementation, this argument is only significant on the root rank. :arg timing_data: Either *None*, or a :class:`dict` that is populated with timing information for the stages of the algorithm (in the form of :class:`~boxtree.timing.TimingResult`), if such information is available. - :arg global_wrangler: An object exhibiting the - :class:`ExpansionWranglerInterface`. This wrangler should reference the - global tree, which is used for assembling partial results from - worker ranks together. This argument is only significant for distributed - implementation and on the root rank. :arg global_src_idx_all_ranks: a :class:`list` of length ``nranks``, where the i-th entry is a :class:`numpy.ndarray` representing the global indices of sources in the local tree on rank *i*. 
Each entry can be returned from @@ -318,9 +313,7 @@ def drive_fmm(wrangler: ExpansionWranglerInterface, src_weight_vecs, mpi_rank = comm.Get_rank() mpi_size = comm.Get_size() if mpi_rank == 0: - global_traversal = global_wrangler.traversal - - src_weight_vecs = [global_wrangler.reorder_sources(weight) + src_weight_vecs = [wrangler.reorder_global_sources(weight) for weight in src_weight_vecs] src_weight_vecs = wrangler.distribute_source_weights( @@ -506,7 +499,8 @@ def drive_fmm(wrangler: ExpansionWranglerInterface, src_weight_vecs, # {{{ Assemble potentials from worker ranks together on the root rank if comm is not None and mpi_rank == 0: - potentials = np.empty(global_traversal.tree.ntargets, dtype=potentials.dtype) + potentials = np.empty( + wrangler.global_traversal.tree.ntargets, dtype=potentials.dtype) for irank in range(mpi_size): potentials[global_tgt_idx_all_ranks[irank]] = potentials_all_ranks[irank] @@ -516,9 +510,9 @@ def drive_fmm(wrangler: ExpansionWranglerInterface, src_weight_vecs, if comm is not None: result = None if mpi_rank == 0: - result = global_wrangler.reorder_potentials(potentials) - result = global_wrangler.finalize_potentials( - result, template_ary=src_weight_vecs[0]) + result = wrangler.reorder_global_potentials(potentials) + result = wrangler.finalize_potentials( + result, template_ary=src_weight_vecs[0]) else: result = wrangler.reorder_potentials(potentials) result = wrangler.finalize_potentials( diff --git a/test/test_distributed.py b/test/test_distributed.py index c07f551..e9c6eca 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -63,7 +63,7 @@ def _test_against_shared( set_cache_dir(comm) # Initialize arguments for worker processes - tree = None + global_tree_dev = None sources_weights = None helmholtz_k = 0 @@ -99,15 +99,16 @@ def _test_against_shared( # Build the tree and interaction lists from boxtree import TreeBuilder tb = TreeBuilder(ctx) - tree, _ = tb(queue, sources, targets=targets, target_radii=target_radii, - stick_out_factor=0.25, max_particles_in_box=30, debug=True) + global_tree_dev, _ = tb( + queue, sources, targets=targets, target_radii=target_radii, + stick_out_factor=0.25, max_particles_in_box=30, debug=True) - d_trav, _ = tg(queue, tree, debug=True) - trav = d_trav.get(queue=queue) + d_trav, _ = tg(queue, global_tree_dev, debug=True) + global_traversal_host = d_trav.get(queue=queue) # Get pyfmmlib expansion wrangler wrangler = FMMLibExpansionWrangler( - tree_indep, trav, fmm_level_to_nterms=fmm_level_to_nterms) + tree_indep, global_traversal_host, fmm_level_to_nterms=fmm_level_to_nterms) # Compute FMM using shared memory parallelism from boxtree.fmm import drive_fmm @@ -115,16 +116,17 @@ def _test_against_shared( # Compute FMM using distributed memory parallelism - def wrangler_factory(traversal): + def wrangler_factory(local_traversal, global_traversal): from boxtree.distributed.calculation import \ DistributedFMMLibExpansionWrangler return DistributedFMMLibExpansionWrangler( - queue, tree_indep, traversal, fmm_level_to_nterms=fmm_level_to_nterms) + queue, tree_indep, local_traversal, global_traversal, + fmm_level_to_nterms=fmm_level_to_nterms) from boxtree.distributed import DistributedFMMRunner distribued_fmm_info = DistributedFMMRunner( - queue, tree, tg, wrangler_factory, comm=comm + queue, global_tree_dev, tg, wrangler_factory, comm=comm ) timing_data = {} @@ -180,10 +182,17 @@ def _test_constantone(dims, nsources, ntargets, dtype): class ConstantOneExpansionWrangler( ConstantOneExpansionWranglerBase, 
DistributedExpansionWrangler): - def __init__(self, queue, tree_indep, traversal): - DistributedExpansionWrangler.__init__(self, queue) - ConstantOneExpansionWranglerBase.__init__(self, tree_indep, traversal) - self.level_nterms = np.ones(traversal.tree.nlevels, dtype=np.int32) + def __init__(self, queue, tree_indep, local_traversal, global_traversal): + DistributedExpansionWrangler.__init__(self, queue, global_traversal) + ConstantOneExpansionWranglerBase.__init__( + self, tree_indep, local_traversal) + self.level_nterms = np.ones(local_traversal.tree.nlevels, dtype=np.int32) + + def reorder_global_sources(self, source_array): + return source_array[self.global_traversal.tree.user_source_ids] + + def reorder_global_potentials(self, potentials): + return potentials[self.global_traversal.tree.sorted_target_ids] from mpi4py import MPI @@ -223,8 +232,9 @@ def _test_constantone(dims, nsources, ntargets, dtype): tree_indep = ConstantOneTreeIndependentDataForWrangler() - def wrangler_factory(traversal): - return ConstantOneExpansionWrangler(queue, tree_indep, traversal) + def wrangler_factory(local_traversal, global_traversal): + return ConstantOneExpansionWrangler( + queue, tree_indep, local_traversal, global_traversal) from boxtree.distributed import DistributedFMMRunner distributed_fmm_info = DistributedFMMRunner( -- GitLab From 767dcadd9ee325bdfb7fa96e533a4b058f714248 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Wed, 11 Aug 2021 22:30:59 -0700 Subject: [PATCH 239/260] Mark DistributedExpansionWrangler as an abstract class --- boxtree/distributed/calculation.py | 12 ++++++++++-- test/test_distributed.py | 3 ++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 69e324c..4669ebd 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -25,6 +25,7 @@ THE SOFTWARE. 
import numpy as np import pyopencl as cl +from abc import ABC, abstractmethod from boxtree.distributed import MPITags from mpi4py import MPI from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler @@ -39,8 +40,7 @@ logger = logging.getLogger(__name__) # {{{ Distributed FMM wrangler -# TODO: mark this as abstract, and add distributed specific interfaces -class DistributedExpansionWrangler: +class DistributedExpansionWrangler(ABC): def __init__(self, queue, global_traversal): self.queue = queue self.global_traversal = global_traversal @@ -338,6 +338,14 @@ class DistributedExpansionWrangler: if return_stats: return stats + @abstractmethod + def reorder_global_sources(self, source_array): + pass + + @abstractmethod + def reorder_global_potentials(self, potentials): + pass + class DistributedFMMLibExpansionWrangler( FMMLibExpansionWrangler, DistributedExpansionWrangler): diff --git a/test/test_distributed.py b/test/test_distributed.py index e9c6eca..4b25997 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -108,7 +108,8 @@ def _test_against_shared( # Get pyfmmlib expansion wrangler wrangler = FMMLibExpansionWrangler( - tree_indep, global_traversal_host, fmm_level_to_nterms=fmm_level_to_nterms) + tree_indep, global_traversal_host, + fmm_level_to_nterms=fmm_level_to_nterms) # Compute FMM using shared memory parallelism from boxtree.fmm import drive_fmm -- GitLab From 7112bdf6c851cab27856c4ea5d77f25352ab00e1 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Thu, 12 Aug 2021 00:43:04 -0700 Subject: [PATCH 240/260] Move distributed logic and MPI communicator to the distributed wrangler --- boxtree/distributed/__init__.py | 9 +- boxtree/distributed/calculation.py | 143 ++++++++++++++++++----------- boxtree/fmm.py | 136 +++++++++++---------------- test/test_distributed.py | 48 +++++----- 4 files changed, 174 insertions(+), 162 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 698dedd..b54ba10 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -229,17 +229,12 @@ class DistributedFMMRunner(object): self.wrangler = wrangler_factory(local_trav.get(None), global_trav) - def drive_dfmm( - self, source_weights, _communicate_mpoles_via_allreduce=False, - timing_data=None): + def drive_dfmm(self, source_weights, timing_data=None): """Calculate potentials at target points. """ from boxtree.fmm import drive_fmm return drive_fmm( self.wrangler, source_weights, timing_data=timing_data, - comm=self.comm, global_src_idx_all_ranks=self.src_idx_all_ranks, - global_tgt_idx_all_ranks=self.tgt_idx_all_ranks, - _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce - ) + global_tgt_idx_all_ranks=self.tgt_idx_all_ranks) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 4669ebd..4fa841c 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -25,10 +25,10 @@ THE SOFTWARE. 
import numpy as np import pyopencl as cl -from abc import ABC, abstractmethod from boxtree.distributed import MPITags from mpi4py import MPI from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler +from boxtree.fmm import ExpansionWranglerInterface from pytools import memoize_method from pyopencl.tools import dtype_to_ctype from pyopencl.elementwise import ElementwiseKernel @@ -40,31 +40,17 @@ logger = logging.getLogger(__name__) # {{{ Distributed FMM wrangler -class DistributedExpansionWrangler(ABC): - def __init__(self, queue, global_traversal): +class DistributedExpansionWrangler(ExpansionWranglerInterface): + def __init__(self, queue, comm, global_traversal, + communicate_mpoles_via_allreduce=False): self.queue = queue + self.comm = comm self.global_traversal = global_traversal + self.communicate_mpoles_via_allreduce = communicate_mpoles_via_allreduce - def distribute_source_weights( - self, src_weight_vecs, src_idx_all_ranks, comm=MPI.COMM_WORLD): - """Used for the distributed implementation. This method transfers needed - source weights from root rank to each worker rank in communicator *comm*. - - This method needs to be called collectively by all ranks in communicator - *comm*. - - :arg src_weight_vecs: a sequence of :class:`numpy.ndarray` of length - ``nsources``, representing the weights of sources on the root rank. - ``None`` on worker ranks. - :arg src_idx_all_ranks: a :class:`list` of length ``nranks``, where the - i-th entry is a :class:`numpy.ndarray` indexed into *source_weights* to - be sent from the root rank to rank *i*. Each entry can be generated by - *generate_local_tree*. ``None`` on worker ranks. - - :return: Received source weights of the current rank. - """ - mpi_rank = comm.Get_rank() - mpi_size = comm.Get_size() + def distribute_source_weights(self, src_weight_vecs, src_idx_all_ranks): + mpi_rank = self.comm.Get_rank() + mpi_size = self.comm.Get_size() if mpi_rank == 0: distribute_weight_req = [] @@ -76,7 +62,7 @@ class DistributedExpansionWrangler(ABC): for source_weights in src_weight_vecs] if irank != 0: - distribute_weight_req.append(comm.isend( + distribute_weight_req.append(self.comm.isend( local_src_weight_vecs[irank], dest=irank, tag=MPITags["DIST_WEIGHT"] )) @@ -84,10 +70,49 @@ class DistributedExpansionWrangler(ABC): MPI.Request.Waitall(distribute_weight_req) local_src_weight_vecs = local_src_weight_vecs[0] else: - local_src_weight_vecs = comm.recv(source=0, tag=MPITags["DIST_WEIGHT"]) + local_src_weight_vecs = self.comm.recv( + source=0, tag=MPITags["DIST_WEIGHT"]) return local_src_weight_vecs + def gather_potential_results(self, potentials, tgt_idx_all_ranks): + mpi_rank = self.comm.Get_rank() + mpi_size = self.comm.Get_size() + + from boxtree.distributed import dtype_to_mpi + potentials_mpi_type = dtype_to_mpi(potentials.dtype) + + if mpi_rank == 0: + # The root rank received calculated potentials from all worker ranks + potentials_all_ranks = np.empty((mpi_size,), dtype=object) + potentials_all_ranks[0] = potentials + + recv_reqs = [] + + for irank in range(1, mpi_size): + potentials_all_ranks[irank] = np.empty( + tgt_idx_all_ranks[irank].shape, dtype=potentials.dtype) + + recv_reqs.append( + self.comm.Irecv( + [potentials_all_ranks[irank], potentials_mpi_type], + source=irank, tag=MPITags["GATHER_POTENTIALS"])) + + MPI.Request.Waitall(recv_reqs) + + # Assemble potentials from worker ranks together on the root rank + potentials = np.empty( + self.global_traversal.tree.ntargets, dtype=potentials.dtype) + + for irank in range(mpi_size): + 
potentials[tgt_idx_all_ranks[irank]] = potentials_all_ranks[irank] + else: + # Worker ranks send calculated potentials to the root rank + self.comm.Send([potentials, potentials_mpi_type], + dest=0, tag=MPITags["GATHER_POTENTIALS"]) + + return potentials + def slice_mpoles(self, mpoles, slice_indices): if len(slice_indices) == 0: return np.empty((0,), dtype=mpoles.dtype) @@ -201,25 +226,31 @@ class DistributedExpansionWrangler(ABC): return box_in_subrange - def communicate_mpoles(self, comm, mpole_exps, return_stats=False): + def communicate_mpoles(self, mpole_exps, return_stats=False): """Based on Algorithm 3: Reduce and Scatter in [1]. The main idea is to mimic a allreduce as done on a hypercube network, but to decrease the bandwidth cost by sending only information that is relevant to the processes receiving the message. - This function needs to be called collectively by all processes in *comm*. - .. [1] Lashuk, Ilya, Aparna Chandramowlishwaran, Harper Langston, Tuan-Anh Nguyen, Rahul Sampath, Aashay Shringarpure, Richard Vuduc, Lexing Ying, Denis Zorin, and George Biros. “A massively parallel adaptive fast multipole method on heterogeneous architectures." Communications of the ACM 55, no. 5 (2012): 101-109. """ - mpi_rank = comm.Get_rank() - mpi_size = comm.Get_size() + mpi_rank = self.comm.Get_rank() + mpi_size = self.comm.Get_size() tree = self.traversal.tree + if self.communicate_mpoles_via_allreduce: + # Use MPI allreduce for communicating multipole expressions. It is slower + # but might be helpful for debugging purposes. + mpole_exps_all = np.zeros_like(mpole_exps) + self.comm.Allreduce(mpole_exps, mpole_exps_all) + mpole_exps[:] = mpole_exps_all + return + stats = {} # contributing_boxes: @@ -305,21 +336,21 @@ class DistributedExpansionWrangler(ABC): # Send the box subset to the other processors. for sink in comm_pattern.sinks(): - req = comm.Isend(relevant_mpole_exps, dest=sink, - tag=MPITags["REDUCE_POTENTIALS"]) + req = self.comm.Isend(relevant_mpole_exps, dest=sink, + tag=MPITags["REDUCE_POTENTIALS"]) send_requests.append(req) - req = comm.Isend(relevant_boxes_list, dest=sink, - tag=MPITags["REDUCE_INDICES"]) + req = self.comm.Isend(relevant_boxes_list, dest=sink, + tag=MPITags["REDUCE_INDICES"]) send_requests.append(req) # Receive data from other processors. 
for source in comm_pattern.sources(): - comm.Recv(mpole_exps_buf, source=source, - tag=MPITags["REDUCE_POTENTIALS"]) + self.comm.Recv(mpole_exps_buf, source=source, + tag=MPITags["REDUCE_POTENTIALS"]) status = MPI.Status() - comm.Recv( + self.comm.Recv( boxes_list_buf, source=source, tag=MPITags["REDUCE_INDICES"], status=status) nboxes = status.Get_count() // boxes_list_buf.dtype.itemsize @@ -338,30 +369,38 @@ class DistributedExpansionWrangler(ABC): if return_stats: return stats - @abstractmethod - def reorder_global_sources(self, source_array): - pass - - @abstractmethod - def reorder_global_potentials(self, potentials): - pass + def finalize_potentials(self, potentials, template_ary): + if self.comm.Get_rank() == 0: + return super().finalize_potentials(potentials, template_ary) + else: + return None class DistributedFMMLibExpansionWrangler( - FMMLibExpansionWrangler, DistributedExpansionWrangler): + DistributedExpansionWrangler, FMMLibExpansionWrangler): def __init__( - self, queue, tree_indep, local_traversal, global_traversal, - fmm_level_to_nterms=None, **kwargs): - DistributedExpansionWrangler.__init__(self, queue, global_traversal) + self, queue, comm, tree_indep, local_traversal, global_traversal, + fmm_level_to_nterms=None, + communicate_mpoles_via_allreduce=False, + **kwargs): + DistributedExpansionWrangler.__init__( + self, queue, comm, global_traversal, + communicate_mpoles_via_allreduce=communicate_mpoles_via_allreduce) FMMLibExpansionWrangler.__init__( self, tree_indep, local_traversal, fmm_level_to_nterms=fmm_level_to_nterms, **kwargs) #TODO: use log_process like FMMLibExpansionWrangler? - def reorder_global_sources(self, source_array): - return source_array[..., self.global_traversal.tree.user_source_ids] + def reorder_sources(self, source_array): + if self.comm.Get_rank() == 0: + return source_array[..., self.global_traversal.tree.user_source_ids] + else: + return None - def reorder_global_potentials(self, potentials): - return potentials[self.global_traversal.tree.sorted_target_ids] + def reorder_potentials(self, potentials): + if self.comm.Get_rank() == 0: + return potentials[self.global_traversal.tree.sorted_target_ids] + else: + return None # }}} diff --git a/boxtree/fmm.py b/boxtree/fmm.py index af31671..b9f0a9e 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -36,8 +36,6 @@ from boxtree.traversal import FMMTraversalInfo from pytools import ProcessLogger -import numpy as np - # {{{ expansion wrangler interface @@ -251,13 +249,56 @@ class ExpansionWranglerInterface(ABC): type (typically :class:`pyopencl.array.Array`). """ + def distribute_source_weights(self, src_weight_vecs, src_idx_all_ranks): + """Used by the distributed implementation for transferring needed source + weights from root rank to each worker rank in the communicator. + + This method needs to be called collectively by all ranks in the communicator. + + :arg src_weight_vecs: a sequence of :class:`numpy.ndarray` of length + ``nsources``, representing the weights of sources on the root rank. + ``None`` on worker ranks. + :arg src_idx_all_ranks: a :class:`list` of length ``nranks``, where the + i-th entry is a :class:`numpy.ndarray` indexed into *source_weights* to + be sent from the root rank to rank *i*. Each entry can be generated by + *generate_local_tree*. ``None`` on worker ranks. + + :return: Received source weights of the current rank. 
+ """ + return src_weight_vecs + + def gather_potential_results(self, potentials, tgt_idx_all_ranks): + """Used by the distributed implementation for gathering calculated potentials + from all worker ranks in the communicator to the root rank. + + This method needs to be called collectively by all ranks in the communicator. + + :arg potentials: Calculated potentials on each rank. This argument is + significant on all ranks. + :arg tgt_idx_all_ranks: a :class:`list` of length ``nranks``, where the + i-th entry is a :class:`numpy.ndarray` of the global potential indices + of potentials from rank *i*. This argument is only significant on the + root rank. + + :return: Gathered potentials on the root rank. + """ + return potentials + + def communicate_mpoles(self, mpole_exps, return_stats=False): + """Used by the distributed implementation for forming the complete multipole + expansions from the partial multipole expansions. + + This function needs to be called collectively by all processes in the + communicator. + """ + pass + # }}} def drive_fmm(wrangler: ExpansionWranglerInterface, src_weight_vecs, - timing_data=None, comm=None, - global_src_idx_all_ranks=None, global_tgt_idx_all_ranks=None, - _communicate_mpoles_via_allreduce=False): + timing_data=None, + global_src_idx_all_ranks=None, global_tgt_idx_all_ranks=None): """Top-level driver routine for a fast multipole calculation. In part, this is intended as a template for custom FMMs, in the sense that @@ -268,10 +309,6 @@ def drive_fmm(wrangler: ExpansionWranglerInterface, src_weight_vecs, Nonetheless, many common applications (such as point-to-point FMMs) can be covered by supplying the right *expansion_wrangler* to this routine. - To enable distributed computation, set *comm* to a valid MPI communicator, and - call this function collectively for all ranks in *comm*. The distributed - implementation requires mpi4py. - :arg expansion_wrangler: An object exhibiting the :class:`ExpansionWranglerInterface`. For distributed implementation, this wrangler should be a subclass of @@ -290,7 +327,6 @@ def drive_fmm(wrangler: ExpansionWranglerInterface, src_weight_vecs, i-th entry is a :class:`numpy.ndarray` representing the global indices of targets in the local tree on rank *i*. Each entry can be returned from *generate_local_tree*. This argument is significant only on the root rank. - :arg comm: MPI communicator. Default to ``MPI_COMM_WORLD``. :return: the potentials computed by *expansion_wrangler*. 
For the distributed implementation, the potentials are gathered and returned on the root rank; @@ -306,19 +342,11 @@ def drive_fmm(wrangler: ExpansionWranglerInterface, src_weight_vecs, from boxtree.timing import TimingRecorder recorder = TimingRecorder() - if comm is None: - src_weight_vecs = [wrangler.reorder_sources(weight) for - weight in src_weight_vecs] - else: - mpi_rank = comm.Get_rank() - mpi_size = comm.Get_size() - if mpi_rank == 0: - src_weight_vecs = [wrangler.reorder_global_sources(weight) - for weight in src_weight_vecs] + src_weight_vecs = [wrangler.reorder_sources(weight) for + weight in src_weight_vecs] - src_weight_vecs = wrangler.distribute_source_weights( - src_weight_vecs, global_src_idx_all_ranks, comm=comm - ) + src_weight_vecs = wrangler.distribute_source_weights( + src_weight_vecs, global_src_idx_all_ranks) # {{{ "Step 2.1:" Construct local multipoles @@ -344,15 +372,7 @@ def drive_fmm(wrangler: ExpansionWranglerInterface, src_weight_vecs, # }}} - if comm is not None: - if _communicate_mpoles_via_allreduce: - # Use MPI allreduce for communicating multipole expressions. It is slower - # but might be helpful for debugging purposes. - mpole_exps_all = np.zeros_like(mpole_exps) - comm.Allreduce(mpole_exps, mpole_exps_all) - mpole_exps = mpole_exps_all - else: - wrangler.communicate_mpoles(comm, mpole_exps) + wrangler.communicate_mpoles(mpole_exps) # {{{ "Stage 3:" Direct evaluation from neighbor source boxes ("list 1") @@ -465,58 +485,12 @@ def drive_fmm(wrangler: ExpansionWranglerInterface, src_weight_vecs, # }}} - # {{{ Worker ranks send calculated potentials to the root rank - - if comm is not None: - from boxtree.distributed import dtype_to_mpi - from boxtree.distributed import MPITags - from mpi4py import MPI - - potentials_mpi_type = dtype_to_mpi(potentials.dtype) - - if mpi_rank == 0: - potentials_all_ranks = np.empty((mpi_size,), dtype=object) - potentials_all_ranks[0] = potentials - - recv_reqs = [] - - for irank in range(1, mpi_size): - potentials_all_ranks[irank] = np.empty( - global_tgt_idx_all_ranks[irank].shape, dtype=potentials.dtype - ) - - recv_reqs.append( - comm.Irecv([potentials_all_ranks[irank], potentials_mpi_type], - source=irank, tag=MPITags["GATHER_POTENTIALS"])) - - MPI.Request.Waitall(recv_reqs) - else: - comm.Send([potentials, potentials_mpi_type], - dest=0, tag=MPITags["GATHER_POTENTIALS"]) - - # }}} - - # {{{ Assemble potentials from worker ranks together on the root rank + potentials = wrangler.gather_potential_results( + potentials, global_tgt_idx_all_ranks) - if comm is not None and mpi_rank == 0: - potentials = np.empty( - wrangler.global_traversal.tree.ntargets, dtype=potentials.dtype) - - for irank in range(mpi_size): - potentials[global_tgt_idx_all_ranks[irank]] = potentials_all_ranks[irank] - - # }}} + result = wrangler.reorder_potentials(potentials) - if comm is not None: - result = None - if mpi_rank == 0: - result = wrangler.reorder_global_potentials(potentials) - result = wrangler.finalize_potentials( - result, template_ary=src_weight_vecs[0]) - else: - result = wrangler.reorder_potentials(potentials) - result = wrangler.finalize_potentials( - result, template_ary=src_weight_vecs[0]) + result = wrangler.finalize_potentials(result, template_ary=src_weight_vecs[0]) fmm_proc.done() diff --git a/test/test_distributed.py b/test/test_distributed.py index 4b25997..1f06f5f 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -54,7 +54,7 @@ def set_cache_dir(comm): def _test_against_shared( - dims, nsources, 
ntargets, dtype, _communicate_mpoles_via_allreduce=False): + dims, nsources, ntargets, dtype, communicate_mpoles_via_allreduce=False): from mpi4py import MPI # Get the current rank @@ -122,19 +122,17 @@ def _test_against_shared( DistributedFMMLibExpansionWrangler return DistributedFMMLibExpansionWrangler( - queue, tree_indep, local_traversal, global_traversal, - fmm_level_to_nterms=fmm_level_to_nterms) + queue, comm, tree_indep, local_traversal, global_traversal, + fmm_level_to_nterms=fmm_level_to_nterms, + communicate_mpoles_via_allreduce=communicate_mpoles_via_allreduce) from boxtree.distributed import DistributedFMMRunner distribued_fmm_info = DistributedFMMRunner( - queue, global_tree_dev, tg, wrangler_factory, comm=comm - ) + queue, global_tree_dev, tg, wrangler_factory, comm=comm) timing_data = {} pot_dfmm = distribued_fmm_info.drive_dfmm( - [sources_weights], timing_data=timing_data, - _communicate_mpoles_via_allreduce=_communicate_mpoles_via_allreduce - ) + [sources_weights], timing_data=timing_data) assert timing_data # Uncomment the following section to print the time taken of each stage @@ -183,17 +181,26 @@ def _test_constantone(dims, nsources, ntargets, dtype): class ConstantOneExpansionWrangler( ConstantOneExpansionWranglerBase, DistributedExpansionWrangler): - def __init__(self, queue, tree_indep, local_traversal, global_traversal): - DistributedExpansionWrangler.__init__(self, queue, global_traversal) + def __init__( + self, queue, comm, tree_indep, local_traversal, global_traversal): + DistributedExpansionWrangler.__init__( + self, queue, comm, global_traversal, + communicate_mpoles_via_allreduce=True) ConstantOneExpansionWranglerBase.__init__( self, tree_indep, local_traversal) self.level_nterms = np.ones(local_traversal.tree.nlevels, dtype=np.int32) - def reorder_global_sources(self, source_array): - return source_array[self.global_traversal.tree.user_source_ids] + def reorder_sources(self, source_array): + if self.comm.Get_rank() == 0: + return source_array[self.global_traversal.tree.user_source_ids] + else: + return None - def reorder_global_potentials(self, potentials): - return potentials[self.global_traversal.tree.sorted_target_ids] + def reorder_potentials(self, potentials): + if self.comm.Get_rank() == 0: + return potentials[self.global_traversal.tree.sorted_target_ids] + else: + return None from mpi4py import MPI @@ -235,16 +242,13 @@ def _test_constantone(dims, nsources, ntargets, dtype): def wrangler_factory(local_traversal, global_traversal): return ConstantOneExpansionWrangler( - queue, tree_indep, local_traversal, global_traversal) + queue, comm, tree_indep, local_traversal, global_traversal) from boxtree.distributed import DistributedFMMRunner distributed_fmm_info = DistributedFMMRunner( - queue, tree, tg, wrangler_factory, comm=MPI.COMM_WORLD - ) + queue, tree, tg, wrangler_factory, comm=MPI.COMM_WORLD) - pot_dfmm = distributed_fmm_info.drive_dfmm( - [sources_weights], _communicate_mpoles_via_allreduce=True - ) + pot_dfmm = distributed_fmm_info.drive_dfmm([sources_weights]) if rank == 0: assert (np.all(pot_dfmm == nsources)) @@ -279,11 +283,11 @@ if __name__ == "__main__": ntargets = int(os.environ["ntargets"]) from distutils.util import strtobool - _communicate_mpoles_via_allreduce = bool( + communicate_mpoles_via_allreduce = bool( strtobool(os.environ["communicate_mpoles_via_allreduce"])) _test_against_shared( - dims, nsources, ntargets, dtype, _communicate_mpoles_via_allreduce) + dims, nsources, ntargets, dtype, communicate_mpoles_via_allreduce) elif 
os.environ["PYTEST"] == "2": # Run "test_constantone" test case -- GitLab From 70dfbb4fa101305757f3b9b8d76120bbca5ab1fb Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 19 Sep 2021 10:01:10 -0700 Subject: [PATCH 241/260] Remove queue from the distributed wrangler --- boxtree/distributed/calculation.py | 39 +++++++++++++++--------------- test/test_distributed.py | 2 +- 2 files changed, 20 insertions(+), 21 deletions(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 4fa841c..3399f8c 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -41,9 +41,9 @@ logger = logging.getLogger(__name__) # {{{ Distributed FMM wrangler class DistributedExpansionWrangler(ExpansionWranglerInterface): - def __init__(self, queue, comm, global_traversal, + def __init__(self, context, comm, global_traversal, communicate_mpoles_via_allreduce=False): - self.queue = queue + self.context = context self.comm = comm self.global_traversal = global_traversal self.communicate_mpoles_via_allreduce = communicate_mpoles_via_allreduce @@ -163,7 +163,7 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): @memoize_method def find_boxes_used_by_subrange_kernel(self, box_id_dtype): return ElementwiseKernel( - self.queue.context, + self.context, Template(r""" ${box_id_t} *contributing_boxes_list, int subrange_start, @@ -278,13 +278,12 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): stats["bytes_sent_by_stage"] = [] stats["bytes_recvd_by_stage"] = [] - box_to_user_starts_dev = cl.array.to_device( - self.queue, tree.box_to_user_starts - ) + with cl.CommandQueue(self.context) as queue: + box_to_user_starts_dev = cl.array.to_device( + queue, tree.box_to_user_starts).with_queue(None) - box_to_user_lists_dev = cl.array.to_device( - self.queue, tree.box_to_user_lists - ) + box_to_user_lists_dev = cl.array.to_device( + queue, tree.box_to_user_lists).with_queue(None) while not comm_pattern.done(): send_requests = [] @@ -298,17 +297,17 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): tree.box_id_dtype ) - contributing_boxes_list_dev = cl.array.to_device( - self.queue, contributing_boxes_list - ) + with cl.CommandQueue(self.context) as queue: + contributing_boxes_list_dev = cl.array.to_device( + queue, contributing_boxes_list) - box_in_subrange = self.find_boxes_used_by_subrange( - message_subrange, - box_to_user_starts_dev, box_to_user_lists_dev, - contributing_boxes_list_dev - ) + box_in_subrange = self.find_boxes_used_by_subrange( + message_subrange, + box_to_user_starts_dev, box_to_user_lists_dev, + contributing_boxes_list_dev + ) - box_in_subrange_host = box_in_subrange.get().astype(bool) + box_in_subrange_host = box_in_subrange.get().astype(bool) relevant_boxes_list = contributing_boxes_list[ box_in_subrange_host @@ -379,12 +378,12 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): class DistributedFMMLibExpansionWrangler( DistributedExpansionWrangler, FMMLibExpansionWrangler): def __init__( - self, queue, comm, tree_indep, local_traversal, global_traversal, + self, context, comm, tree_indep, local_traversal, global_traversal, fmm_level_to_nterms=None, communicate_mpoles_via_allreduce=False, **kwargs): DistributedExpansionWrangler.__init__( - self, queue, comm, global_traversal, + self, context, comm, global_traversal, communicate_mpoles_via_allreduce=communicate_mpoles_via_allreduce) FMMLibExpansionWrangler.__init__( self, tree_indep, local_traversal, diff --git a/test/test_distributed.py 
b/test/test_distributed.py index 1f06f5f..bab888a 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -122,7 +122,7 @@ def _test_against_shared( DistributedFMMLibExpansionWrangler return DistributedFMMLibExpansionWrangler( - queue, comm, tree_indep, local_traversal, global_traversal, + queue.context, comm, tree_indep, local_traversal, global_traversal, fmm_level_to_nterms=fmm_level_to_nterms, communicate_mpoles_via_allreduce=communicate_mpoles_via_allreduce) -- GitLab From 2276e7a2456ed837564b4f0604384452f632277e Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 19 Sep 2021 18:16:08 -0700 Subject: [PATCH 242/260] Partition boxes on the root rank and distribute the endpoints --- boxtree/distributed/__init__.py | 41 +++++++++-------- boxtree/distributed/partition.py | 77 ++++++++++++++++++-------------- 2 files changed, 64 insertions(+), 54 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index b54ba10..5ca8fa3 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -171,31 +171,30 @@ class DistributedFMMRunner(object): # {{{ Partiton work - cost_model = FMMCostModel() + cost_per_box = None - if calibration_params is None: - # Use default calibration parameters if not supplied - # TODO: should replace the default calibration params with a more - # accurate one - calibration_params = \ - FMMCostModel.get_unit_calibration_params() + if mpi_rank == 0: + cost_model = FMMCostModel() - # We need to construct a wrangler in order to access `level_nterms` - global_wrangler = wrangler_factory(global_trav, global_trav) + if calibration_params is None: + # Use default calibration parameters if not supplied + # TODO: should replace the default calibration params with a more + # accurate one + calibration_params = \ + FMMCostModel.get_unit_calibration_params() - cost_per_box = cost_model.cost_per_box( - # Currently only pyfmmlib has `level_nterms` field. - # See https://gitlab.tiker.net/inducer/boxtree/-/issues/25. - queue, global_trav_dev, global_wrangler.level_nterms, - calibration_params - ).get() + # We need to construct a wrangler in order to access `level_nterms` + global_wrangler = wrangler_factory(global_trav, global_trav) - from boxtree.distributed.partition import partition_work - responsible_boxes_list = partition_work( - cost_per_box, global_trav, comm.Get_size()) + cost_per_box = cost_model.cost_per_box( + # Currently only pyfmmlib has `level_nterms` field. + # See https://gitlab.tiker.net/inducer/boxtree/-/issues/25. + queue, global_trav_dev, global_wrangler.level_nterms, + calibration_params + ).get() - # It is assumed that, even if each rank computes `responsible_boxes_list` - # independently, it should be the same across ranks. 
+ from boxtree.distributed.partition import partition_work + responsible_boxes_list = partition_work(cost_per_box, global_trav, comm) # }}} @@ -203,7 +202,7 @@ class DistributedFMMRunner(object): from boxtree.distributed.local_tree import generate_local_tree self.local_tree, self.src_idx, self.tgt_idx = generate_local_tree( - queue, global_trav, responsible_boxes_list[mpi_rank]) + queue, global_trav, responsible_boxes_list) # }}} diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index 6f13d9e..212b80b 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -30,29 +30,29 @@ from mako.template import Template from pytools import memoize -def partition_work(boxes_time, traversal, total_rank): +def partition_work(boxes_time, traversal, comm): """This function assigns responsible boxes for each rank. - Each process is responsible for calculating the multiple expansions as well as - evaluating target potentials in *responsible_boxes*. + If a rank is responsible for a box, it will calculate the multiple expansion of + the box and evaluate target potentials in the box. - :arg boxes_time: The expected running time of each box. - :arg traversal: The traversal object built on root containing all particles. - :arg total_rank: The total number of ranks. - :return: A numpy array of shape ``(total_rank,)``, where the i-th element is an - numpy array containing the responsible boxes of process i. + :arg boxes_time: The expected running time of each box. This argument is only + significant on the root rank. + :arg traversal: The global traversal object containing all particles. This + argument is significant on all ranks. + :arg comm: MPI communicator. + :return: A numpy array containing the responsible boxes of the current rank. 
""" tree = traversal.tree + mpi_rank = comm.Get_rank() + mpi_size = comm.Get_size() - if total_rank > tree.nboxes: + if mpi_size > tree.nboxes: raise RuntimeError("Fail to partition work because the number of boxes is " "less than the number of processes.") - total_workload = 0 - for i in range(tree.nboxes): - total_workload += boxes_time[i] - - # transform tree from level order to dfs order + # transform tree from the level order to the dfs order + # dfs_order[i] stores the level-order box index of dfs index i dfs_order = np.empty((tree.nboxes,), dtype=tree.box_id_dtype) idx = 0 stack = [0] @@ -65,25 +65,36 @@ def partition_work(boxes_time, traversal, total_rank): if child_box_id > 0: stack.append(child_box_id) - # partition all boxes in dfs order evenly according to workload - responsible_boxes_list = np.empty((total_rank,), dtype=object) - rank = 0 - start = 0 - workload_count = 0 - for i in range(tree.nboxes): - if rank + 1 == total_rank: - responsible_boxes_list[rank] = dfs_order[start:tree.nboxes] - break - - box_idx = dfs_order[i] - workload_count += boxes_time[box_idx] - if (workload_count > (rank + 1)*total_workload/total_rank - or i == tree.nboxes - 1): - responsible_boxes_list[rank] = dfs_order[start:i+1] - start = i + 1 - rank += 1 - - return responsible_boxes_list + # partition all boxes in dfs order evenly according to workload on the root rank + + responsible_boxes_segments = None + responsible_boxes_current_rank = np.empty(2, dtype=tree.box_id_dtype) + + if mpi_rank == 0: + total_workload = 0 + for box_idx in range(tree.nboxes): + total_workload += boxes_time[box_idx] + + responsible_boxes_segments = np.empty([mpi_size, 2], dtype=tree.box_id_dtype) + segment_idx = 0 + start = 0 + workload_count = 0 + for box_idx_dfs_order in range(tree.nboxes): + if segment_idx + 1 == mpi_size: + responsible_boxes_segments[segment_idx, :] = [start, tree.nboxes] + break + + box_idx = dfs_order[box_idx_dfs_order] + workload_count += boxes_time[box_idx] + if (workload_count > (segment_idx + 1) * total_workload / mpi_size + or box_idx_dfs_order == tree.nboxes - 1): + responsible_boxes_segments[segment_idx, :] = [start, box_idx_dfs_order + 1] + start = box_idx_dfs_order + 1 + segment_idx += 1 + + comm.Scatter(responsible_boxes_segments, responsible_boxes_current_rank, root=0) + + return dfs_order[responsible_boxes_current_rank[0]:responsible_boxes_current_rank[1]] @memoize -- GitLab From 1a808874bf72256e3e84fe02470d5e1b1b492143 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 19 Sep 2021 19:05:25 -0700 Subject: [PATCH 243/260] Use PyOpenCL's fancy indexing in favor of custom kernels --- boxtree/distributed/local_tree.py | 134 +++++------------------------- boxtree/distributed/partition.py | 6 +- 2 files changed, 24 insertions(+), 116 deletions(-) diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index cb8fa72..6be544b 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -144,57 +144,6 @@ def fetch_local_targets_kernel( ) -@memoize -def generate_box_particle_starts_kernel(context, particle_id_dtype): - return cl.elementwise.ElementwiseKernel( - context, - Template(""" - __global ${particle_id_t} *old_starts, - __global ${particle_id_t} *particle_scan, - __global ${particle_id_t} *new_starts - """, strict_undefined=True).render( - particle_id_t=dtype_to_ctype(particle_id_dtype) - ), - "new_starts[i] = particle_scan[old_starts[i]]", - name="generate_box_particle_starts" - ) - - -@memoize -def 
generate_box_particle_counts_nonchild_kernel(context, particle_id_dtype): - return cl.elementwise.ElementwiseKernel( - context, - Template(""" - __global char *res_boxes, - __global ${particle_id_t} *old_counts_nonchild, - __global ${particle_id_t} *new_counts_nonchild - """, strict_undefined=True).render( - particle_id_t=dtype_to_ctype(particle_id_dtype) - ), - "if(res_boxes[i]) new_counts_nonchild[i] = old_counts_nonchild[i];" - ) - - -@memoize -def generate_box_particle_counts_cumul_kernel(context, particle_id_dtype): - return cl.elementwise.ElementwiseKernel( - context, - Template(""" - __global ${particle_id_t} *old_counts_cumul, - __global ${particle_id_t} *old_starts, - __global ${particle_id_t} *new_counts_cumul, - __global ${particle_id_t} *particle_scan - """, strict_undefined=True).render( - particle_id_t=dtype_to_ctype(particle_id_dtype) - ), - """ - new_counts_cumul[i] = - particle_scan[old_starts[i] + old_counts_cumul[i]] - - particle_scan[old_starts[i]] - """ - ) - - def fetch_local_particles( queue, global_tree, src_box_mask, tgt_box_mask, local_tree): """This helper function generates particles of the local tree, and reconstruct @@ -272,50 +221,30 @@ def fetch_local_particles( # {{{ box_source_starts - local_box_source_starts = cl.array.empty( - queue, (global_tree.nboxes,), - dtype=global_tree.particle_id_dtype - ) - - generate_box_particle_starts_kernel( - queue.context, global_tree.particle_id_dtype)( - global_tree_dev.box_source_starts, - src_particle_scan, - local_box_source_starts - ) + local_box_source_starts = src_particle_scan[global_tree_dev.box_source_starts] # }}} # {{{ box_source_counts_nonchild - local_box_source_counts_nonchild = cl.array.zeros( + box_counts_all_zeros = cl.array.zeros( queue, (global_tree.nboxes,), - dtype=global_tree.particle_id_dtype - ) + dtype=global_tree.particle_id_dtype) - generate_box_particle_counts_nonchild_kernel( - queue.context, global_tree.particle_id_dtype)( - src_box_mask, - global_tree_dev.box_source_counts_nonchild, - local_box_source_counts_nonchild - ) + local_box_source_counts_nonchild = cl.array.if_positive( + src_box_mask, global_tree_dev.box_source_counts_nonchild, + box_counts_all_zeros) # }}} # {{{ box_source_counts_cumul - local_box_source_counts_cumul = cl.array.empty( - queue, (global_tree.nboxes,), - dtype=global_tree.particle_id_dtype - ) + box_source_ends_cumul = ( + global_tree_dev.box_source_starts + global_tree_dev.box_source_counts_cumul) - generate_box_particle_counts_cumul_kernel( - queue.context, global_tree.particle_id_dtype)( - global_tree_dev.box_source_counts_cumul, - global_tree_dev.box_source_starts, - local_box_source_counts_cumul, - src_particle_scan - ) + local_box_source_counts_cumul = ( + src_particle_scan[box_source_ends_cumul] + - src_particle_scan[global_tree_dev.box_source_starts]) # }}} @@ -399,49 +328,26 @@ def fetch_local_particles( # {{{ box_target_starts - local_box_target_starts = cl.array.empty( - queue, (global_tree.nboxes,), - dtype=global_tree.particle_id_dtype - ) - - generate_box_particle_starts_kernel( - queue.context, global_tree.particle_id_dtype)( - global_tree_dev.box_target_starts, - tgt_particle_scan, - local_box_target_starts - ) + local_box_target_starts = tgt_particle_scan[global_tree_dev.box_target_starts] # }}} # {{{ box_target_counts_nonchild - local_box_target_counts_nonchild = cl.array.zeros( - queue, (global_tree.nboxes,), - dtype=global_tree.particle_id_dtype) - - generate_box_particle_counts_nonchild_kernel( - queue.context, global_tree.particle_id_dtype)( - 
tgt_box_mask, - global_tree_dev.box_target_counts_nonchild, - local_box_target_counts_nonchild - ) + local_box_target_counts_nonchild = cl.array.if_positive( + tgt_box_mask, global_tree_dev.box_target_counts_nonchild, + box_counts_all_zeros) # }}} # {{{ box_target_counts_cumul - local_box_target_counts_cumul = cl.array.empty( - queue, (global_tree.nboxes,), - dtype=global_tree.particle_id_dtype - ) + box_target_ends_cumul = ( + global_tree_dev.box_target_starts + global_tree_dev.box_target_counts_cumul) - generate_box_particle_counts_cumul_kernel( - queue.context, global_tree.particle_id_dtype)( - global_tree_dev.box_target_counts_cumul, - global_tree_dev.box_target_starts, - local_box_target_counts_cumul, - tgt_particle_scan - ) + local_box_target_counts_cumul = ( + tgt_particle_scan[box_target_ends_cumul] + - tgt_particle_scan[global_tree_dev.box_target_starts]) # }}} diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index 212b80b..7a6fcaa 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -88,13 +88,15 @@ def partition_work(boxes_time, traversal, comm): workload_count += boxes_time[box_idx] if (workload_count > (segment_idx + 1) * total_workload / mpi_size or box_idx_dfs_order == tree.nboxes - 1): - responsible_boxes_segments[segment_idx, :] = [start, box_idx_dfs_order + 1] + responsible_boxes_segments[segment_idx, :] = ( + [start, box_idx_dfs_order + 1]) start = box_idx_dfs_order + 1 segment_idx += 1 comm.Scatter(responsible_boxes_segments, responsible_boxes_current_rank, root=0) - return dfs_order[responsible_boxes_current_rank[0]:responsible_boxes_current_rank[1]] + return dfs_order[ + responsible_boxes_current_rank[0]:responsible_boxes_current_rank[1]] @memoize -- GitLab From fd646b5d517dddc60aab9c9110fff9e9c21ec8a1 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 19 Sep 2021 23:29:49 -0700 Subject: [PATCH 244/260] Address reviewer's comments --- boxtree/distributed/__init__.py | 2 +- boxtree/distributed/local_tree.py | 4 +--- boxtree/tools.py | 5 +++++ boxtree/traversal.py | 24 ------------------------ 4 files changed, 7 insertions(+), 28 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 5ca8fa3..84032a3 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -202,7 +202,7 @@ class DistributedFMMRunner(object): from boxtree.distributed.local_tree import generate_local_tree self.local_tree, self.src_idx, self.tgt_idx = generate_local_tree( - queue, global_trav, responsible_boxes_list) + queue, global_trav, responsible_boxes_list, comm) # }}} diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 6be544b..eeae04a 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -28,7 +28,6 @@ from mako.template import Template from pyopencl.tools import dtype_to_ctype from boxtree import Tree from boxtree.tools import ImmutableHostDeviceArray -from mpi4py import MPI import time import numpy as np from pytools import memoize @@ -428,8 +427,7 @@ class LocalTree(Tree): return self._dimensions -def generate_local_tree(queue, global_traversal, responsible_boxes_list, - comm=MPI.COMM_WORLD): +def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): """Generate the local tree for the current rank. :arg queue: a :class:`pyopencl.CommandQueue` object. 
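An aside on [PATCH 243/260] above ("Use PyOpenCL's fancy indexing in favor of custom kernels"), not itself part of any patch in this series: the hand-written elementwise kernels (generate_box_particle_starts_kernel, generate_box_particle_counts_cumul_kernel, generate_box_particle_counts_nonchild_kernel) are replaced by PyOpenCL array "take" indexing, arithmetic on index arrays, and cl.array.if_positive. The following minimal sketch illustrates that idiom on small made-up arrays; the names and values are invented for illustration (they are not boxtree's actual tree data), and it assumes a working OpenCL device plus a PyOpenCL version with fancy-indexing support, as the patch itself requires.

import numpy as np
import pyopencl as cl
import pyopencl.array as cl_array

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)

# Made-up data: 6 global particles in 3 boxes of 2 particles each, where the
# kept-particle mask is [1, 0, 1, 1, 0, 1]. particle_scan is the exclusive scan
# of that mask with the total appended, i.e. each kept particle's local index.
particle_scan = cl_array.to_device(
    queue, np.array([0, 1, 1, 2, 3, 3, 4], dtype=np.int32))
box_starts = cl_array.to_device(queue, np.array([0, 2, 4], dtype=np.int32))
box_counts_cumul = cl_array.to_device(queue, np.array([2, 2, 2], dtype=np.int32))
box_counts_nonchild = cl_array.to_device(queue, np.array([2, 2, 2], dtype=np.int32))
box_mask = cl_array.to_device(queue, np.array([1, 0, 1], dtype=np.int32))

# Fancy ("take") indexing with an index array plays the role of the old
# box-particle-starts kernel: gather the scan at the old start offsets.
local_box_starts = particle_scan[box_starts]

# Differencing the scan at box ends and box starts plays the role of the old
# cumulative-counts kernel.
local_box_counts_cumul = (
    particle_scan[box_starts + box_counts_cumul] - particle_scan[box_starts])

# if_positive with a box mask plays the role of the old nonchild-counts kernel.
box_counts_all_zeros = cl_array.zeros(queue, 3, dtype=np.int32)
local_box_counts_nonchild = cl_array.if_positive(
    box_mask, box_counts_nonchild, box_counts_all_zeros)

print(local_box_starts.get())           # [0 1 3]
print(local_box_counts_cumul.get())     # [1 2 1]
print(local_box_counts_nonchild.get())  # [2 0 2]

The upside of this style, as the patch title suggests, is that a handful of library-provided device operations replace several per-dtype custom kernels while keeping all data on the device.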
diff --git a/boxtree/tools.py b/boxtree/tools.py index 3a1736a..fd2ea29 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -352,6 +352,8 @@ class DeviceDataRecord(Record): return cl.array.to_device(queue, attr).with_queue(None) elif isinstance(attr, ImmutableHostDeviceArray): return attr.device + elif isinstance(attr, DeviceDataRecord): + return attr.to_device(queue) else: return attr @@ -367,6 +369,8 @@ class DeviceDataRecord(Record): def _to_host_device_array(attr): if isinstance(attr, (np.ndarray, cl.array.Array)): return ImmutableHostDeviceArray(queue, attr) + elif isinstance(attr, DeviceDataRecord): + return attr.to_host_device_array(queue) else: return attr @@ -698,6 +702,7 @@ class MaskCompressorKernel(object): # for larger ones since the work is partitioned by row. knl = self.get_matrix_compressor_kernel(mask.dtype, list_dtype) size = mask.dtype.itemsize + assert size > 0 result, evt = knl(queue, mask.shape[0], mask.shape[1], mask.strides[0] // size, mask.strides[1] // size, mask.data) diff --git a/boxtree/traversal.py b/boxtree/traversal.py index 29e1d3d..8f1c9cc 100644 --- a/boxtree/traversal.py +++ b/boxtree/traversal.py @@ -1726,30 +1726,6 @@ class FMMTraversalInfo(DeviceDataRecord): def ntarget_or_target_parent_boxes(self): return len(self.target_or_target_parent_boxes) - def to_device(self, queue, exclude_fields=frozenset()): - exclude_fields = exclude_fields | { - "level_start_source_box_nrs", - "level_start_target_box_nrs", - "level_start_target_or_target_parent_box_nrs", - "level_start_source_parent_box_nrs", - "tree"} - - self_dev = super(FMMTraversalInfo, self).to_device(queue, exclude_fields) - self_dev.tree = self.tree.to_device(queue) - - return self_dev - - def to_host_device_array(self, queue, exclude_fields=frozenset()): - exclude_fields = exclude_fields | { - "level_start_source_box_nrs", - "level_start_target_box_nrs", - "level_start_target_or_target_parent_box_nrs", - "level_start_source_parent_box_nrs", - "tree"} - - return super(FMMTraversalInfo, self).to_host_device_array( - queue, exclude_fields) - # }}} -- GitLab From 2cfebb0220c278268e288c402bb5c9cf493de773 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 19 Sep 2021 23:58:24 -0700 Subject: [PATCH 245/260] placate pylint --- boxtree/distributed/calculation.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 3399f8c..fa53b3a 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -124,8 +124,9 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): for ilevel in range(self.traversal.tree.nlevels): start, stop = level_start_slice_indices[ilevel:ilevel+2] if stop > start: - level_start_box_idx, mpoles_current_level = \ - self.multipole_expansions_view(mpoles, ilevel) + level_start_box_idx, mpoles_current_level = ( + #pylint: disable-next=no-member + self.multipole_expansions_view(mpoles, ilevel)) mpoles_list.append( mpoles_current_level[ slice_indices[start:stop] - level_start_box_idx @@ -145,8 +146,9 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): for ilevel in range(self.traversal.tree.nlevels): start, stop = level_start_slice_indices[ilevel:ilevel+2] if stop > start: - level_start_box_idx, mpoles_current_level = \ - self.multipole_expansions_view(mpoles, ilevel) + level_start_box_idx, mpoles_current_level = ( + #pylint: disable-next=no-member + self.multipole_expansions_view(mpoles, ilevel)) mpoles_shape = (stop - start,) + 
mpoles_current_level.shape[1:] from pytools import product -- GitLab From f5dc5e014c8b6ab48a4630bc9dc4686ca10db101 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 20 Sep 2021 00:38:20 -0700 Subject: [PATCH 246/260] Improve documentation --- boxtree/distributed/__init__.py | 23 +++++++++++++---------- boxtree/distributed/calculation.py | 22 +++++++++++++++++----- boxtree/distributed/partition.py | 2 +- boxtree/tools.py | 17 +++++++++++++---- 4 files changed, 44 insertions(+), 20 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 84032a3..7d9e1b5 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -35,7 +35,6 @@ launching FMM. .. autoclass:: boxtree.distributed.DistributedFMMRunner - .. automethod:: drive_dfmm Distributed Algorithm Overview ------------------------------ @@ -47,7 +46,7 @@ Distributed Algorithm Overview 3. Each rank constructs the local tree and traversal lists independently, according to the partition. (See :ref:`construct-local-tree-traversal`) 4. Distribute source weights from the root rank to all worker ranks. (See - :ref:`distribute-source-weights`) + :ref:`distributed-wrangler`) 5. Each rank independently forms multipole expansions from the leaf nodes of the local tree and propagates the partial multipole expansions upwards. 6. Communicate multipole expansions so that all ranks have the complete multipole @@ -82,13 +81,12 @@ Construct Local Tree and Traversal .. autofunction:: boxtree.distributed.local_traversal.generate_local_travs -.. _distribute-source-weights: +.. _distributed-wrangler: -Distribute source weights -------------------------- +Distributed Wrangler +-------------------- -.. autofunction:: boxtree.distributed.calculation.DistributedExpansionWrangler\ -.distribute_source_weights +.. autoclass:: boxtree.distributed.calculation.DistributedExpansionWrangler .. _distributed-fmm-evaluation: @@ -96,8 +94,9 @@ Distributed FMM Evaluation -------------------------- The distributed version of the FMM evaluation shares the same interface as the -shared-memory version. To evaluate FMM in distributed manner, set ``comm`` to -a valid communicator in :func:`boxtree.fmm.drive_fmm`. +shared-memory version. To evaluate FMM in a distributed manner, use a subclass +of :class:`boxtree.distributed.calculation.DistributedExpansionWrangler` in +:func:`boxtree.fmm.drive_fmm`. """ @@ -130,6 +129,10 @@ def dtype_to_mpi(dtype): class DistributedFMMRunner(object): + """Helper class for setting up and running distributed point FMM. + + .. automethod:: drive_dfmm + """ def __init__(self, queue, global_tree_dev, traversal_builder, wrangler_factory, @@ -169,7 +172,7 @@ class DistributedFMMRunner(object): # }}} - # {{{ Partiton work + # {{{ Partition work cost_per_box = None diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index fa53b3a..58aa415 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -41,6 +41,16 @@ logger = logging.getLogger(__name__) # {{{ Distributed FMM wrangler class DistributedExpansionWrangler(ExpansionWranglerInterface): + """Distributed expansion wrangler base class. + + This is an abstract class and should not be directly instantiated. Instead, it is + expected that all distributed wranglers should be subclasses of this class. + + .. automethod:: __init__ + .. automethod:: distribute_source_weights + .. automethod:: gather_potential_results + .. 
automethod:: communicate_mpoles + """ def __init__(self, context, comm, global_traversal, communicate_mpoles_via_allreduce=False): self.context = context @@ -229,17 +239,19 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): return box_in_subrange def communicate_mpoles(self, mpole_exps, return_stats=False): - """Based on Algorithm 3: Reduce and Scatter in [1]. + """Based on Algorithm 3: Reduce and Scatter in [1]_. The main idea is to mimic a allreduce as done on a hypercube network, but to decrease the bandwidth cost by sending only information that is relevant to the processes receiving the message. + .. rubric:: Footnotes + .. [1] Lashuk, Ilya, Aparna Chandramowlishwaran, Harper Langston, - Tuan-Anh Nguyen, Rahul Sampath, Aashay Shringarpure, Richard Vuduc, Lexing - Ying, Denis Zorin, and George Biros. “A massively parallel adaptive fast - multipole method on heterogeneous architectures." Communications of the - ACM 55, no. 5 (2012): 101-109. + Tuan-Anh Nguyen, Rahul Sampath, Aashay Shringarpure, Richard Vuduc, + Lexing Ying, Denis Zorin, and George Biros. “A massively parallel + adaptive fast multipole method on heterogeneous architectures." + Communications of the ACM 55, no. 5 (2012): 101-109. """ mpi_rank = self.comm.Get_rank() mpi_size = self.comm.Get_size() diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index 7a6fcaa..9981008 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -51,7 +51,7 @@ def partition_work(boxes_time, traversal, comm): raise RuntimeError("Fail to partition work because the number of boxes is " "less than the number of processes.") - # transform tree from the level order to the dfs order + # transform tree from the level order to the morton dfs order # dfs_order[i] stores the level-order box index of dfs index i dfs_order = np.empty((tree.nboxes,), dtype=tree.box_id_dtype) idx = 0 diff --git a/boxtree/tools.py b/boxtree/tools.py index fd2ea29..83ec8d2 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -640,7 +640,9 @@ void generate(LIST_ARG_DECL USER_ARG_DECL index_type i) class MaskCompressorKernel(object): - + """ + .. automethod:: __call__ + """ def __init__(self, context): self.context = context @@ -720,10 +722,17 @@ class AllReduceCommPattern(object): multipole expansions. Supports an arbitrary number of processes. Communication of multipoles will be break down into stages. At each stage, - :meth:`sources()` and :meth:`sinks()` obtain the lists of ranks for receiving and - sending multipoles. :meth:`messages()` can be used for determining boxes whose + :meth:`sources` and :meth:`sinks` obtain the lists of ranks for receiving and + sending multipoles. :meth:`messages` can be used for determining boxes whose multipole expansions need to be sent during the current stage. Use - :meth:`advance()` to advance to the next stage. + :meth:`advance` to advance to the next stage. + + .. automethod:: __init__ + .. automethod:: sources + .. automethod:: sinks + .. automethod:: messages + .. automethod:: advance + .. 
automethod:: done """ def __init__(self, rank, size): -- GitLab From 8b2e392bb8c863efcebc3f4c518292c22d23f259 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 11 Oct 2021 20:38:17 -0700 Subject: [PATCH 247/260] Add Github CI job with MPI tests --- .github/workflows/ci.yml | 12 ++++++++++++ .gitlab-ci.yml | 1 - 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3995df7..7b76b31 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -78,6 +78,18 @@ jobs: curl -L -O https://gitlab.tiker.net/inducer/ci-support/raw/main/run-examples.sh . ./run-examples.sh + distributed: + name: Python 3 POCL MPI + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: "Main Script" + run: | + export EXTRA_INSTALL="numpy mako mpi4py pybind11" + export PYTEST_ADDOPTS="-k mpi --capture=no" + curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project.sh + . ./build-and-test-py-project.sh + downstream_tests: strategy: matrix: diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a417e10..8aaa7d0 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -61,7 +61,6 @@ Python 3 POCL K40: Python 3 POCL MPI: script: - - export PY_EXE=python3 - export PYOPENCL_TEST=portable - export EXTRA_INSTALL="numpy mako mpi4py pybind11" - export PYTEST_ADDOPTS="-k mpi --capture=no" -- GitLab From b19a90f16bbe8791c6044dcb9d57e76e2b2b090d Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 11 Oct 2021 21:06:39 -0700 Subject: [PATCH 248/260] Revert "Add Github CI job with MPI tests" This reverts commit 8b2e392bb8c863efcebc3f4c518292c22d23f259. --- .github/workflows/ci.yml | 12 ------------ .gitlab-ci.yml | 1 + 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7b76b31..3995df7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -78,18 +78,6 @@ jobs: curl -L -O https://gitlab.tiker.net/inducer/ci-support/raw/main/run-examples.sh . ./run-examples.sh - distributed: - name: Python 3 POCL MPI - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: "Main Script" - run: | - export EXTRA_INSTALL="numpy mako mpi4py pybind11" - export PYTEST_ADDOPTS="-k mpi --capture=no" - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project.sh - . 
./build-and-test-py-project.sh - downstream_tests: strategy: matrix: diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8aaa7d0..a417e10 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -61,6 +61,7 @@ Python 3 POCL K40: Python 3 POCL MPI: script: + - export PY_EXE=python3 - export PYOPENCL_TEST=portable - export EXTRA_INSTALL="numpy mako mpi4py pybind11" - export PYTEST_ADDOPTS="-k mpi --capture=no" -- GitLab From 582a5623cd614229548ed59c2b810db969bdf0f5 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 11 Oct 2021 21:17:32 -0700 Subject: [PATCH 249/260] Placate pylint --- boxtree/distributed/calculation.py | 6 ++---- pytest.ini | 1 + 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 58aa415..d4625b8 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -135,8 +135,7 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): start, stop = level_start_slice_indices[ilevel:ilevel+2] if stop > start: level_start_box_idx, mpoles_current_level = ( - #pylint: disable-next=no-member - self.multipole_expansions_view(mpoles, ilevel)) + self.multipole_expansions_view(mpoles, ilevel)) # noqa pylint: disable=no-member mpoles_list.append( mpoles_current_level[ slice_indices[start:stop] - level_start_box_idx @@ -157,8 +156,7 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): start, stop = level_start_slice_indices[ilevel:ilevel+2] if stop > start: level_start_box_idx, mpoles_current_level = ( - #pylint: disable-next=no-member - self.multipole_expansions_view(mpoles, ilevel)) + self.multipole_expansions_view(mpoles, ilevel)) # noqa pylint: disable=no-member mpoles_shape = (stop - start,) + mpoles_current_level.shape[1:] from pytools import product diff --git a/pytest.ini b/pytest.ini index a146247..72057bc 100644 --- a/pytest.ini +++ b/pytest.ini @@ -3,3 +3,4 @@ markers = opencl: uses OpenCL geo_lookup: test geometric lookups area_query: test area queries + mpi: test distributed FMM -- GitLab From 150b685347f1dda70e8a68715146062b81b95160 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 11 Oct 2021 23:14:10 -0700 Subject: [PATCH 250/260] Use MPICH version as default --- boxtree/distributed/partition.py | 1 + boxtree/tools.py | 14 ++++++-------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index 9981008..0d1ea62 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -53,6 +53,7 @@ def partition_work(boxes_time, traversal, comm): # transform tree from the level order to the morton dfs order # dfs_order[i] stores the level-order box index of dfs index i + # TODO: optimize the performance with OpenCL dfs_order = np.empty((tree.nboxes,), dtype=tree.box_id_dtype) idx = 0 stack = [0] diff --git a/boxtree/tools.py b/boxtree/tools.py index 83ec8d2..00d3678 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -834,13 +834,7 @@ def run_mpi(script, num_processes, env): # See https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html for details. 
mpi_library_name = MPI.Get_library_version() - if mpi_library_name.startswith("MPICH"): - subprocess.run( - ["mpiexec", "-np", str(num_processes), sys.executable, - "-m", "mpi4py", script], - env=env, check=True - ) - elif mpi_library_name.startswith("Open MPI"): + if mpi_library_name.startswith("Open MPI"): command = ["mpiexec", "-np", str(num_processes), "--oversubscribe"] for env_variable_name in env: command.append("-x") @@ -849,7 +843,11 @@ def run_mpi(script, num_processes, env): subprocess.run(command, env=env, check=True) else: - raise NotImplementedError("Unrecognized MPI implementation") + subprocess.run( + ["mpiexec", "-np", str(num_processes), sys.executable, + "-m", "mpi4py", script], + env=env, check=True + ) # }}} -- GitLab From fd8b7e8f2219048043d152169b50d6f2eacf023e Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 15 Nov 2021 23:09:14 -0800 Subject: [PATCH 251/260] Address reviewer's suggestions --- boxtree/distributed/__init__.py | 31 ++++++++-------- boxtree/distributed/calculation.py | 42 +++++++++++----------- boxtree/distributed/local_traversal.py | 2 -- boxtree/distributed/local_tree.py | 2 -- boxtree/distributed/partition.py | 8 +++-- boxtree/fmm.py | 31 ++++++++++------ boxtree/tools.py | 49 ++++++++++++++------------ doc/distributed.rst | 2 +- 8 files changed, 89 insertions(+), 78 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 7d9e1b5..ce491bf 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -1,5 +1,3 @@ -from __future__ import division - __copyright__ = "Copyright (C) 2013 Andreas Kloeckner \ Copyright (C) 2018 Hao Gao" @@ -58,8 +56,8 @@ Distributed Algorithm Overview For step 5-7, see :ref:`distributed-fmm-evaluation`. Note that step 4-7 may be repeated multiple times with the same tree and traversal -object built from step 1-3. For example, when solving a PDE, step 4-7 is executed -for each GMRES iteration. +object built from step 1-3. For example, when iteratively solving a PDE, step 4-7 is +executed for each iteration of the linear solver. The next sections will cover the interfaces of these steps. @@ -102,16 +100,18 @@ of :class:`boxtree.distributed.calculation.DistributedExpansionWrangler` in from mpi4py import MPI import numpy as np +from enum import IntEnum +import warnings from boxtree.cost import FMMCostModel __all__ = ["DistributedFMMRunner"] -MPITags = dict( - DIST_WEIGHT=1, - GATHER_POTENTIALS=2, - REDUCE_POTENTIALS=3, - REDUCE_INDICES=4 -) + +class MPITags(IntEnum): + DIST_WEIGHT = 1 + GATHER_POTENTIALS = 2 + REDUCE_POTENTIALS = 3 + REDUCE_INDICES = 4 def dtype_to_mpi(dtype): @@ -128,19 +128,17 @@ def dtype_to_mpi(dtype): return mpi_type -class DistributedFMMRunner(object): +class DistributedFMMRunner: """Helper class for setting up and running distributed point FMM. + .. automethod:: __init__ .. automethod:: drive_dfmm """ def __init__(self, queue, global_tree_dev, traversal_builder, wrangler_factory, calibration_params=None, comm=MPI.COMM_WORLD): - """Constructor of the ``DistributedFMMRunner`` class. - - This constructor distributes the global tree from the root rank to each - worker rank. + """Distributes the global tree from the root rank to each worker rank. :arg global_tree_dev: a :class:`boxtree.Tree` object in device memory. :arg traversal_builder: an object which, when called, takes a @@ -155,7 +153,6 @@ class DistributedFMMRunner(object): each box, which is used for improving load balancing. :arg comm: MPI communicator. 
""" - self.comm = comm mpi_rank = comm.Get_rank() @@ -183,6 +180,8 @@ class DistributedFMMRunner(object): # Use default calibration parameters if not supplied # TODO: should replace the default calibration params with a more # accurate one + warnings.warn("Calibration parameters for the cost model are not " + "supplied. The default one will be used.") calibration_params = \ FMMCostModel.get_unit_calibration_params() diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index d4625b8..64935b6 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -74,14 +74,13 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): if irank != 0: distribute_weight_req.append(self.comm.isend( local_src_weight_vecs[irank], dest=irank, - tag=MPITags["DIST_WEIGHT"] + tag=MPITags.DIST_WEIGHT )) MPI.Request.Waitall(distribute_weight_req) local_src_weight_vecs = local_src_weight_vecs[0] else: - local_src_weight_vecs = self.comm.recv( - source=0, tag=MPITags["DIST_WEIGHT"]) + local_src_weight_vecs = self.comm.recv(source=0, tag=MPITags.DIST_WEIGHT) return local_src_weight_vecs @@ -92,6 +91,8 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): from boxtree.distributed import dtype_to_mpi potentials_mpi_type = dtype_to_mpi(potentials.dtype) + gathered_potentials = None + if mpi_rank == 0: # The root rank received calculated potentials from all worker ranks potentials_all_ranks = np.empty((mpi_size,), dtype=object) @@ -106,24 +107,25 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): recv_reqs.append( self.comm.Irecv( [potentials_all_ranks[irank], potentials_mpi_type], - source=irank, tag=MPITags["GATHER_POTENTIALS"])) + source=irank, tag=MPITags.GATHER_POTENTIALS)) MPI.Request.Waitall(recv_reqs) # Assemble potentials from worker ranks together on the root rank - potentials = np.empty( + gathered_potentials = np.empty( self.global_traversal.tree.ntargets, dtype=potentials.dtype) for irank in range(mpi_size): - potentials[tgt_idx_all_ranks[irank]] = potentials_all_ranks[irank] + gathered_potentials[tgt_idx_all_ranks[irank]] = ( + potentials_all_ranks[irank]) else: # Worker ranks send calculated potentials to the root rank self.comm.Send([potentials, potentials_mpi_type], - dest=0, tag=MPITags["GATHER_POTENTIALS"]) + dest=0, tag=MPITags.GATHER_POTENTIALS) - return potentials + return gathered_potentials - def slice_mpoles(self, mpoles, slice_indices): + def _slice_mpoles(self, mpoles, slice_indices): if len(slice_indices) == 0: return np.empty((0,), dtype=mpoles.dtype) @@ -144,7 +146,7 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): return np.concatenate(mpoles_list) - def update_mpoles(self, mpoles, mpole_updates, slice_indices): + def _update_mpoles(self, mpoles, mpole_updates, slice_indices): if len(slice_indices) == 0: return @@ -237,13 +239,11 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): return box_in_subrange def communicate_mpoles(self, mpole_exps, return_stats=False): - """Based on Algorithm 3: Reduce and Scatter in [1]_. + """Based on Algorithm 3: Reduce and Scatter in Lashuk et al. [1]_. - The main idea is to mimic a allreduce as done on a hypercube network, but to + The main idea is to mimic an allreduce as done on a hypercube network, but to decrease the bandwidth cost by sending only information that is relevant to - the processes receiving the message. - - .. rubric:: Footnotes + the rank receiving the message. .. 
[1] Lashuk, Ilya, Aparna Chandramowlishwaran, Harper Langston, Tuan-Anh Nguyen, Rahul Sampath, Aashay Shringarpure, Richard Vuduc, @@ -342,32 +342,32 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): relevant_boxes_list, dtype=tree.box_id_dtype ) - relevant_mpole_exps = self.slice_mpoles( + relevant_mpole_exps = self._slice_mpoles( mpole_exps, relevant_boxes_list) # Send the box subset to the other processors. for sink in comm_pattern.sinks(): req = self.comm.Isend(relevant_mpole_exps, dest=sink, - tag=MPITags["REDUCE_POTENTIALS"]) + tag=MPITags.REDUCE_POTENTIALS) send_requests.append(req) req = self.comm.Isend(relevant_boxes_list, dest=sink, - tag=MPITags["REDUCE_INDICES"]) + tag=MPITags.REDUCE_INDICES) send_requests.append(req) # Receive data from other processors. for source in comm_pattern.sources(): self.comm.Recv(mpole_exps_buf, source=source, - tag=MPITags["REDUCE_POTENTIALS"]) + tag=MPITags.REDUCE_POTENTIALS) status = MPI.Status() self.comm.Recv( - boxes_list_buf, source=source, tag=MPITags["REDUCE_INDICES"], + boxes_list_buf, source=source, tag=MPITags.REDUCE_INDICES, status=status) nboxes = status.Get_count() // boxes_list_buf.dtype.itemsize # Update data structures. - self.update_mpoles( + self._update_mpoles( mpole_exps, mpole_exps_buf, boxes_list_buf[:nboxes]) contributing_boxes[boxes_list_buf[:nboxes]] = 1 diff --git a/boxtree/distributed/local_traversal.py b/boxtree/distributed/local_traversal.py index cc7c7dc..1fb461a 100644 --- a/boxtree/distributed/local_traversal.py +++ b/boxtree/distributed/local_traversal.py @@ -1,5 +1,3 @@ -from __future__ import division - __copyright__ = "Copyright (C) 2013 Andreas Kloeckner \ Copyright (C) 2018 Hao Gao" diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index eeae04a..c699244 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -1,5 +1,3 @@ -from __future__ import division - __copyright__ = "Copyright (C) 2013 Andreas Kloeckner \ Copyright (C) 2018 Hao Gao" diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index 0d1ea62..18cf663 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -1,5 +1,3 @@ -from __future__ import division - __copyright__ = "Copyright (C) 2012 Andreas Kloeckner \ Copyright (C) 2018 Hao Gao" @@ -53,7 +51,7 @@ def partition_work(boxes_time, traversal, comm): # transform tree from the level order to the morton dfs order # dfs_order[i] stores the level-order box index of dfs index i - # TODO: optimize the performance with OpenCL + # FIXME: optimize the performance with OpenCL dfs_order = np.empty((tree.nboxes,), dtype=tree.box_id_dtype) idx = 0 stack = [0] @@ -71,6 +69,10 @@ def partition_work(boxes_time, traversal, comm): responsible_boxes_segments = None responsible_boxes_current_rank = np.empty(2, dtype=tree.box_id_dtype) + # FIXME: Right now, the responsible boxes assigned to all ranks are computed + # centrally on the root rank to avoid inconsistency risks of floating point + # operations. We could improve the efficiency by letting each rank compute the + # costs of a subset of boxes, and use MPI_Scan to aggregate the results. if mpi_rank == 0: total_workload = 0 for box_idx in range(tree.nboxes): diff --git a/boxtree/fmm.py b/boxtree/fmm.py index b9f0a9e..6fbccaa 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -255,15 +255,17 @@ class ExpansionWranglerInterface(ABC): This method needs to be called collectively by all ranks in the communicator. 
- :arg src_weight_vecs: a sequence of :class:`numpy.ndarray` of length + :arg src_weight_vecs: a sequence of :class:`numpy.ndarray`, each with length ``nsources``, representing the weights of sources on the root rank. - ``None`` on worker ranks. - :arg src_idx_all_ranks: a :class:`list` of length ``nranks``, where the - i-th entry is a :class:`numpy.ndarray` indexed into *source_weights* to - be sent from the root rank to rank *i*. Each entry can be generated by - *generate_local_tree*. ``None`` on worker ranks. - - :return: Received source weights of the current rank. + *None* on worker ranks. + :arg src_idx_all_ranks: a :class:`list` of length ``nranks``, including the + root rank, where the i-th entry is a :class:`numpy.ndarray` of indices, + of which *source_weights* to be sent from the root rank to rank *i*. Each + entry can be generated by :func:`.generate_local_tree`. *None* on worker + ranks. + + :return: Received source weights of the current rank, including the root + rank. """ return src_weight_vecs @@ -274,13 +276,13 @@ class ExpansionWranglerInterface(ABC): This method needs to be called collectively by all ranks in the communicator. :arg potentials: Calculated potentials on each rank. This argument is - significant on all ranks. + significant on all ranks, including the root rank. :arg tgt_idx_all_ranks: a :class:`list` of length ``nranks``, where the i-th entry is a :class:`numpy.ndarray` of the global potential indices of potentials from rank *i*. This argument is only significant on the root rank. - :return: Gathered potentials on the root rank. + :return: Gathered potentials on the root rank. *None* on worker ranks. """ return potentials @@ -288,8 +290,15 @@ class ExpansionWranglerInterface(ABC): """Used by the distributed implementation for forming the complete multipole expansions from the partial multipole expansions. - This function needs to be called collectively by all processes in the + This function accepts partial multipole expansions in the argument + *mpole_exps*, and modifies *mpole_exps* in place with the communicated and + reduced multipole expansions. + + This function needs to be called collectively by all ranks in the communicator. + + :returns: Statistics of the communication if *return_stats* is True. *None* + otherwise. """ pass diff --git a/boxtree/tools.py b/boxtree/tools.py index 00d3678..c6b52ff 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -639,7 +639,7 @@ void generate(LIST_ARG_DECL USER_ARG_DECL index_type i) """ -class MaskCompressorKernel(object): +class MaskCompressorKernel: """ .. automethod:: __call__ """ @@ -717,15 +717,20 @@ class MaskCompressorKernel(object): # {{{ Communication pattern for partial multipole expansions -class AllReduceCommPattern(object): +class AllReduceCommPattern: """Describes a tree-like communication pattern for exchanging and reducing multipole expansions. Supports an arbitrary number of processes. - Communication of multipoles will be break down into stages. At each stage, - :meth:`sources` and :meth:`sinks` obtain the lists of ranks for receiving and - sending multipoles. :meth:`messages` can be used for determining boxes whose - multipole expansions need to be sent during the current stage. Use - :meth:`advance` to advance to the next stage. + A user must instantiate a version of this with identical *size* and varying + *rank* on each rank. 
During each stage, each rank sends its contribution to + the reduction results on ranks returned by :meth:`sinks` and listens for + contributions from :meth:`source`. :meth:`messages` can be used for determining + array indices whose partial results need to be sent during the current stage. + Then, all ranks call :meth:`advance` and use :meth:`done` to check whether the + communication is complete. In the use case of multipole communication, the + reduction result is a vector of multipole expansions to which all ranks add + contribution. These contributions are communicated sparsely via arrays of box + indices and expansions. .. automethod:: __init__ .. automethod:: sources @@ -737,8 +742,8 @@ class AllReduceCommPattern(object): def __init__(self, rank, size): """ - :arg rank: My rank - :arg size: Total number of processors + :arg rank: Current rank. + :arg size: Total number of ranks. """ assert 0 <= rank < size self.rank = rank @@ -747,31 +752,31 @@ class AllReduceCommPattern(object): self.midpoint = size // 2 def sources(self): - """Return the set of source nodes at this communication stage. The current - process receives messages from these processes. + """Return the set of source nodes at the current communication stage. The + current rank receives messages from these ranks. """ if self.rank < self.midpoint: partner = self.midpoint + (self.rank - self.left) if self.rank == self.midpoint - 1 and partner == self.right: partners = set() elif self.rank == self.midpoint - 1 and partner == self.right - 2: - partners = set([partner, partner + 1]) + partners = {partner, partner + 1} else: - partners = set([partner]) + partners = {partner} else: partner = self.left + (self.rank - self.midpoint) if self.rank == self.right - 1 and partner == self.midpoint: partners = set() elif self.rank == self.right - 1 and partner == self.midpoint - 2: - partners = set([partner, partner + 1]) + partners = {partner, partner + 1} else: - partners = set([partner]) + partners = {partner} return partners def sinks(self): - """Return the set of sink nodes at this communication stage. The current process - sends a message to these processes. + """Return the set of sink nodes at this communication stage. The current rank + sends a message to these ranks. """ if self.rank < self.midpoint: partner = self.midpoint + (self.rank - self.left) @@ -782,12 +787,12 @@ class AllReduceCommPattern(object): if partner == self.midpoint: partner -= 1 - return set([partner]) + return {partner} def messages(self): - """Return a range of ranks, such that the multipole expansions used by - responsible boxes of these ranks are sent to the sinks. This is returned as - a [start, end) pair. By design, it is a consecutive range. + """Return a range of ranks, such that the partial results of array indices + used by these ranks are sent to the sinks. This is returned as a + [start, end) pair. By design, it is a consecutive range. """ if self.rank < self.midpoint: return (self.midpoint, self.right) @@ -808,7 +813,7 @@ class AllReduceCommPattern(object): self.midpoint = (self.midpoint + self.right) // 2 def done(self): - """Return whether this process is finished communicating. + """Return whether the current rank is finished communicating. """ return self.left + 1 == self.right diff --git a/doc/distributed.rst b/doc/distributed.rst index a9d90cb..91fbe7c 100644 --- a/doc/distributed.rst +++ b/doc/distributed.rst @@ -1,4 +1,4 @@ Distributed Computation ======================= -.. automodule:: boxtree.distributed.__init__ +.. 
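# A minimal sketch (not from the patch) of the caching pattern this commit
# introduces: expensive objects are built by @memoize_method on a small "code
# container", so the cache lives exactly as long as the container instance,
# instead of in a process-global @memoize cache.  The ExampleCodeContainer
# class and its build_kernel placeholder are assumptions for illustration.

from pytools import memoize_method

class ExampleCodeContainer:
    def __init__(self, cl_context, dtype):
        self.cl_context = cl_context
        self.dtype = dtype

    @memoize_method
    def build_kernel(self):
        # Stand-in for an ElementwiseKernel/GenericScanKernel build; the
        # result is cached on this instance, so dropping the container
        # releases the compiled code along with it.
        return ("compiled kernel for", self.cl_context, self.dtype)

# code = ExampleCodeContainer(cl_context, numpy.float64)
# code.build_kernel() is code.build_kernel()   # built once, then cached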
automodule:: boxtree.distributed -- GitLab From 007abcbc3dee324ec391f5b8f241aff3b6fd1950 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 13 Dec 2021 22:56:05 -0800 Subject: [PATCH 252/260] Use code container to control cache lifetime --- boxtree/distributed/local_tree.py | 241 ++++++++++++++---------------- boxtree/distributed/partition.py | 92 ++++++------ boxtree/fmm.py | 6 +- 3 files changed, 167 insertions(+), 172 deletions(-) diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index c699244..df47567 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -28,117 +28,125 @@ from boxtree import Tree from boxtree.tools import ImmutableHostDeviceArray import time import numpy as np -from pytools import memoize +from pytools import memoize_method import logging logger = logging.getLogger(__name__) -@memoize -def particle_mask_kernel(context, particle_id_dtype): - return cl.elementwise.ElementwiseKernel( - context, - arguments=Template(""" - __global char *responsible_boxes, - __global ${particle_id_t} *box_particle_starts, - __global ${particle_id_t} *box_particle_counts_nonchild, - __global ${particle_id_t} *particle_mask - """, strict_undefined=True).render( - particle_id_t=dtype_to_ctype(particle_id_dtype) - ), - operation=Template(""" - if(responsible_boxes[i]) { - for(${particle_id_t} pid = box_particle_starts[i]; - pid < box_particle_starts[i] + box_particle_counts_nonchild[i]; - ++pid) { - particle_mask[pid] = 1; - } - } - """).render(particle_id_t=dtype_to_ctype(particle_id_dtype)) - ) - - -@memoize -def mask_scan_kernel(context, particle_id_dtype): - from pyopencl.scan import GenericScanKernel - return GenericScanKernel( - context, particle_id_dtype, - arguments=Template(""" - __global ${mask_t} *ary, - __global ${mask_t} *scan +class FetchLocalParticlesCodeContainer: + """Objects of this type serve as a place to keep the code needed for + :func:`fetch_local_particles`. 
+ """ + def __init__(self, cl_context, dimensions, particle_id_dtype, coord_dtype, + sources_have_extent, targets_have_extent): + self.cl_context = cl_context + self.dimensions = dimensions + self.particle_id_dtype = particle_id_dtype + self.coord_dtype = coord_dtype + self.sources_have_extent = sources_have_extent + self.targets_have_extent = targets_have_extent + + @memoize_method + def particle_mask_kernel(self): + return cl.elementwise.ElementwiseKernel( + self.cl_context, + arguments=Template(""" + __global char *responsible_boxes, + __global ${particle_id_t} *box_particle_starts, + __global ${particle_id_t} *box_particle_counts_nonchild, + __global ${particle_id_t} *particle_mask """, strict_undefined=True).render( - mask_t=dtype_to_ctype(particle_id_dtype) - ), - input_expr="ary[i]", - scan_expr="a+b", neutral="0", - output_statement="scan[i + 1] = item;" - ) + particle_id_t=dtype_to_ctype(self.particle_id_dtype) + ), + operation=Template(""" + if(responsible_boxes[i]) { + for(${particle_id_t} pid = box_particle_starts[i]; + pid < box_particle_starts[i] + + box_particle_counts_nonchild[i]; + ++pid) { + particle_mask[pid] = 1; + } + } + """).render(particle_id_t=dtype_to_ctype(self.particle_id_dtype)) + ) + @memoize_method + def mask_scan_kernel(self): + from pyopencl.scan import GenericScanKernel + return GenericScanKernel( + self.cl_context, self.particle_id_dtype, + arguments=Template(""" + __global ${mask_t} *ary, + __global ${mask_t} *scan + """, strict_undefined=True).render( + mask_t=dtype_to_ctype(self.particle_id_dtype) + ), + input_expr="ary[i]", + scan_expr="a+b", neutral="0", + output_statement="scan[i + 1] = item;" + ) -fetch_local_paticles_arguments = Template(""" - __global const ${mask_t} *particle_mask, - __global const ${mask_t} *particle_scan - % for dim in range(ndims): - , __global const ${coord_t} *particles_${dim} - % endfor - % for dim in range(ndims): - , __global ${coord_t} *local_particles_${dim} - % endfor - % if particles_have_extent: - , __global const ${coord_t} *particle_radii - , __global ${coord_t} *local_particle_radii - % endif -""", strict_undefined=True) - -fetch_local_particles_prg = Template(""" - if(particle_mask[i]) { - ${particle_id_t} des = particle_scan[i]; + fetch_local_paticles_arguments = Template(""" + __global const ${mask_t} *particle_mask, + __global const ${mask_t} *particle_scan % for dim in range(ndims): - local_particles_${dim}[des] = particles_${dim}[i]; + , __global const ${coord_t} *particles_${dim} + % endfor + % for dim in range(ndims): + , __global ${coord_t} *local_particles_${dim} % endfor % if particles_have_extent: - local_particle_radii[des] = particle_radii[i]; + , __global const ${coord_t} *particle_radii + , __global ${coord_t} *local_particle_radii % endif - } -""", strict_undefined=True) - - -@memoize -def fetch_local_sources_kernel( - context, particle_id_dtype, coord_dtype, dimensions, sources_have_extent): - return cl.elementwise.ElementwiseKernel( - context, - fetch_local_paticles_arguments.render( - mask_t=dtype_to_ctype(particle_id_dtype), - coord_t=dtype_to_ctype(coord_dtype), - ndims=dimensions, - particles_have_extent=sources_have_extent - ), - fetch_local_particles_prg.render( - particle_id_t=dtype_to_ctype(particle_id_dtype), - ndims=dimensions, - particles_have_extent=sources_have_extent + """, strict_undefined=True) + + fetch_local_particles_prg = Template(""" + if(particle_mask[i]) { + ${particle_id_t} des = particle_scan[i]; + % for dim in range(ndims): + local_particles_${dim}[des] = 
particles_${dim}[i]; + % endfor + % if particles_have_extent: + local_particle_radii[des] = particle_radii[i]; + % endif + } + """, strict_undefined=True) + + @memoize_method + def fetch_local_sources_kernel(self): + return cl.elementwise.ElementwiseKernel( + self.cl_context, + self.fetch_local_paticles_arguments.render( + mask_t=dtype_to_ctype(self.particle_id_dtype), + coord_t=dtype_to_ctype(self.coord_dtype), + ndims=self.dimensions, + particles_have_extent=self.sources_have_extent + ), + self.fetch_local_particles_prg.render( + particle_id_t=dtype_to_ctype(self.particle_id_dtype), + ndims=self.dimensions, + particles_have_extent=self.sources_have_extent + ) ) - ) - -@memoize -def fetch_local_targets_kernel( - context, particle_id_dtype, coord_dtype, dimensions, targets_have_extent): - return cl.elementwise.ElementwiseKernel( - context, - fetch_local_paticles_arguments.render( - mask_t=dtype_to_ctype(particle_id_dtype), - coord_t=dtype_to_ctype(coord_dtype), - ndims=dimensions, - particles_have_extent=targets_have_extent - ), - fetch_local_particles_prg.render( - particle_id_t=dtype_to_ctype(particle_id_dtype), - ndims=dimensions, - particles_have_extent=targets_have_extent + @memoize_method + def fetch_local_targets_kernel(self): + return cl.elementwise.ElementwiseKernel( + self.cl_context, + self.fetch_local_paticles_arguments.render( + mask_t=dtype_to_ctype(self.particle_id_dtype), + coord_t=dtype_to_ctype(self.coord_dtype), + ndims=self.dimensions, + particles_have_extent=self.targets_have_extent + ), + self.fetch_local_particles_prg.render( + particle_id_t=dtype_to_ctype(self.particle_id_dtype), + ndims=self.dimensions, + particles_have_extent=self.targets_have_extent + ) ) - ) def fetch_local_particles( @@ -153,6 +161,11 @@ def fetch_local_particles( These generated fields are stored directly into *local_tree*. 
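# For reference, a plain-NumPy sketch (an illustration, not code from the
# patch) of the first step performed below: every particle owned directly
# (non-child counts) by a box whose mask entry is set gets its own mask entry
# set.  The name particle_mask_reference is made up for this sketch.

import numpy as np

def particle_mask_reference(box_mask, box_particle_starts,
                            box_particle_counts_nonchild, num_particles):
    particle_mask = np.zeros(num_particles, dtype=np.int64)
    for ibox in np.flatnonzero(box_mask):
        start = box_particle_starts[ibox]
        count = box_particle_counts_nonchild[ibox]
        particle_mask[start:start + count] = 1
    return particle_mask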
""" + code = FetchLocalParticlesCodeContainer( + queue.context, global_tree.dimensions, + global_tree.particle_id_dtype, global_tree.coord_dtype, + global_tree.sources_have_extent, global_tree.targets_have_extent) + global_tree_dev = global_tree.to_device(queue).with_queue(queue) nsources = global_tree.nsources @@ -163,7 +176,7 @@ def fetch_local_particles( dtype=global_tree.particle_id_dtype ) - particle_mask_kernel(queue.context, global_tree.particle_id_dtype)( + code.particle_mask_kernel()( src_box_mask, global_tree_dev.box_source_starts, global_tree_dev.box_source_counts_nonchild, @@ -180,9 +193,7 @@ def fetch_local_particles( ) src_particle_scan[0] = 0 - mask_scan_kernel(queue.context, global_tree.particle_id_dtype)( - src_particle_mask, src_particle_scan - ) + code.mask_scan_kernel()(src_particle_mask, src_particle_scan) # }}} @@ -202,13 +213,7 @@ def fetch_local_particles( assert global_tree.sources_have_extent is False - fetch_local_sources_kernel( - queue.context, - global_tree.particle_id_dtype, - global_tree.coord_dtype, - global_tree.dimensions, - global_tree.sources_have_extent - )( + code.fetch_local_sources_kernel()( src_particle_mask, src_particle_scan, *global_tree_dev.sources.tolist(), *local_sources_list @@ -254,7 +259,7 @@ def fetch_local_particles( dtype=global_tree.particle_id_dtype ) - particle_mask_kernel(queue.context, global_tree.particle_id_dtype)( + code.particle_mask_kernel()( tgt_box_mask, global_tree_dev.box_target_starts, global_tree_dev.box_target_counts_nonchild, @@ -271,9 +276,7 @@ def fetch_local_particles( ) tgt_particle_scan[0] = 0 - mask_scan_kernel(queue.context, global_tree.particle_id_dtype)( - tgt_particle_mask, tgt_particle_scan - ) + code.mask_scan_kernel()(tgt_particle_mask, tgt_particle_scan) # }}} @@ -297,13 +300,7 @@ def fetch_local_particles( dtype=global_tree.coord_dtype ) - fetch_local_targets_kernel( - queue.context, - global_tree.particle_id_dtype, - global_tree.coord_dtype, - global_tree.dimensions, - True - )( + code.fetch_local_targets_kernel()( tgt_particle_mask, tgt_particle_scan, *global_tree_dev.targets.tolist(), *local_targets_list, @@ -311,13 +308,7 @@ def fetch_local_particles( local_target_radii ) else: - fetch_local_targets_kernel( - queue.context, - global_tree.particle_id_dtype, - global_tree.coord_dtype, - global_tree.dimensions, - False - )( + code.fetch_local_targets_kernel()( tgt_particle_mask, tgt_particle_scan, *global_tree_dev.targets.tolist(), *local_targets_list diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index 18cf663..3164565 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -25,7 +25,7 @@ import numpy as np import pyopencl as cl from pyopencl.tools import dtype_to_ctype from mako.template import Template -from pytools import memoize +from pytools import memoize_method def partition_work(boxes_time, traversal, comm): @@ -102,7 +102,6 @@ def partition_work(boxes_time, traversal, comm): responsible_boxes_current_rank[0]:responsible_boxes_current_rank[1]] -@memoize def mark_parent_kernel(context, box_id_dtype): return cl.elementwise.ElementwiseKernel( context, @@ -112,35 +111,6 @@ def mark_parent_kernel(context, box_id_dtype): ) -# helper kernel for adding boxes from interaction list 1 and 4 -@memoize -def add_interaction_list_boxes_kernel(context, box_id_dtype): - return cl.elementwise.ElementwiseKernel( - context, - Template(""" - __global ${box_id_t} *box_list, - __global char *responsible_boxes_mask, - __global ${box_id_t} 
*interaction_boxes_starts, - __global ${box_id_t} *interaction_boxes_lists, - __global char *src_boxes_mask - """, strict_undefined=True).render( - box_id_t=dtype_to_ctype(box_id_dtype) - ), - Template(r""" - typedef ${box_id_t} box_id_t; - box_id_t current_box = box_list[i]; - if(responsible_boxes_mask[current_box]) { - for(box_id_t box_idx = interaction_boxes_starts[i]; - box_idx < interaction_boxes_starts[i + 1]; - ++box_idx) - src_boxes_mask[interaction_boxes_lists[box_idx]] = 1; - } - """, strict_undefined=True).render( - box_id_t=dtype_to_ctype(box_id_dtype) - ), - ) - - def get_ancestor_boxes_mask(queue, traversal, responsible_boxes_mask): """Query the ancestors of responsible boxes. @@ -167,8 +137,42 @@ def get_ancestor_boxes_mask(queue, traversal, responsible_boxes_mask): return ancestor_boxes +class GetBoxesMaskCodeContainer: + def __init__(self, cl_context, box_id_dtype): + self.cl_context = cl_context + self.box_id_dtype = box_id_dtype + + # helper kernel for adding boxes from interaction list 1 and 4 + @memoize_method + def add_interaction_list_boxes_kernel(self): + return cl.elementwise.ElementwiseKernel( + self.cl_context, + Template(""" + __global ${box_id_t} *box_list, + __global char *responsible_boxes_mask, + __global ${box_id_t} *interaction_boxes_starts, + __global ${box_id_t} *interaction_boxes_lists, + __global char *src_boxes_mask + """, strict_undefined=True).render( + box_id_t=dtype_to_ctype(self.box_id_dtype) + ), + Template(r""" + typedef ${box_id_t} box_id_t; + box_id_t current_box = box_list[i]; + if(responsible_boxes_mask[current_box]) { + for(box_id_t box_idx = interaction_boxes_starts[i]; + box_idx < interaction_boxes_starts[i + 1]; + ++box_idx) + src_boxes_mask[interaction_boxes_lists[box_idx]] = 1; + } + """, strict_undefined=True).render( + box_id_t=dtype_to_ctype(self.box_id_dtype) + ), + ) + + def get_src_boxes_mask( - queue, traversal, responsible_boxes_mask, ancestor_boxes_mask): + queue, code, traversal, responsible_boxes_mask, ancestor_boxes_mask): """Query the boxes whose sources are needed in order to evaluate potentials of boxes represented by *responsible_boxes_mask*. @@ -181,10 +185,11 @@ def get_src_boxes_mask( i-th entry is 1 if souces of box ``i`` are needed for evaluating the potentials of targets in boxes represented by *responsible_boxes_mask*. 
""" + src_boxes_mask = responsible_boxes_mask.copy() # Add list 1 of responsible boxes - add_interaction_list_boxes_kernel(queue.context, traversal.tree.box_id_dtype)( + code.add_interaction_list_boxes_kernel()( traversal.target_boxes, responsible_boxes_mask, traversal.neighbor_source_boxes_starts, traversal.neighbor_source_boxes_lists, src_boxes_mask, @@ -193,7 +198,7 @@ def get_src_boxes_mask( ) # Add list 4 of responsible boxes or ancestor boxes - add_interaction_list_boxes_kernel(queue.context, traversal.tree.box_id_dtype)( + code.add_interaction_list_boxes_kernel()( traversal.target_or_target_parent_boxes, responsible_boxes_mask | ancestor_boxes_mask, traversal.from_sep_bigger_starts, traversal.from_sep_bigger_lists, @@ -205,8 +210,7 @@ def get_src_boxes_mask( if traversal.tree.targets_have_extent: # Add list 3 close of responsible boxes if traversal.from_sep_close_smaller_starts is not None: - add_interaction_list_boxes_kernel( - queue.context, traversal.tree.box_id_dtype)( + code.add_interaction_list_boxes_kernel()( traversal.target_boxes, responsible_boxes_mask, traversal.from_sep_close_smaller_starts, @@ -217,8 +221,7 @@ def get_src_boxes_mask( # Add list 4 close of responsible boxes if traversal.from_sep_close_bigger_starts is not None: - add_interaction_list_boxes_kernel( - queue.context, traversal.tree.box_id_dtype)( + code.add_interaction_list_boxes_kernel()( traversal.target_boxes, responsible_boxes_mask | ancestor_boxes_mask, traversal.from_sep_close_bigger_starts, @@ -231,7 +234,7 @@ def get_src_boxes_mask( def get_multipole_boxes_mask( - queue, traversal, responsible_boxes_mask, ancestor_boxes_mask): + queue, code, traversal, responsible_boxes_mask, ancestor_boxes_mask): """Query the boxes whose multipoles are used in order to evaluate potentials of targets in boxes represented by *responsible_boxes_mask*. @@ -251,7 +254,7 @@ def get_multipole_boxes_mask( # A mpole is used by process p if it is in the List 2 of either a box # owned by p or one of its ancestors. - add_interaction_list_boxes_kernel(queue.context, traversal.tree.box_id_dtype)( + code.add_interaction_list_boxes_kernel()( traversal.target_or_target_parent_boxes, responsible_boxes_mask | ancestor_boxes_mask, traversal.from_sep_siblings_starts, @@ -263,8 +266,7 @@ def get_multipole_boxes_mask( # A mpole is used by process p if it is in the List 3 of a box owned by p. for ilevel in range(traversal.tree.nlevels): - add_interaction_list_boxes_kernel( - queue.context, traversal.tree.box_id_dtype)( + code.add_interaction_list_boxes_kernel()( traversal.target_boxes_sep_smaller_by_source_level[ilevel], responsible_boxes_mask, traversal.from_sep_smaller_by_level[ilevel].starts, @@ -295,6 +297,8 @@ def get_boxes_mask(queue, traversal, responsible_boxes_list): :returns: responsible_box_mask, ancestor_boxes_mask, src_boxes_mask and multipole_boxes_mask, as described above. 
""" + code = GetBoxesMaskCodeContainer(queue.context, traversal.tree.box_id_dtype) + traversal = traversal.to_device(queue) responsible_boxes_mask = np.zeros((traversal.tree.nboxes,), dtype=np.int8) @@ -306,11 +310,11 @@ def get_boxes_mask(queue, traversal, responsible_boxes_list): ) src_boxes_mask = get_src_boxes_mask( - queue, traversal, responsible_boxes_mask, ancestor_boxes_mask + queue, code, traversal, responsible_boxes_mask, ancestor_boxes_mask ) multipole_boxes_mask = get_multipole_boxes_mask( - queue, traversal, responsible_boxes_mask, ancestor_boxes_mask + queue, code, traversal, responsible_boxes_mask, ancestor_boxes_mask ) return (responsible_boxes_mask, ancestor_boxes_mask, src_boxes_mask, diff --git a/boxtree/fmm.py b/boxtree/fmm.py index 6fbccaa..b66ee56 100644 --- a/boxtree/fmm.py +++ b/boxtree/fmm.py @@ -260,9 +260,9 @@ class ExpansionWranglerInterface(ABC): *None* on worker ranks. :arg src_idx_all_ranks: a :class:`list` of length ``nranks``, including the root rank, where the i-th entry is a :class:`numpy.ndarray` of indices, - of which *source_weights* to be sent from the root rank to rank *i*. Each - entry can be generated by :func:`.generate_local_tree`. *None* on worker - ranks. + of which *src_weight_vecs* to be sent from the root rank to rank *i*. + Each entry can be generated by :func:`.generate_local_tree`. *None* on + worker ranks. :return: Received source weights of the current rank, including the root rank. -- GitLab From 06539fada401bb6be515695b71f227d38be695ac Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Mon, 24 Jan 2022 23:30:46 -0800 Subject: [PATCH 253/260] Address reviewer's suggestions on distributed partitioning --- boxtree/distributed/__init__.py | 4 +- boxtree/distributed/local_tree.py | 30 +++-- boxtree/distributed/partition.py | 202 +++++++++++++++++------------- 3 files changed, 132 insertions(+), 104 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index ce491bf..6e44d2f 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -68,7 +68,9 @@ Partition Boxes .. autofunction:: boxtree.distributed.partition.partition_work -.. autofunction:: boxtree.distributed.partition.get_boxes_mask +.. autoclass:: boxtree.distributed.partition.BoxMasks + +.. autofunction:: boxtree.distributed.partition.get_box_masks .. 
_construct-local-tree-traversal: diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index df47567..a560941 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -440,13 +440,12 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): start_time = time.time() - from boxtree.distributed.partition import get_boxes_mask - (responsible_boxes_mask, ancestor_boxes, src_boxes_mask, box_mpole_is_used) = \ - get_boxes_mask(queue, global_traversal, responsible_boxes_list) + from boxtree.distributed.partition import get_box_masks + box_masks = get_box_masks(queue, global_traversal, responsible_boxes_list) local_tree = global_tree.copy( responsible_boxes_list=responsible_boxes_list, - ancestor_mask=ancestor_boxes.get(), + ancestor_mask=box_masks.ancestor_boxes.get(), box_to_user_starts=None, box_to_user_lists=None, _dimensions=None, @@ -460,8 +459,8 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): local_tree, src_idx, tgt_idx = fetch_local_particles( queue, global_tree, - src_boxes_mask, - responsible_boxes_mask, + box_masks.point_src_boxes, + box_masks.responsible_boxes, local_tree ) @@ -473,29 +472,28 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): # {{{ compute the users of multipole expansions of each box on the root rank - box_mpole_is_used_all_ranks = None + multipole_src_boxes_all_ranks = None if mpi_rank == 0: - box_mpole_is_used_all_ranks = np.empty( - (mpi_size, global_tree.nboxes), dtype=box_mpole_is_used.dtype - ) - comm.Gather(box_mpole_is_used.get(), box_mpole_is_used_all_ranks, root=0) + multipole_src_boxes_all_ranks = np.empty( + (mpi_size, global_tree.nboxes), + dtype=box_masks.multipole_src_boxes.dtype) + comm.Gather( + box_masks.multipole_src_boxes.get(), multipole_src_boxes_all_ranks, root=0) box_to_user_starts = None box_to_user_lists = None if mpi_rank == 0: - box_mpole_is_used_all_ranks = cl.array.to_device( - queue, box_mpole_is_used_all_ranks - ) + multipole_src_boxes_all_ranks = cl.array.to_device( + queue, multipole_src_boxes_all_ranks) from boxtree.tools import MaskCompressorKernel matcompr = MaskCompressorKernel(queue.context) (box_to_user_starts, box_to_user_lists, evt) = \ - matcompr(queue, box_mpole_is_used_all_ranks.transpose(), + matcompr(queue, multipole_src_boxes_all_ranks.transpose(), list_dtype=np.int32) cl.wait_for_events([evt]) - del box_mpole_is_used box_to_user_starts = box_to_user_starts.get() box_to_user_lists = box_to_user_lists.get() diff --git a/boxtree/distributed/partition.py b/boxtree/distributed/partition.py index 3164565..5f8d43d 100644 --- a/boxtree/distributed/partition.py +++ b/boxtree/distributed/partition.py @@ -26,15 +26,38 @@ import pyopencl as cl from pyopencl.tools import dtype_to_ctype from mako.template import Template from pytools import memoize_method +from dataclasses import dataclass -def partition_work(boxes_time, traversal, comm): +def get_box_ids_dfs_order(tree): + """Helper function for getting box ids of a tree in depth-first order. + + :arg tree: A :class:`boxtree.Tree` object in the host memory. See + :meth:`boxtree.Tree.get` for getting a tree object in host memory. + :return: A numpy array of box ids in depth-first order. 
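# A tiny worked example (illustration only, traced by hand from the loop
# below) of the ordering this function returns.  For a 5-box binary tree with
#
#     box_child_ids = [[1, 3, 0, 0, 0],     # first child of boxes 0..4
#                      [2, 4, 0, 0, 0]]     # second child of boxes 0..4
#
# i.e. box 0 has children 1 and 2, and box 1 has children 3 and 4, the stack
# below pops children in reverse push order, so the result is
#
#     dfs_order = [0, 2, 1, 4, 3]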
+ """ + # FIXME: optimize the performance with OpenCL + dfs_order = np.empty((tree.nboxes,), dtype=tree.box_id_dtype) + idx = 0 + stack = [0] + while stack: + box_id = stack.pop() + dfs_order[idx] = box_id + idx += 1 + for i in range(2**tree.dimensions): + child_box_id = tree.box_child_ids[i][box_id] + if child_box_id > 0: + stack.append(child_box_id) + return dfs_order + + +def partition_work(cost_per_box, traversal, comm): """This function assigns responsible boxes for each rank. If a rank is responsible for a box, it will calculate the multiple expansion of the box and evaluate target potentials in the box. - :arg boxes_time: The expected running time of each box. This argument is only + :arg cost_per_box: The expected running time of each box. This argument is only significant on the root rank. :arg traversal: The global traversal object containing all particles. This argument is significant on all ranks. @@ -51,22 +74,12 @@ def partition_work(boxes_time, traversal, comm): # transform tree from the level order to the morton dfs order # dfs_order[i] stores the level-order box index of dfs index i - # FIXME: optimize the performance with OpenCL - dfs_order = np.empty((tree.nboxes,), dtype=tree.box_id_dtype) - idx = 0 - stack = [0] - while len(stack) > 0: - box_id = stack.pop() - dfs_order[idx] = box_id - idx += 1 - for i in range(2**tree.dimensions): - child_box_id = tree.box_child_ids[i][box_id] - if child_box_id > 0: - stack.append(child_box_id) + dfs_order = get_box_ids_dfs_order(tree) # partition all boxes in dfs order evenly according to workload on the root rank responsible_boxes_segments = None + # contains: [start_index, end_index) responsible_boxes_current_rank = np.empty(2, dtype=tree.box_id_dtype) # FIXME: Right now, the responsible boxes assigned to all ranks are computed @@ -74,11 +87,10 @@ def partition_work(boxes_time, traversal, comm): # operations. We could improve the efficiency by letting each rank compute the # costs of a subset of boxes, and use MPI_Scan to aggregate the results. if mpi_rank == 0: - total_workload = 0 - for box_idx in range(tree.nboxes): - total_workload += boxes_time[box_idx] + total_workload = np.sum(cost_per_box) - responsible_boxes_segments = np.empty([mpi_size, 2], dtype=tree.box_id_dtype) + # second axis: [start_index, end_index) + responsible_boxes_segments = np.empty((mpi_size, 2), dtype=tree.box_id_dtype) segment_idx = 0 start = 0 workload_count = 0 @@ -88,9 +100,10 @@ def partition_work(boxes_time, traversal, comm): break box_idx = dfs_order[box_idx_dfs_order] - workload_count += boxes_time[box_idx] + workload_count += cost_per_box[box_idx] if (workload_count > (segment_idx + 1) * total_workload / mpi_size or box_idx_dfs_order == tree.nboxes - 1): + # record "end of rank segment" responsible_boxes_segments[segment_idx, :] = ( [start, box_idx_dfs_order + 1]) start = box_idx_dfs_order + 1 @@ -102,49 +115,16 @@ def partition_work(boxes_time, traversal, comm): responsible_boxes_current_rank[0]:responsible_boxes_current_rank[1]] -def mark_parent_kernel(context, box_id_dtype): - return cl.elementwise.ElementwiseKernel( - context, - "__global char *current, __global char *parent, " - "__global %s *box_parent_ids" % dtype_to_ctype(box_id_dtype), - "if(i != 0 && current[i]) parent[box_parent_ids[i]] = 1" - ) - - -def get_ancestor_boxes_mask(queue, traversal, responsible_boxes_mask): - """Query the ancestors of responsible boxes. 
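# An equivalent plain-NumPy formulation (a sketch, not from the patch) of the
# greedy split above: cut the cumulative cost of the DFS-ordered boxes at
# multiples of total_workload / mpi_size.  It is meant to show the idea, not
# to reproduce the loop's behavior bit for bit in every edge case.

import numpy as np

def split_into_segments_sketch(cost_per_box_dfs_order, num_ranks):
    cumulative = np.cumsum(cost_per_box_dfs_order)
    nboxes = len(cost_per_box_dfs_order)
    thresholds = cumulative[-1] * np.arange(1, num_ranks) / num_ranks
    # first box whose cumulative cost strictly exceeds each threshold,
    # included in the segment that crosses that threshold
    cuts = np.minimum(
        np.searchsorted(cumulative, thresholds, side="right") + 1, nboxes)
    bounds = np.concatenate(([0], cuts, [nboxes]))
    # second axis: [start_index, end_index) per rank, as above
    return np.stack([bounds[:-1], bounds[1:]], axis=-1)

# For cost_per_box_dfs_order = [3, 1, 2, 2, 4, 1, 2, 1] and 2 ranks, the total
# workload is 16 and the split is [[0, 5], [5, 8]], matching the loop above.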
- - :arg responsible_boxes_mask: A :class:`pyopencl.array.Array` object of shape - ``(tree.nboxes,)`` whose i-th entry is 1 if ``i`` is a responsible box. - :return: A :class:`pyopencl.array.Array` object of shape ``(tree.nboxes,)`` whose - i-th entry is 1 if ``i`` is an ancestor of the responsible boxes specified by - *responsible_boxes_mask*. - """ - ancestor_boxes = cl.array.zeros(queue, (traversal.tree.nboxes,), dtype=np.int8) - ancestor_boxes_last = responsible_boxes_mask.copy() - - while ancestor_boxes_last.any(): - ancestor_boxes_new = cl.array.zeros( - queue, (traversal.tree.nboxes,), dtype=np.int8 - ) - mark_parent_kernel(queue.context, traversal.tree.box_id_dtype)( - ancestor_boxes_last, ancestor_boxes_new, traversal.tree.box_parent_ids - ) - ancestor_boxes_new = ancestor_boxes_new & (~ancestor_boxes) - ancestor_boxes = ancestor_boxes | ancestor_boxes_new - ancestor_boxes_last = ancestor_boxes_new - - return ancestor_boxes - - -class GetBoxesMaskCodeContainer: +class GetBoxMasksCodeContainer: def __init__(self, cl_context, box_id_dtype): self.cl_context = cl_context self.box_id_dtype = box_id_dtype - # helper kernel for adding boxes from interaction list 1 and 4 @memoize_method def add_interaction_list_boxes_kernel(self): + """Given a ``responsible_boxes_mask`` and an interaction list, mark source + boxes for target boxes in ``responsible_boxes_mask`` in a new separate mask. + """ return cl.elementwise.ElementwiseKernel( self.cl_context, Template(""" @@ -170,8 +150,41 @@ class GetBoxesMaskCodeContainer: ), ) + @memoize_method + def add_parent_boxes_kernel(self): + return cl.elementwise.ElementwiseKernel( + self.cl_context, + "__global char *current, __global char *parent, " + "__global %s *box_parent_ids" % dtype_to_ctype(self.box_id_dtype), + "if(i != 0 && current[i]) parent[box_parent_ids[i]] = 1" + ) + + +def get_ancestor_boxes_mask(queue, code, traversal, responsible_boxes_mask): + """Query the ancestors of responsible boxes. + + :arg responsible_boxes_mask: A :class:`pyopencl.array.Array` object of shape + ``(tree.nboxes,)`` whose i-th entry is 1 if ``i`` is a responsible box. + :return: A :class:`pyopencl.array.Array` object of shape ``(tree.nboxes,)`` whose + i-th entry is 1 if ``i`` is an ancestor of the responsible boxes specified by + *responsible_boxes_mask*. + """ + ancestor_boxes = cl.array.zeros(queue, (traversal.tree.nboxes,), dtype=np.int8) + ancestor_boxes_last = responsible_boxes_mask.copy() + + while ancestor_boxes_last.any(): + ancestor_boxes_new = cl.array.zeros( + queue, (traversal.tree.nboxes,), dtype=np.int8) + code.add_parent_boxes_kernel()( + ancestor_boxes_last, ancestor_boxes_new, traversal.tree.box_parent_ids) + ancestor_boxes_new = ancestor_boxes_new & (~ancestor_boxes) + ancestor_boxes = ancestor_boxes | ancestor_boxes_new + ancestor_boxes_last = ancestor_boxes_new + + return ancestor_boxes + -def get_src_boxes_mask( +def get_point_src_boxes_mask( queue, code, traversal, responsible_boxes_mask, ancestor_boxes_mask): """Query the boxes whose sources are needed in order to evaluate potentials of boxes represented by *responsible_boxes_mask*. 
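# A plain-NumPy reference (illustration only) for the ancestor sweep that
# get_ancestor_boxes_mask above implements: repeatedly propagate the mask one
# level up via box_parent_ids until no new ancestors appear.  The function
# name is made up for this sketch.

import numpy as np

def ancestor_boxes_mask_reference(responsible_boxes_mask, box_parent_ids):
    ancestors = np.zeros(len(responsible_boxes_mask), dtype=bool)
    frontier = np.asarray(responsible_boxes_mask, dtype=bool)
    while frontier.any():
        parents = np.zeros_like(ancestors)
        marked = np.flatnonzero(frontier)
        marked = marked[marked != 0]          # box 0, the root, has no parent
        parents[box_parent_ids[marked]] = True
        frontier = parents & ~ancestors       # keep only newly found ancestors
        ancestors |= frontier
    return ancestors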
@@ -193,9 +206,7 @@ def get_src_boxes_mask( traversal.target_boxes, responsible_boxes_mask, traversal.neighbor_source_boxes_starts, traversal.neighbor_source_boxes_lists, src_boxes_mask, - range=range(0, traversal.target_boxes.shape[0]), - queue=queue - ) + queue=queue) # Add list 4 of responsible boxes or ancestor boxes code.add_interaction_list_boxes_kernel()( @@ -203,9 +214,7 @@ def get_src_boxes_mask( responsible_boxes_mask | ancestor_boxes_mask, traversal.from_sep_bigger_starts, traversal.from_sep_bigger_lists, src_boxes_mask, - range=range(0, traversal.target_or_target_parent_boxes.shape[0]), - queue=queue - ) + queue=queue) if traversal.tree.targets_have_extent: # Add list 3 close of responsible boxes @@ -233,7 +242,7 @@ def get_src_boxes_mask( return src_boxes_mask -def get_multipole_boxes_mask( +def get_multipole_src_boxes_mask( queue, code, traversal, responsible_boxes_mask, ancestor_boxes_mask): """Query the boxes whose multipoles are used in order to evaluate potentials of targets in boxes represented by *responsible_boxes_mask*. @@ -280,24 +289,45 @@ def get_multipole_boxes_mask( return multipole_boxes_mask -def get_boxes_mask(queue, traversal, responsible_boxes_list): - """Given the responsible boxes for a rank, this helper function calculates the - following four masks: +@dataclass +class BoxMasks: + """ + Box masks needed for the distributed calculation. Each of these masks is a + PyOpenCL array with length ``tree.nboxes``, whose `i`-th entry is 1 if box `i` is + set. + + .. attribute:: responsible_boxes + + Current process will evaluate target potentials and multipole expansions in + these boxes. Sources and targets in these boxes are needed. - * responsible_box_mask: Current process will evaluate target potentials and - multipole expansions in these boxes. Sources and targets in these boxes - are needed. - * ancestor_boxes_mask: The the ancestor of the responsible boxes. - * src_boxes_mask: Current process needs sources but not targets in these boxes. - * multipole_boxes_mask: Current process needs multipole expressions in these - boxes. + .. attribute:: ancestor_boxes + + Ancestors of the responsible boxes. + + .. attribute:: point_src_boxes + + Current process needs sources but not targets in these boxes. + + .. attribute:: multipole_src_boxes + + Current process needs multipole expressions in these boxes. + """ + responsible_boxes: cl.array.Array + ancestor_boxes: cl.array.Array + point_src_boxes: cl.array.Array + multipole_src_boxes: cl.array.Array + + +def get_box_masks(queue, traversal, responsible_boxes_list): + """Given the responsible boxes for a rank, this helper function calculates the + relevant masks. :arg responsible_boxes_list: A numpy array of responsible box indices. - :returns: responsible_box_mask, ancestor_boxes_mask, src_boxes_mask and - multipole_boxes_mask, as described above. + :returns: A :class:`BoxMasks` object of the relevant masks. 
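# A short usage and sanity-check sketch (an assumption for illustration; the
# real call site is generate_local_tree).  Here `queue`, `global_traversal`
# and `responsible_boxes_list` are taken to exist from the surrounding setup:
#
#   masks = get_box_masks(queue, global_traversal, responsible_boxes_list)
#   responsible = masks.responsible_boxes.get().astype(bool)
#   point_src = masks.point_src_boxes.get().astype(bool)
#   # every responsible box also appears among the point-source boxes, since
#   # get_point_src_boxes_mask starts from a copy of the responsible mask
#   assert not (responsible & ~point_src).any()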
""" - code = GetBoxesMaskCodeContainer(queue.context, traversal.tree.box_id_dtype) + code = GetBoxMasksCodeContainer(queue.context, traversal.tree.box_id_dtype) traversal = traversal.to_device(queue) @@ -306,16 +336,14 @@ def get_boxes_mask(queue, traversal, responsible_boxes_list): responsible_boxes_mask = cl.array.to_device(queue, responsible_boxes_mask) ancestor_boxes_mask = get_ancestor_boxes_mask( - queue, traversal, responsible_boxes_mask - ) + queue, code, traversal, responsible_boxes_mask) - src_boxes_mask = get_src_boxes_mask( - queue, code, traversal, responsible_boxes_mask, ancestor_boxes_mask - ) + point_src_boxes_mask = get_point_src_boxes_mask( + queue, code, traversal, responsible_boxes_mask, ancestor_boxes_mask) - multipole_boxes_mask = get_multipole_boxes_mask( - queue, code, traversal, responsible_boxes_mask, ancestor_boxes_mask - ) + multipole_src_boxes_mask = get_multipole_src_boxes_mask( + queue, code, traversal, responsible_boxes_mask, ancestor_boxes_mask) - return (responsible_boxes_mask, ancestor_boxes_mask, src_boxes_mask, - multipole_boxes_mask) + return BoxMasks( + responsible_boxes_mask, ancestor_boxes_mask, point_src_boxes_mask, + multipole_src_boxes_mask) -- GitLab From 6b7eb2850aac03c4bd6aaf72b20260292038bbad Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 30 Jan 2022 22:55:50 -0800 Subject: [PATCH 254/260] Address reviewer's comments on local tree --- boxtree/distributed/__init__.py | 2 + boxtree/distributed/calculation.py | 41 ++++---- boxtree/distributed/local_tree.py | 148 ++++++++++++++--------------- boxtree/tools.py | 1 + 4 files changed, 93 insertions(+), 99 deletions(-) diff --git a/boxtree/distributed/__init__.py b/boxtree/distributed/__init__.py index 6e44d2f..26dd346 100644 --- a/boxtree/distributed/__init__.py +++ b/boxtree/distributed/__init__.py @@ -77,6 +77,8 @@ Partition Boxes Construct Local Tree and Traversal ---------------------------------- +.. autoclass:: boxtree.distributed.local_tree.LocalTree + .. autofunction:: boxtree.distributed.local_tree.generate_local_tree .. 
autofunction:: boxtree.distributed.local_traversal.generate_local_travs diff --git a/boxtree/distributed/calculation.py b/boxtree/distributed/calculation.py index 8678e20..6df05ec 100644 --- a/boxtree/distributed/calculation.py +++ b/boxtree/distributed/calculation.py @@ -180,18 +180,18 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): ${box_id_t} *contributing_boxes_list, int subrange_start, int subrange_end, - ${box_id_t} *box_to_user_starts, - int *box_to_user_lists, + ${box_id_t} *box_to_user_rank_starts, + int *box_to_user_rank_lists, char *box_in_subrange """).render( box_id_t=dtype_to_ctype(box_id_dtype), ), Template(r""" ${box_id_t} ibox = contributing_boxes_list[i]; - ${box_id_t} iuser_start = box_to_user_starts[ibox]; - ${box_id_t} iuser_end = box_to_user_starts[ibox + 1]; + ${box_id_t} iuser_start = box_to_user_rank_starts[ibox]; + ${box_id_t} iuser_end = box_to_user_rank_starts[ibox + 1]; for(${box_id_t} iuser = iuser_start; iuser < iuser_end; iuser++) { - int useri = box_to_user_lists[iuser]; + int useri = box_to_user_rank_lists[iuser]; if(subrange_start <= useri && useri < subrange_end) { box_in_subrange[i] = 1; } @@ -203,17 +203,17 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): ) def find_boxes_used_by_subrange( - self, subrange, box_to_user_starts, box_to_user_lists, + self, subrange, box_to_user_rank_starts, box_to_user_rank_lists, contributing_boxes_list): """Test whether the multipole expansions of the contributing boxes are used by at least one box in a range. :arg subrange: the range is represented by ``(subrange[0], subrange[1])``. - :arg box_to_user_start: a :class:`pyopencl.array.Array` object indicating the - start and end index in *box_to_user_lists* for each box in - *contributing_boxes_list*. - :arg box_to_user_lists: a :class:`pyopencl.array.Array` object storing the - users of each box in *contributing_boxes_list*. + :arg box_to_user_rank_starts: a :class:`pyopencl.array.Array` object + indicating the start and end index in *box_to_user_rank_lists* for each + box in *contributing_boxes_list*. + :arg box_to_user_rank_lists: a :class:`pyopencl.array.Array` object storing + the users of each box in *contributing_boxes_list*. 
:returns: a :class:`pyopencl.array.Array` object with the same shape as *contributing_boxes_list*, where the i-th entry is 1 if ``contributing_boxes_list[i]`` is used by at least on box in the @@ -231,8 +231,8 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): contributing_boxes_list, subrange[0], subrange[1], - box_to_user_starts, - box_to_user_lists, + box_to_user_rank_starts, + box_to_user_rank_lists, box_in_subrange ) @@ -291,11 +291,11 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): stats["bytes_recvd_by_stage"] = [] with cl.CommandQueue(self.context) as queue: - box_to_user_starts_dev = cl.array.to_device( - queue, tree.box_to_user_starts).with_queue(None) + box_to_user_rank_starts_dev = cl.array.to_device( + queue, tree.box_to_user_rank_starts).with_queue(None) - box_to_user_lists_dev = cl.array.to_device( - queue, tree.box_to_user_lists).with_queue(None) + box_to_user_rank_lists_dev = cl.array.to_device( + queue, tree.box_to_user_rank_lists).with_queue(None) while not comm_pattern.done(): send_requests = [] @@ -315,7 +315,7 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): box_in_subrange = self.find_boxes_used_by_subrange( message_subrange, - box_to_user_starts_dev, box_to_user_lists_dev, + box_to_user_rank_starts_dev, box_to_user_rank_lists_dev, contributing_boxes_list_dev ) @@ -329,10 +329,11 @@ class DistributedExpansionWrangler(ExpansionWranglerInterface): # Pure Python version for debugging purpose relevant_boxes_list = [] for contrib_box in contributing_boxes_list: - iuser_start, iuser_end = tree.box_to_user_starts[ + iuser_start, iuser_end = tree.box_to_user_rank_starts[ contrib_box:contrib_box + 2 ] - for user_box in tree.box_to_user_lists[iuser_start:iuser_end]: + for user_box in tree.box_to_user_rank_lists[ + iuser_start:iuser_end]: if subrange_start <= user_box < subrange_end: relevant_boxes_list.append(contrib_box) break diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index a560941..dcfc1c6 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -25,7 +25,6 @@ import pyopencl as cl from mako.template import Template from pyopencl.tools import dtype_to_ctype from boxtree import Tree -from boxtree.tools import ImmutableHostDeviceArray import time import numpy as np from pytools import memoize_method @@ -34,18 +33,21 @@ import logging logger = logging.getLogger(__name__) +# FIXME: The logic in this file has a lot in common with +# the particle filtering functionality that already exists. +# We should refactor this to make use of this commonality. +# https://documen.tician.de/boxtree/tree.html#filtering-the-lists-of-targets + + class FetchLocalParticlesCodeContainer: """Objects of this type serve as a place to keep the code needed for :func:`fetch_local_particles`. 
""" - def __init__(self, cl_context, dimensions, particle_id_dtype, coord_dtype, - sources_have_extent, targets_have_extent): + def __init__(self, cl_context, dimensions, particle_id_dtype, coord_dtype): self.cl_context = cl_context self.dimensions = dimensions self.particle_id_dtype = particle_id_dtype self.coord_dtype = coord_dtype - self.sources_have_extent = sources_have_extent - self.targets_have_extent = targets_have_extent @memoize_method def particle_mask_kernel(self): @@ -115,43 +117,26 @@ class FetchLocalParticlesCodeContainer: """, strict_undefined=True) @memoize_method - def fetch_local_sources_kernel(self): + def fetch_local_particles_kernel(self, particles_have_extent): return cl.elementwise.ElementwiseKernel( self.cl_context, self.fetch_local_paticles_arguments.render( mask_t=dtype_to_ctype(self.particle_id_dtype), coord_t=dtype_to_ctype(self.coord_dtype), ndims=self.dimensions, - particles_have_extent=self.sources_have_extent + particles_have_extent=particles_have_extent ), self.fetch_local_particles_prg.render( particle_id_t=dtype_to_ctype(self.particle_id_dtype), ndims=self.dimensions, - particles_have_extent=self.sources_have_extent - ) - ) - - @memoize_method - def fetch_local_targets_kernel(self): - return cl.elementwise.ElementwiseKernel( - self.cl_context, - self.fetch_local_paticles_arguments.render( - mask_t=dtype_to_ctype(self.particle_id_dtype), - coord_t=dtype_to_ctype(self.coord_dtype), - ndims=self.dimensions, - particles_have_extent=self.targets_have_extent - ), - self.fetch_local_particles_prg.render( - particle_id_t=dtype_to_ctype(self.particle_id_dtype), - ndims=self.dimensions, - particles_have_extent=self.targets_have_extent + particles_have_extent=particles_have_extent ) ) def fetch_local_particles( queue, global_tree, src_box_mask, tgt_box_mask, local_tree): - """This helper function generates particles of the local tree, and reconstruct + """This helper function generates particles of the local tree, and reconstructs list of lists indexing accordingly. Specifically, this function generates the following fields for the local tree: @@ -163,8 +148,7 @@ def fetch_local_particles( """ code = FetchLocalParticlesCodeContainer( queue.context, global_tree.dimensions, - global_tree.particle_id_dtype, global_tree.coord_dtype, - global_tree.sources_have_extent, global_tree.targets_have_extent) + global_tree.particle_id_dtype, global_tree.coord_dtype) global_tree_dev = global_tree.to_device(queue).with_queue(queue) nsources = global_tree.nsources @@ -187,19 +171,23 @@ def fetch_local_particles( # {{{ scan of source particle mask - src_particle_scan = cl.array.empty( - queue, (nsources + 1,), - dtype=global_tree.particle_id_dtype - ) + # Mapping from the source index in the global tree to that in the local tree. + # Note that in general some global sources have no mapping, and + # *global_to_local_source_index* of those sources are undefined. + # *src_particle_mask* should be used to check whether the corresponding + # *global_to_local_source_index* is valid. 
+ global_to_local_source_index = cl.array.empty( + queue, nsources + 1, + dtype=global_tree.particle_id_dtype) - src_particle_scan[0] = 0 - code.mask_scan_kernel()(src_particle_mask, src_particle_scan) + global_to_local_source_index[0] = 0 + code.mask_scan_kernel()(src_particle_mask, global_to_local_source_index) # }}} # {{{ local sources - local_nsources = src_particle_scan[-1].get(queue) + local_nsources = global_to_local_source_index[-1].get(queue) local_sources = cl.array.empty( queue, (global_tree.dimensions, local_nsources), @@ -213,8 +201,8 @@ def fetch_local_particles( assert global_tree.sources_have_extent is False - code.fetch_local_sources_kernel()( - src_particle_mask, src_particle_scan, + code.fetch_local_particles_kernel(global_tree.sources_have_extent)( + src_particle_mask, global_to_local_source_index, *global_tree_dev.sources.tolist(), *local_sources_list ) @@ -223,7 +211,8 @@ def fetch_local_particles( # {{{ box_source_starts - local_box_source_starts = src_particle_scan[global_tree_dev.box_source_starts] + local_box_source_starts = global_to_local_source_index[ + global_tree_dev.box_source_starts] # }}} @@ -234,6 +223,7 @@ def fetch_local_particles( dtype=global_tree.particle_id_dtype) local_box_source_counts_nonchild = cl.array.if_positive( + # We're responsible for boxes in their entirety or not at all. src_box_mask, global_tree_dev.box_source_counts_nonchild, box_counts_all_zeros) @@ -245,8 +235,8 @@ def fetch_local_particles( global_tree_dev.box_source_starts + global_tree_dev.box_source_counts_cumul) local_box_source_counts_cumul = ( - src_particle_scan[box_source_ends_cumul] - - src_particle_scan[global_tree_dev.box_source_starts]) + global_to_local_source_index[box_source_ends_cumul] + - global_to_local_source_index[global_tree_dev.box_source_starts]) # }}} @@ -270,19 +260,18 @@ def fetch_local_particles( # {{{ scan of target particle mask - tgt_particle_scan = cl.array.empty( - queue, (ntargets + 1,), - dtype=global_tree.particle_id_dtype - ) + global_to_local_target_index = cl.array.empty( + queue, ntargets + 1, + dtype=global_tree.particle_id_dtype) - tgt_particle_scan[0] = 0 - code.mask_scan_kernel()(tgt_particle_mask, tgt_particle_scan) + global_to_local_target_index[0] = 0 + code.mask_scan_kernel()(tgt_particle_mask, global_to_local_target_index) # }}} # {{{ local targets - local_ntargets = tgt_particle_scan[-1].get(queue) + local_ntargets = global_to_local_target_index[-1].get(queue) local_targets = cl.array.empty( queue, (local_tree.dimensions, local_ntargets), @@ -300,23 +289,24 @@ def fetch_local_particles( dtype=global_tree.coord_dtype ) - code.fetch_local_targets_kernel()( - tgt_particle_mask, tgt_particle_scan, + code.fetch_local_particles_kernel(True)( + tgt_particle_mask, global_to_local_target_index, *global_tree_dev.targets.tolist(), *local_targets_list, global_tree_dev.target_radii, local_target_radii ) else: - code.fetch_local_targets_kernel()( - tgt_particle_mask, tgt_particle_scan, + code.fetch_local_particles_kernel(False)( + tgt_particle_mask, global_to_local_target_index, *global_tree_dev.targets.tolist(), *local_targets_list ) # {{{ box_target_starts - local_box_target_starts = tgt_particle_scan[global_tree_dev.box_target_starts] + local_box_target_starts = global_to_local_target_index[ + global_tree_dev.box_target_starts] # }}} @@ -334,8 +324,8 @@ def fetch_local_particles( global_tree_dev.box_target_starts + global_tree_dev.box_target_counts_cumul) local_box_target_counts_cumul = ( - tgt_particle_scan[box_target_ends_cumul] - - 
tgt_particle_scan[global_tree_dev.box_target_starts]) + global_to_local_target_index[box_target_ends_cumul] + - global_to_local_target_index[global_tree_dev.box_target_starts]) # }}} @@ -383,25 +373,25 @@ def fetch_local_particles( class LocalTree(Tree): """ - .. attribute:: box_to_user_starts + Inherits from :class:`boxtree.Tree`. + + .. attribute:: box_to_user_rank_starts ``box_id_t [nboxes + 1]`` - .. attribute:: box_to_user_lists + .. attribute:: box_to_user_rank_lists ``int32 [*]`` - A :ref:`csr` array. For each box, the list of processes which own - targets that *use* the multipole expansion at this box, via either List - 3 or (possibly downward propagated from an ancestor) List 2. + A :ref:`csr` array, together with :attr:`box_to_user_rank_starts`. + For each box, the list of ranks which own targets that *use* the + multipole expansion at this box, via either List 3 or (possibly downward + propagated from an ancestor) List 2. """ @property def nboxes(self): - if isinstance(self.box_source_starts, ImmutableHostDeviceArray): - return self.box_source_starts.host.shape[0] - else: - return self.box_source_starts.shape[0] + return self.box_source_starts.shape[0] @property def nsources(self): @@ -426,11 +416,11 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): responsible boxes of the current rank. :return: a tuple of ``(local_tree, src_idx, tgt_idx)``, where ``local_tree`` is - an object with class `boxtree.tools.ImmutableHostDeviceArray` of generated - local tree, ``src_idx`` is the indices of the local sources in the global - tree, and ``tgt_idx`` is the indices of the local targets in the global tree. - ``src_idx`` and ``tgt_idx`` are needed for distributing source weights from - root rank and assembling calculated potentials on the root rank. + an object with class :class:`boxtree.distributed.local_tree.LocalTree` of the + generated local tree, ``src_idx`` is the indices of the local sources in the + global tree, and ``tgt_idx`` is the indices of the local targets in the + global tree. ``src_idx`` and ``tgt_idx`` are needed for distributing source + weights from root rank and assembling calculated potentials on the root rank. 
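# A small sketch (illustration only; the real logic is in
# distribute_source_weights and gather_potential_results earlier in this
# series) of how the root rank uses the per-rank index arrays that
# generate_local_tree returns.  The function name and arguments are made up
# for this sketch.

import numpy as np

def scatter_and_gather_on_root_sketch(global_src_weights, src_idx_all_ranks,
                                      potentials_all_ranks, tgt_idx_all_ranks,
                                      num_global_targets):
    # what the root would send to each rank (rank 0 keeps its own slice)
    weights_per_rank = [global_src_weights[idx] for idx in src_idx_all_ranks]

    # how the root reassembles the global potential vector afterwards
    global_potentials = np.empty(
        num_global_targets, dtype=potentials_all_ranks[0].dtype)
    for tgt_idx, potentials in zip(tgt_idx_all_ranks, potentials_all_ranks):
        global_potentials[tgt_idx] = potentials

    return weights_per_rank, global_potentials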
""" global_tree = global_traversal.tree @@ -446,8 +436,8 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): local_tree = global_tree.copy( responsible_boxes_list=responsible_boxes_list, ancestor_mask=box_masks.ancestor_boxes.get(), - box_to_user_starts=None, - box_to_user_lists=None, + box_to_user_rank_starts=None, + box_to_user_rank_lists=None, _dimensions=None, _ntargets=None, _nsources=None, @@ -480,8 +470,8 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): comm.Gather( box_masks.multipole_src_boxes.get(), multipole_src_boxes_all_ranks, root=0) - box_to_user_starts = None - box_to_user_lists = None + box_to_user_rank_starts = None + box_to_user_rank_lists = None if mpi_rank == 0: multipole_src_boxes_all_ranks = cl.array.to_device( @@ -489,22 +479,22 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): from boxtree.tools import MaskCompressorKernel matcompr = MaskCompressorKernel(queue.context) - (box_to_user_starts, box_to_user_lists, evt) = \ + (box_to_user_rank_starts, box_to_user_rank_lists, evt) = \ matcompr(queue, multipole_src_boxes_all_ranks.transpose(), list_dtype=np.int32) cl.wait_for_events([evt]) - box_to_user_starts = box_to_user_starts.get() - box_to_user_lists = box_to_user_lists.get() + box_to_user_rank_starts = box_to_user_rank_starts.get() + box_to_user_rank_lists = box_to_user_rank_lists.get() logger.debug("computing box_to_user: done") - box_to_user_starts = comm.bcast(box_to_user_starts, root=0) - box_to_user_lists = comm.bcast(box_to_user_lists, root=0) + box_to_user_rank_starts = comm.bcast(box_to_user_rank_starts, root=0) + box_to_user_rank_lists = comm.bcast(box_to_user_rank_lists, root=0) - local_tree.box_to_user_starts = box_to_user_starts - local_tree.box_to_user_lists = box_to_user_lists + local_tree.box_to_user_rank_starts = box_to_user_rank_starts + local_tree.box_to_user_rank_lists = box_to_user_rank_lists # }}} diff --git a/boxtree/tools.py b/boxtree/tools.py index c6b52ff..d9549be 100644 --- a/boxtree/tools.py +++ b/boxtree/tools.py @@ -870,6 +870,7 @@ class ImmutableHostDeviceArray: """ def __init__(self, queue, array): self.queue = queue + self.shape = array.shape self.host_array = None self.device_array = None -- GitLab From 9d8e6668a69641bb520e3e1fae709c0bea780188 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 13 Feb 2022 21:55:33 -0800 Subject: [PATCH 255/260] Refactor repeated logic of generating local particles --- boxtree/distributed/local_tree.py | 354 ++++++++++++------------------ 1 file changed, 137 insertions(+), 217 deletions(-) diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index dcfc1c6..9fadee9 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -21,13 +21,15 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" -import pyopencl as cl +from boxtree import Tree from mako.template import Template from pyopencl.tools import dtype_to_ctype -from boxtree import Tree -import time -import numpy as np from pytools import memoize_method +import numpy as np +import pyopencl as cl +from dataclasses import dataclass +from typing import Optional +import time import logging logger = logging.getLogger(__name__) @@ -39,9 +41,9 @@ logger = logging.getLogger(__name__) # https://documen.tician.de/boxtree/tree.html#filtering-the-lists-of-targets -class FetchLocalParticlesCodeContainer: +class LocalTreeGeneratorCodeContainer: """Objects of this type serve as a place to keep the code needed for - :func:`fetch_local_particles`. + :func:`generate_local_tree`. """ def __init__(self, cl_context, dimensions, particle_id_dtype, coord_dtype): self.cl_context = cl_context @@ -134,241 +136,99 @@ class FetchLocalParticlesCodeContainer: ) -def fetch_local_particles( - queue, global_tree, src_box_mask, tgt_box_mask, local_tree): - """This helper function generates particles of the local tree, and reconstructs - list of lists indexing accordingly. - - Specifically, this function generates the following fields for the local tree: - sources, targets, target_radii, box_source_starts, box_source_counts_nonchild, - box_source_counts_cumul, box_target_starts, box_target_counts_nonchild, - box_target_counts_cumul. - - These generated fields are stored directly into *local_tree*. +@dataclass +class LocalParticlesAndLists: + particles: cl.array.Array + particle_radii: Optional[cl.array.Array] + box_particle_starts: cl.array.Array + box_particle_counts_nonchild: cl.array.Array + box_particle_counts_cumul: cl.array.Array + particle_idx: np.ndarray + + +def construct_local_particles_and_lists( + queue, code, dimensions, num_boxes, num_global_particles, + particle_id_dtype, coord_dtype, particles_have_extent, + box_mask, + global_particles, global_particle_radii, + box_particle_starts, box_particle_counts_nonchild, + box_particle_counts_cumul): + """This helper function generates particles (either sources or targets) of the + local tree, and reconstructs list of lists indexing accordingly. """ - code = FetchLocalParticlesCodeContainer( - queue.context, global_tree.dimensions, - global_tree.particle_id_dtype, global_tree.coord_dtype) - - global_tree_dev = global_tree.to_device(queue).with_queue(queue) - nsources = global_tree.nsources - - # {{{ source particle mask - - src_particle_mask = cl.array.zeros( - queue, (nsources,), - dtype=global_tree.particle_id_dtype - ) - - code.particle_mask_kernel()( - src_box_mask, - global_tree_dev.box_source_starts, - global_tree_dev.box_source_counts_nonchild, - src_particle_mask - ) - - # }}} - - # {{{ scan of source particle mask - - # Mapping from the source index in the global tree to that in the local tree. - # Note that in general some global sources have no mapping, and - # *global_to_local_source_index* of those sources are undefined. - # *src_particle_mask* should be used to check whether the corresponding - # *global_to_local_source_index* is valid. 
- global_to_local_source_index = cl.array.empty( - queue, nsources + 1, - dtype=global_tree.particle_id_dtype) - - global_to_local_source_index[0] = 0 - code.mask_scan_kernel()(src_particle_mask, global_to_local_source_index) - - # }}} - - # {{{ local sources - - local_nsources = global_to_local_source_index[-1].get(queue) + # {{{ calculate the particle mask - local_sources = cl.array.empty( - queue, (global_tree.dimensions, local_nsources), - dtype=global_tree.coord_dtype - ) - - local_sources_list = [ - local_sources[idim, :] - for idim in range(global_tree.dimensions) - ] - - assert global_tree.sources_have_extent is False - - code.fetch_local_particles_kernel(global_tree.sources_have_extent)( - src_particle_mask, global_to_local_source_index, - *global_tree_dev.sources.tolist(), - *local_sources_list - ) - - # }}} - - # {{{ box_source_starts - - local_box_source_starts = global_to_local_source_index[ - global_tree_dev.box_source_starts] - - # }}} - - # {{{ box_source_counts_nonchild - - box_counts_all_zeros = cl.array.zeros( - queue, (global_tree.nboxes,), - dtype=global_tree.particle_id_dtype) - - local_box_source_counts_nonchild = cl.array.if_positive( - # We're responsible for boxes in their entirety or not at all. - src_box_mask, global_tree_dev.box_source_counts_nonchild, - box_counts_all_zeros) - - # }}} - - # {{{ box_source_counts_cumul - - box_source_ends_cumul = ( - global_tree_dev.box_source_starts + global_tree_dev.box_source_counts_cumul) - - local_box_source_counts_cumul = ( - global_to_local_source_index[box_source_ends_cumul] - - global_to_local_source_index[global_tree_dev.box_source_starts]) - - # }}} - - # {{{ target particle mask - - ntargets = global_tree.ntargets - - tgt_particle_mask = cl.array.zeros( - queue, (ntargets,), - dtype=global_tree.particle_id_dtype - ) + particle_mask = cl.array.zeros( + queue, num_global_particles, dtype=particle_id_dtype) code.particle_mask_kernel()( - tgt_box_mask, - global_tree_dev.box_target_starts, - global_tree_dev.box_target_counts_nonchild, - tgt_particle_mask - ) + box_mask, box_particle_starts, box_particle_counts_nonchild, particle_mask) # }}} - # {{{ scan of target particle mask + # {{{ calculate the scan of the particle mask - global_to_local_target_index = cl.array.empty( - queue, ntargets + 1, - dtype=global_tree.particle_id_dtype) + global_to_local_particle_index = cl.array.empty( + queue, num_global_particles + 1, dtype=particle_id_dtype) - global_to_local_target_index[0] = 0 - code.mask_scan_kernel()(tgt_particle_mask, global_to_local_target_index) + global_to_local_particle_index[0] = 0 + code.mask_scan_kernel()(particle_mask, global_to_local_particle_index) # }}} - # {{{ local targets + # {{{ fetch the local particles - local_ntargets = global_to_local_target_index[-1].get(queue) + num_local_particles = global_to_local_particle_index[-1].get(queue).item() - local_targets = cl.array.empty( - queue, (local_tree.dimensions, local_ntargets), - dtype=local_tree.coord_dtype - ) + local_particles = cl.array.empty( + queue, (dimensions, num_local_particles), dtype=coord_dtype) - local_targets_list = [ - local_targets[idim, :] - for idim in range(local_tree.dimensions) - ] + local_particles_list = [local_particles[idim, :] for idim in range(dimensions)] - if local_tree.targets_have_extent: - local_target_radii = cl.array.empty( - queue, (local_ntargets,), - dtype=global_tree.coord_dtype - ) + local_particle_radii = None + if particles_have_extent: + local_particle_radii = cl.array.empty( + queue, num_local_particles, 
dtype=coord_dtype) code.fetch_local_particles_kernel(True)( - tgt_particle_mask, global_to_local_target_index, - *global_tree_dev.targets.tolist(), - *local_targets_list, - global_tree_dev.target_radii, - local_target_radii - ) + particle_mask, global_to_local_particle_index, + *global_particles.tolist(), + *local_particles_list, + global_particle_radii, + local_particle_radii) else: code.fetch_local_particles_kernel(False)( - tgt_particle_mask, global_to_local_target_index, - *global_tree_dev.targets.tolist(), - *local_targets_list - ) - - # {{{ box_target_starts - - local_box_target_starts = global_to_local_target_index[ - global_tree_dev.box_target_starts] - - # }}} + particle_mask, global_to_local_particle_index, + *global_particles.tolist(), + *local_particles_list) - # {{{ box_target_counts_nonchild + # {{{ construct the list of list indices - local_box_target_counts_nonchild = cl.array.if_positive( - tgt_box_mask, global_tree_dev.box_target_counts_nonchild, - box_counts_all_zeros) + local_box_particle_starts = global_to_local_particle_index[box_particle_starts] - # }}} + box_counts_all_zeros = cl.array.zeros(queue, num_boxes, dtype=particle_id_dtype) - # {{{ box_target_counts_cumul + local_box_particle_counts_nonchild = cl.array.if_positive( + box_mask, box_particle_counts_nonchild, box_counts_all_zeros) - box_target_ends_cumul = ( - global_tree_dev.box_target_starts + global_tree_dev.box_target_counts_cumul) + box_particle_ends_cumul = box_particle_starts + box_particle_counts_cumul - local_box_target_counts_cumul = ( - global_to_local_target_index[box_target_ends_cumul] - - global_to_local_target_index[global_tree_dev.box_target_starts]) + local_box_particle_counts_cumul = ( + global_to_local_particle_index[box_particle_ends_cumul] + - global_to_local_particle_index[box_particle_starts]) # }}} - # {{{ Fetch fields to local_tree - - local_sources = local_sources.get(queue=queue) - local_tree.sources = local_sources - - local_targets = local_targets.get(queue=queue) - local_tree.targets = local_targets - - if global_tree.targets_have_extent: - local_tree.target_radii = local_target_radii.get(queue=queue) - - local_tree.box_source_starts = local_box_source_starts.get(queue=queue) + particle_mask = particle_mask.get(queue=queue).astype(bool) + particle_idx = np.arange(num_global_particles)[particle_mask] - local_tree.box_source_counts_nonchild = \ - local_box_source_counts_nonchild.get(queue=queue) - - local_tree.box_source_counts_cumul = \ - local_box_source_counts_cumul.get(queue=queue) - - local_tree.box_target_starts = local_box_target_starts.get(queue=queue) - - local_tree.box_target_counts_nonchild = \ - local_box_target_counts_nonchild.get(queue=queue) - - local_tree.box_target_counts_cumul = \ - local_box_target_counts_cumul.get(queue=queue) - - # }}} - - # {{{ src_idx and tgt_idx - - src_particle_mask = src_particle_mask.get(queue=queue).astype(bool) - src_idx = np.arange(nsources)[src_particle_mask] - - tgt_particle_mask = tgt_particle_mask.get(queue=queue).astype(bool) - tgt_idx = np.arange(ntargets)[tgt_particle_mask] - - # }}} - - return local_tree, src_idx, tgt_idx + return LocalParticlesAndLists( + local_particles, + local_particle_radii, + local_box_particle_starts, + local_box_particle_counts_nonchild, + local_box_particle_counts_cumul, + particle_idx) class LocalTree(Tree): @@ -423,6 +283,9 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): weights from root rank and assembling calculated potentials on the root rank. 
""" global_tree = global_traversal.tree + code = LocalTreeGeneratorCodeContainer( + queue.context, global_tree.dimensions, + global_tree.particle_id_dtype, global_tree.coord_dtype) # Get MPI information mpi_rank = comm.Get_rank() @@ -446,13 +309,67 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): local_tree.user_source_ids = None local_tree.sorted_target_ids = None - local_tree, src_idx, tgt_idx = fetch_local_particles( - queue, - global_tree, + global_tree_dev = global_tree.to_device(queue).with_queue(queue) + + local_sources_and_lists = construct_local_particles_and_lists( + queue, code, global_tree.dimensions, global_tree.nboxes, + global_tree.nsources, + global_tree.particle_id_dtype, global_tree.coord_dtype, + global_tree.sources_have_extent, box_masks.point_src_boxes, + global_tree_dev.sources, + global_tree_dev.sources_radii if global_tree.sources_have_extent else None, + global_tree_dev.box_source_starts, + global_tree_dev.box_source_counts_nonchild, + global_tree_dev.box_source_counts_cumul) + + local_targets_and_lists = construct_local_particles_and_lists( + queue, code, global_tree.dimensions, global_tree.nboxes, + global_tree.ntargets, + global_tree.particle_id_dtype, global_tree.coord_dtype, + global_tree.targets_have_extent, box_masks.responsible_boxes, - local_tree - ) + global_tree_dev.targets, + global_tree_dev.target_radii if global_tree.targets_have_extent else None, + global_tree_dev.box_target_starts, + global_tree_dev.box_target_counts_nonchild, + global_tree_dev.box_target_counts_cumul) + + # {{{ Fetch fields to local_tree + + local_sources = local_sources_and_lists.particles.get(queue=queue) + local_tree.sources = local_sources + + local_targets = local_targets_and_lists.particles.get(queue=queue) + local_tree.targets = local_targets + + if global_tree.sources_have_extent: + local_tree.source_radii = \ + local_sources_and_lists.particle_radii.get(queue=queue) + + if global_tree.targets_have_extent: + local_tree.target_radii = \ + local_targets_and_lists.particle_radii.get(queue=queue) + + local_tree.box_source_starts = \ + local_sources_and_lists.box_particle_starts.get(queue=queue) + + local_tree.box_source_counts_nonchild = \ + local_sources_and_lists.box_particle_counts_nonchild.get(queue=queue) + + local_tree.box_source_counts_cumul = \ + local_sources_and_lists.box_particle_counts_cumul.get(queue=queue) + + local_tree.box_target_starts = \ + local_targets_and_lists.box_particle_starts.get(queue=queue) + + local_tree.box_target_counts_nonchild = \ + local_targets_and_lists.box_particle_counts_nonchild.get(queue=queue) + + local_tree.box_target_counts_cumul = \ + local_targets_and_lists.box_particle_counts_cumul.get(queue=queue) + + # }}} local_tree._dimensions = local_tree.dimensions local_tree._ntargets = local_tree.targets[0].shape[0] @@ -505,4 +422,7 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): mpi_rank, str(time.time() - start_time) )) - return local_tree, src_idx, tgt_idx + return ( + local_tree, + local_sources_and_lists.particle_idx, + local_targets_and_lists.particle_idx) -- GitLab From 81798d3a42321dffabb3fcf7ebd463ec796f8e34 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 13 Feb 2022 22:40:32 -0800 Subject: [PATCH 256/260] Make local trees by calling constructors instead of copying --- boxtree/distributed/local_tree.py | 121 +++++++++++++++--------------- 1 file changed, 62 insertions(+), 59 deletions(-) diff --git a/boxtree/distributed/local_tree.py 
b/boxtree/distributed/local_tree.py index 9fadee9..31e3347 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -287,7 +287,6 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): queue.context, global_tree.dimensions, global_tree.particle_id_dtype, global_tree.coord_dtype) - # Get MPI information mpi_rank = comm.Get_rank() mpi_size = comm.Get_size() @@ -296,19 +295,6 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): from boxtree.distributed.partition import get_box_masks box_masks = get_box_masks(queue, global_traversal, responsible_boxes_list) - local_tree = global_tree.copy( - responsible_boxes_list=responsible_boxes_list, - ancestor_mask=box_masks.ancestor_boxes.get(), - box_to_user_rank_starts=None, - box_to_user_rank_lists=None, - _dimensions=None, - _ntargets=None, - _nsources=None, - ) - - local_tree.user_source_ids = None - local_tree.sorted_target_ids = None - global_tree_dev = global_tree.to_device(queue).with_queue(queue) local_sources_and_lists = construct_local_particles_and_lists( @@ -335,48 +321,6 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): global_tree_dev.box_target_counts_nonchild, global_tree_dev.box_target_counts_cumul) - # {{{ Fetch fields to local_tree - - local_sources = local_sources_and_lists.particles.get(queue=queue) - local_tree.sources = local_sources - - local_targets = local_targets_and_lists.particles.get(queue=queue) - local_tree.targets = local_targets - - if global_tree.sources_have_extent: - local_tree.source_radii = \ - local_sources_and_lists.particle_radii.get(queue=queue) - - if global_tree.targets_have_extent: - local_tree.target_radii = \ - local_targets_and_lists.particle_radii.get(queue=queue) - - local_tree.box_source_starts = \ - local_sources_and_lists.box_particle_starts.get(queue=queue) - - local_tree.box_source_counts_nonchild = \ - local_sources_and_lists.box_particle_counts_nonchild.get(queue=queue) - - local_tree.box_source_counts_cumul = \ - local_sources_and_lists.box_particle_counts_cumul.get(queue=queue) - - local_tree.box_target_starts = \ - local_targets_and_lists.box_particle_starts.get(queue=queue) - - local_tree.box_target_counts_nonchild = \ - local_targets_and_lists.box_particle_counts_nonchild.get(queue=queue) - - local_tree.box_target_counts_cumul = \ - local_targets_and_lists.box_particle_counts_cumul.get(queue=queue) - - # }}} - - local_tree._dimensions = local_tree.dimensions - local_tree._ntargets = local_tree.targets[0].shape[0] - local_tree._nsources = local_tree.sources[0].shape[0] - - local_tree.__class__ = LocalTree - # {{{ compute the users of multipole expansions of each box on the root rank multipole_src_boxes_all_ranks = None @@ -410,11 +354,70 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): box_to_user_rank_starts = comm.bcast(box_to_user_rank_starts, root=0) box_to_user_rank_lists = comm.bcast(box_to_user_rank_lists, root=0) - local_tree.box_to_user_rank_starts = box_to_user_rank_starts - local_tree.box_to_user_rank_lists = box_to_user_rank_lists - # }}} + local_sources = local_sources_and_lists.particles.get(queue=queue) + local_targets = local_targets_and_lists.particles.get(queue=queue) + + local_tree = LocalTree( + sources_are_targets=global_tree.sources_are_targets, + sources_have_extent=global_tree.sources_have_extent, + targets_have_extent=global_tree.targets_have_extent, + + particle_id_dtype=global_tree.particle_id_dtype, + 
box_id_dtype=global_tree.box_id_dtype, + coord_dtype=global_tree.coord_dtype, + box_level_dtype=global_tree.box_level_dtype, + + root_extent=global_tree.root_extent, + stick_out_factor=global_tree.stick_out_factor, + extent_norm=global_tree.extent_norm, + + bounding_box=global_tree.bounding_box, + level_start_box_nrs=global_tree.level_start_box_nrs, + level_start_box_nrs_dev=global_tree.level_start_box_nrs_dev, + + sources=local_sources, + targets=local_targets, + source_radii=(local_sources_and_lists.particle_radii.get(queue=queue) + if global_tree.sources_have_extent else None), + target_radii=(local_targets_and_lists.particle_radii.get(queue=queue) + if global_tree.targets_have_extent else None), + + box_source_starts=( + local_sources_and_lists.box_particle_starts.get(queue=queue)), + box_source_counts_nonchild=( + local_sources_and_lists.box_particle_counts_nonchild.get(queue=queue)), + box_source_counts_cumul=( + local_sources_and_lists.box_particle_counts_cumul.get(queue=queue)), + box_target_starts=( + local_targets_and_lists.box_particle_starts.get(queue=queue)), + box_target_counts_nonchild=( + local_targets_and_lists.box_particle_counts_nonchild.get(queue=queue)), + box_target_counts_cumul=( + local_targets_and_lists.box_particle_counts_cumul.get(queue=queue)), + + box_parent_ids=global_tree.box_parent_ids, + box_child_ids=global_tree.box_child_ids, + box_centers=global_tree.box_centers, + box_levels=global_tree.box_levels, + box_flags=global_tree.box_flags, + + user_source_ids=None, + sorted_target_ids=None, + + _is_pruned=global_tree._is_pruned, + + responsible_boxes_list=responsible_boxes_list, + ancestor_mask=box_masks.ancestor_boxes.get(), + box_to_user_rank_starts=box_to_user_rank_starts, + box_to_user_rank_lists=box_to_user_rank_lists, + + _dimensions=global_tree.dimensions, + _ntargets=local_targets[0].shape[0], + _nsources=local_sources[0].shape[0] + ) + local_tree = local_tree.to_host_device_array(queue) local_tree.with_queue(None) -- GitLab From a553df63733738cc04dae79b4f42ce44fc2c8592 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 13 Feb 2022 22:53:52 -0800 Subject: [PATCH 257/260] Continue to address reviewer's comments on local tree generation --- boxtree/distributed/local_tree.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 31e3347..7e5a75c 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -135,6 +135,11 @@ class LocalTreeGeneratorCodeContainer: ) ) + @memoize_method + def mask_compressor_kernel(self): + from boxtree.tools import MaskCompressorKernel + return MaskCompressorKernel(self.cl_context) + @dataclass class LocalParticlesAndLists: @@ -269,6 +274,8 @@ class LocalTree(Tree): def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): """Generate the local tree for the current rank. + This is an MPI-collective routine on *comm*. + :arg queue: a :class:`pyopencl.CommandQueue` object. :arg global_traversal: Global :class:`boxtree.traversal.FMMTraversalInfo` object on host memory. 
@@ -338,11 +345,10 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): multipole_src_boxes_all_ranks = cl.array.to_device( queue, multipole_src_boxes_all_ranks) - from boxtree.tools import MaskCompressorKernel - matcompr = MaskCompressorKernel(queue.context) (box_to_user_rank_starts, box_to_user_rank_lists, evt) = \ - matcompr(queue, multipole_src_boxes_all_ranks.transpose(), - list_dtype=np.int32) + code.mask_compressor_kernel()( + queue, multipole_src_boxes_all_ranks.transpose(), + list_dtype=np.int32) cl.wait_for_events([evt]) -- GitLab From 31f1e1ae62a4bafb9734c08d843476fa4876e7e2 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 27 Feb 2022 17:01:25 -0800 Subject: [PATCH 258/260] Use the inherited version of Tree to calculate --- boxtree/distributed/local_tree.py | 22 +--------------------- boxtree/tree.py | 4 ++-- 2 files changed, 3 insertions(+), 23 deletions(-) diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index 7e5a75c..c0dce78 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -254,22 +254,6 @@ class LocalTree(Tree): propagated from an ancestor) List 2. """ - @property - def nboxes(self): - return self.box_source_starts.shape[0] - - @property - def nsources(self): - return self._nsources - - @property - def ntargets(self): - return self._ntargets - - @property - def dimensions(self): - return self._dimensions - def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): """Generate the local tree for the current rank. @@ -417,11 +401,7 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): responsible_boxes_list=responsible_boxes_list, ancestor_mask=box_masks.ancestor_boxes.get(), box_to_user_rank_starts=box_to_user_rank_starts, - box_to_user_rank_lists=box_to_user_rank_lists, - - _dimensions=global_tree.dimensions, - _ntargets=local_targets[0].shape[0], - _nsources=local_sources[0].shape[0] + box_to_user_rank_lists=box_to_user_rank_lists ) local_tree = local_tree.to_host_device_array(queue) diff --git a/boxtree/tree.py b/boxtree/tree.py index 800bf53..cc672b0 100644 --- a/boxtree/tree.py +++ b/boxtree/tree.py @@ -379,11 +379,11 @@ class Tree(DeviceDataRecord): @property def nsources(self): - return len(self.user_source_ids) + return len(self.sources[0]) @property def ntargets(self): - return len(self.sorted_target_ids) + return len(self.targets[0]) @property def nlevels(self): -- GitLab From 954c27839e8f60baddf32596bb6a7d3250e42adf Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 13 Mar 2022 22:14:03 -0700 Subject: [PATCH 259/260] Restrict source boxes directly instead of relying on local_box_flags --- boxtree/distributed/local_traversal.py | 49 +++----------------- boxtree/distributed/local_tree.py | 1 + boxtree/traversal.py | 63 ++++++++++++++++++-------- 3 files changed, 51 insertions(+), 62 deletions(-) diff --git a/boxtree/distributed/local_traversal.py b/boxtree/distributed/local_traversal.py index 1fb461a..db45902 100644 --- a/boxtree/distributed/local_traversal.py +++ b/boxtree/distributed/local_traversal.py @@ -86,52 +86,15 @@ def generate_local_travs( local_tree.box_target_counts_cumul.device, local_tree.box_flags.device) - # Generate local source flags - local_box_flags = \ - local_tree.box_flags.device & (255 - box_flags_enum.HAS_OWN_SOURCES) - local_box_flags = local_box_flags & (255 - box_flags_enum.HAS_CHILD_SOURCES) - - modify_own_sources_knl = cl.elementwise.ElementwiseKernel( - queue.context, - Template(r""" 
- __global ${box_id_t} *responsible_box_list, - __global ${box_flag_t} *box_flags - """).render( - box_id_t=dtype_to_ctype(local_tree.box_id_dtype), - box_flag_t=box_flag_t - ), - Template(r""" - box_flags[responsible_box_list[i]] |= ${HAS_OWN_SOURCES}; - """).render( - HAS_OWN_SOURCES=( - "(" + box_flag_t + ") " + str(box_flags_enum.HAS_OWN_SOURCES)) - ) - ) - - modify_child_sources_knl = cl.elementwise.ElementwiseKernel( - queue.context, - Template(""" - __global char *ancestor_box_mask, - __global ${box_flag_t} *box_flags - """).render( - box_flag_t=box_flag_t - ), - Template(""" - if(ancestor_box_mask[i]) box_flags[i] |= ${HAS_CHILD_SOURCES}; - """).render( - HAS_CHILD_SOURCES=( - "(" + box_flag_t + ") " + str(box_flags_enum.HAS_CHILD_SOURCES) - ) - ) - ) - - modify_own_sources_knl(local_tree.responsible_boxes_list.device, local_box_flags) - modify_child_sources_knl(local_tree.ancestor_mask.device, local_box_flags) - + # We need `source_boxes_mask` and `source_parent_boxes_mask` here to restrict the + # multipole formation and upward propagation within the rank's responsible boxes + # region. Had there not been such restrictions, some sources might be distributed + # to more than 1 rank and counted multiple times. d_local_trav, _ = traversal_builder( queue, local_tree.to_device(queue), box_bounding_box=box_bounding_box, - local_box_flags=local_box_flags + source_boxes_mask=local_tree.responsible_boxes_mask.device, + source_parent_boxes_mask=local_tree.ancestor_mask.device ) if merge_close_lists and local_tree.targets_have_extent: diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py index c0dce78..3aacf3b 100644 --- a/boxtree/distributed/local_tree.py +++ b/boxtree/distributed/local_tree.py @@ -399,6 +399,7 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm): _is_pruned=global_tree._is_pruned, responsible_boxes_list=responsible_boxes_list, + responsible_boxes_mask=box_masks.responsible_boxes.get(), ancestor_mask=box_masks.ancestor_boxes.get(), box_to_user_rank_starts=box_to_user_rank_starts, box_to_user_rank_lists=box_to_user_rank_lists diff --git a/boxtree/traversal.py b/boxtree/traversal.py index 8f1c9cc..ce03738 100644 --- a/boxtree/traversal.py +++ b/boxtree/traversal.py @@ -285,11 +285,21 @@ void generate(LIST_ARG_DECL USER_ARG_DECL box_id_t box_id) { box_flags_t flags = box_flags[box_id]; - if (flags & BOX_HAS_OWN_SOURCES) - { APPEND_source_boxes(box_id); } + %if source_boxes_has_mask: + if (flags & BOX_HAS_OWN_SOURCES && source_boxes_mask[box_id]) + { APPEND_source_boxes(box_id); } + %else: + if (flags & BOX_HAS_OWN_SOURCES) + { APPEND_source_boxes(box_id); } + %endif - if (flags & BOX_HAS_CHILD_SOURCES) - { APPEND_source_parent_boxes(box_id); } + %if source_parent_boxes_has_mask: + if (flags & BOX_HAS_CHILD_SOURCES && source_parent_boxes_mask[box_id]) + { APPEND_source_parent_boxes(box_id); } + %else: + if (flags & BOX_HAS_CHILD_SOURCES) + { APPEND_source_parent_boxes(box_id); } + %endif %if not sources_are_targets: if (flags & BOX_HAS_OWN_TARGETS) @@ -772,7 +782,6 @@ void generate(LIST_ARG_DECL USER_ARG_DECL box_id_t target_box_number) %elif from_sep_smaller_crit == "precise_linf": { - coord_t source_rad = LEVEL_TO_RAD(walk_level); // l^infty distance between source box and target box. 
@@ -788,7 +797,6 @@ void generate(LIST_ARG_DECL USER_ARG_DECL box_id_t target_box_number) meets_sep_crit = l_inf_dist >= (2 - 8 * COORD_T_MACH_EPS) * source_rad; - } %elif from_sep_smaller_crit == "static_l2": @@ -1765,7 +1773,9 @@ class FMMTraversalBuilder: def get_kernel_info(self, dimensions, particle_id_dtype, box_id_dtype, coord_dtype, box_level_dtype, max_levels, sources_are_targets, sources_have_extent, targets_have_extent, - extent_norm): + extent_norm, + source_boxes_has_mask, + source_parent_boxes_has_mask): # {{{ process from_sep_smaller_crit @@ -1828,6 +1838,8 @@ class FMMTraversalBuilder: targets_have_extent=targets_have_extent, well_sep_is_n_away=self.well_sep_is_n_away, from_sep_smaller_crit=from_sep_smaller_crit, + source_boxes_has_mask=source_boxes_has_mask, + source_parent_boxes_has_mask=source_parent_boxes_has_mask ) from pyopencl.algorithm import ListOfListsBuilder from boxtree.tools import VectorArg, ScalarArg @@ -1841,6 +1853,12 @@ class FMMTraversalBuilder: + SOURCES_PARENTS_AND_TARGETS_TEMPLATE, strict_undefined=True).render(**render_vars) + arg_decls = [VectorArg(box_flags_enum.dtype, "box_flags")] + if source_boxes_has_mask: + arg_decls.append(VectorArg(np.int8, "source_boxes_mask")) + if source_parent_boxes_has_mask: + arg_decls.append(VectorArg(np.int8, "source_parent_boxes_mask")) + result["sources_parents_and_targets_builder"] = \ ListOfListsBuilder(self.context, [ @@ -1852,9 +1870,7 @@ class FMMTraversalBuilder: if not sources_are_targets else []), str(src), - arg_decls=[ - VectorArg(box_flags_enum.dtype, "box_flags"), - ], + arg_decls=arg_decls, debug=debug, name_prefix="sources_parents_and_targets") @@ -1974,23 +1990,24 @@ class FMMTraversalBuilder: def __call__(self, queue, tree, wait_for=None, debug=False, _from_sep_smaller_min_nsources_cumul=None, - box_bounding_box=None, local_box_flags=None): + box_bounding_box=None, + source_boxes_mask=None, + source_parent_boxes_mask=None): """ :arg queue: A :class:`pyopencl.CommandQueue` instance. :arg tree: A :class:`boxtree.Tree` instance. :arg wait_for: may either be *None* or a list of :class:`pyopencl.Event` instances for whose completion this command waits before starting exeuction. - :arg local_box_flags: Used by distributed FMM for building source boxes - for local trees. + :arg source_boxes_mask: Only boxes passing this mask will be considered for + `source_boxes`. Used by the distributed implementation. + :arg source_parent_boxes_mask: Only boxes passing this mask will be + considered for `source_parent_boxes`. Used by the distributed + implementation. :return: A tuple *(trav, event)*, where *trav* is a new instance of :class:`FMMTraversalInfo` and *event* is a :class:`pyopencl.Event` for dependency management. 
""" - - if local_box_flags is None: - local_box_flags = tree.box_flags - if _from_sep_smaller_min_nsources_cumul is None: # default to old no-threshold behavior _from_sep_smaller_min_nsources_cumul = 0 @@ -2014,7 +2031,9 @@ class FMMTraversalBuilder: tree.coord_dtype, tree.box_level_dtype, max_levels, tree.sources_are_targets, tree.sources_have_extent, tree.targets_have_extent, - tree.extent_norm) + tree.extent_norm, + source_boxes_mask is not None, + source_parent_boxes_mask is not None) def fin_debug(s): if debug: @@ -2028,8 +2047,14 @@ class FMMTraversalBuilder: fin_debug("building list of source boxes, their parents, and target boxes") + extra_args = [] + if source_boxes_mask is not None: + extra_args.append(source_boxes_mask) + if source_parent_boxes_mask is not None: + extra_args.append(source_parent_boxes_mask) + result, evt = knl_info.sources_parents_and_targets_builder( - queue, tree.nboxes, local_box_flags, wait_for=wait_for + queue, tree.nboxes, tree.box_flags, *extra_args, wait_for=wait_for ) wait_for = [evt] -- GitLab From d4368f188cb45fc974357d86848ae325eaf320a6 Mon Sep 17 00:00:00 2001 From: Hao Gao Date: Sun, 13 Mar 2022 22:50:41 -0700 Subject: [PATCH 260/260] Move the logic of modifying target box flags to local tree construction --- boxtree/distributed/local_traversal.py | 43 +-------------------- boxtree/distributed/local_tree.py | 53 +++++++++++++++++++++++++- 2 files changed, 53 insertions(+), 43 deletions(-) diff --git a/boxtree/distributed/local_traversal.py b/boxtree/distributed/local_traversal.py index db45902..3751496 100644 --- a/boxtree/distributed/local_traversal.py +++ b/boxtree/distributed/local_traversal.py @@ -22,11 +22,8 @@ THE SOFTWARE. """ import time -from pyopencl.tools import dtype_to_ctype -import pyopencl as cl -from mako.template import Template - import logging + logger = logging.getLogger(__name__) @@ -48,44 +45,6 @@ def generate_local_travs( local_tree.with_queue(queue) - # TODO: Maybe move the logic here to local tree construction? - # Modify box flags for targets - from boxtree import box_flags_enum - box_flag_t = dtype_to_ctype(box_flags_enum.dtype) - modify_target_flags_knl = cl.elementwise.ElementwiseKernel( - queue.context, - Template(""" - __global ${particle_id_t} *box_target_counts_nonchild, - __global ${particle_id_t} *box_target_counts_cumul, - __global ${box_flag_t} *box_flags - """).render( - particle_id_t=dtype_to_ctype(local_tree.particle_id_dtype), - box_flag_t=box_flag_t - ), - Template(r""" - // reset HAS_OWN_TARGETS and HAS_CHILD_TARGETS bits in the flag of each - // box - box_flags[i] &= (~${HAS_OWN_TARGETS}); - box_flags[i] &= (~${HAS_CHILD_TARGETS}); - - // rebuild HAS_OWN_TARGETS and HAS_CHILD_TARGETS bits - if(box_target_counts_nonchild[i]) box_flags[i] |= ${HAS_OWN_TARGETS}; - if(box_target_counts_nonchild[i] < box_target_counts_cumul[i]) - box_flags[i] |= ${HAS_CHILD_TARGETS}; - """).render( - HAS_OWN_TARGETS=( - "(" + box_flag_t + ") " + str(box_flags_enum.HAS_OWN_TARGETS) - ), - HAS_CHILD_TARGETS=( - "(" + box_flag_t + ") " + str(box_flags_enum.HAS_CHILD_TARGETS) - ) - ) - ) - - modify_target_flags_knl(local_tree.box_target_counts_nonchild.device, - local_tree.box_target_counts_cumul.device, - local_tree.box_flags.device) - # We need `source_boxes_mask` and `source_parent_boxes_mask` here to restrict the # multipole formation and upward propagation within the rank's responsible boxes # region. 
Had there not been such restrictions, some sources might be distributed
diff --git a/boxtree/distributed/local_tree.py b/boxtree/distributed/local_tree.py
index 3aacf3b..59b5844 100644
--- a/boxtree/distributed/local_tree.py
+++ b/boxtree/distributed/local_tree.py
@@ -140,6 +140,41 @@ class LocalTreeGeneratorCodeContainer:
         from boxtree.tools import MaskCompressorKernel
         return MaskCompressorKernel(self.cl_context)
 
+    @memoize_method
+    def modify_target_flags_kernel(self):
+        from boxtree import box_flags_enum
+        box_flag_t = dtype_to_ctype(box_flags_enum.dtype)
+
+        return cl.elementwise.ElementwiseKernel(
+            self.cl_context,
+            Template("""
+                __global ${particle_id_t} *box_target_counts_nonchild,
+                __global ${particle_id_t} *box_target_counts_cumul,
+                __global ${box_flag_t} *box_flags
+            """).render(
+                particle_id_t=dtype_to_ctype(self.particle_id_dtype),
+                box_flag_t=box_flag_t
+            ),
+            Template(r"""
+                // reset HAS_OWN_TARGETS and HAS_CHILD_TARGETS bits in the flag of
+                // each box
+                box_flags[i] &= (~${HAS_OWN_TARGETS});
+                box_flags[i] &= (~${HAS_CHILD_TARGETS});
+
+                // rebuild HAS_OWN_TARGETS and HAS_CHILD_TARGETS bits
+                if(box_target_counts_nonchild[i]) box_flags[i] |= ${HAS_OWN_TARGETS};
+                if(box_target_counts_nonchild[i] < box_target_counts_cumul[i])
+                    box_flags[i] |= ${HAS_CHILD_TARGETS};
+            """).render(
+                HAS_OWN_TARGETS=(
+                    "(" + box_flag_t + ") " + str(box_flags_enum.HAS_OWN_TARGETS)
+                ),
+                HAS_CHILD_TARGETS=(
+                    "(" + box_flag_t + ") " + str(box_flags_enum.HAS_CHILD_TARGETS)
+                )
+            )
+        )
+
 
 @dataclass
 class LocalParticlesAndLists:
@@ -346,6 +381,22 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm):
 
     # }}}
 
+    # {{{ Reconstruct the target box flags
+
+    # Note: We do not change the source box flags even though the local tree
+    # may only contain a subset of sources. This is because evaluating target
+    # potentials in the responsible boxes of the current rank may depend on
+    # multipole expansions formed by sources on other ranks. Modifying the
+    # source box flags could result in incomplete interaction lists.
+
+    local_box_flags = global_tree_dev.box_flags.copy(queue=queue)
+    code.modify_target_flags_kernel()(
+        local_targets_and_lists.box_particle_counts_nonchild,
+        local_targets_and_lists.box_particle_counts_cumul,
+        local_box_flags)
+
+    # }}}
+
     local_sources = local_sources_and_lists.particles.get(queue=queue)
     local_targets = local_targets_and_lists.particles.get(queue=queue)
 
@@ -391,7 +442,7 @@ def generate_local_tree(queue, global_traversal, responsible_boxes_list, comm):
         box_child_ids=global_tree.box_child_ids,
         box_centers=global_tree.box_centers,
         box_levels=global_tree.box_levels,
-        box_flags=global_tree.box_flags,
+        box_flags=local_box_flags.get(queue=queue),
 
         user_source_ids=None,
         sorted_target_ids=None,
-- 
GitLab
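
For orientation, a rough, non-authoritative sketch of how the pieces introduced in the patches above might fit together on each rank. Only generate_local_tree, the responsible_boxes_mask and ancestor_mask fields, and the source_boxes_mask and source_parent_boxes_mask arguments of FMMTraversalBuilder.__call__ are taken from the diffs; the wrapper function and its signature are hypothetical, a fresh FMMTraversalBuilder is constructed inline purely for brevity, and a real driver would obtain global_traversal and responsible_boxes_list from the partitioning stage before calling it.

    # Illustrative sketch only; the wrapper below is hypothetical and not part
    # of the patch series.
    from boxtree.traversal import FMMTraversalBuilder
    from boxtree.distributed.local_tree import generate_local_tree

    def build_local_tree_and_traversal(queue, global_traversal,
                                       responsible_boxes_list, comm):
        # Collective call: each rank obtains its LocalTree plus the indices of
        # its local sources/targets within the global tree.
        local_tree, src_idx, tgt_idx = generate_local_tree(
            queue, global_traversal, responsible_boxes_list, comm)

        # Build the rank-local traversal. The two masks restrict multipole
        # formation and upward propagation to the rank's responsible-box
        # region, mirroring the call inside generate_local_travs above.
        traversal_builder = FMMTraversalBuilder(queue.context)
        local_trav_dev, _ = traversal_builder(
            queue, local_tree.to_device(queue),
            source_boxes_mask=local_tree.responsible_boxes_mask.device,
            source_parent_boxes_mask=local_tree.ancestor_mask.device)

        return local_tree, local_trav_dev.get(queue=queue), src_idx, tgt_idx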